Diffstat (limited to 'drivers')
-rw-r--r--drivers/acpi/acpi_lpss.c3
-rw-r--r--drivers/acpi/acpica/exstore.c166
-rw-r--r--drivers/acpi/glue.c35
-rw-r--r--drivers/acpi/scan.c15
-rw-r--r--drivers/atm/he.c13
-rw-r--r--drivers/atm/nicstar.c2
-rw-r--r--drivers/base/dma-buf.c32
-rw-r--r--drivers/base/dma-contiguous.c119
-rw-r--r--drivers/base/node.c6
-rw-r--r--drivers/bcma/host_pci.c8
-rw-r--r--drivers/block/aoe/aoe.h4
-rw-r--r--drivers/block/aoe/aoeblk.c100
-rw-r--r--drivers/block/aoe/aoecmd.c4
-rw-r--r--drivers/block/aoe/aoedev.c10
-rw-r--r--drivers/block/cciss.c7
-rw-r--r--drivers/block/mg_disk.c2
-rw-r--r--drivers/block/mtip32xx/Kconfig2
-rw-r--r--drivers/block/nvme-core.c585
-rw-r--r--drivers/block/nvme-scsi.c24
-rw-r--r--drivers/block/osdblk.c2
-rw-r--r--drivers/block/pktcdvd.c278
-rw-r--r--drivers/block/rbd.c115
-rw-r--r--drivers/block/swim.c2
-rw-r--r--drivers/block/xen-blkback/xenbus.c2
-rw-r--r--drivers/char/random.c5
-rw-r--r--drivers/char/tpm/tpm_tis.c60
-rw-r--r--drivers/char/virtio_console.c27
-rw-r--r--drivers/clk/Kconfig8
-rw-r--r--drivers/clk/Makefile1
-rw-r--r--drivers/clk/clk-bcm2835.c2
-rw-r--r--drivers/clk/clk-divider.c8
-rw-r--r--drivers/clk/clk-fixed-factor.c2
-rw-r--r--drivers/clk/clk-fixed-rate.c1
-rw-r--r--drivers/clk/clk-gate.c7
-rw-r--r--drivers/clk/clk-mux.c19
-rw-r--r--drivers/clk/clk-nomadik.c4
-rw-r--r--drivers/clk/clk-prima2.c2
-rw-r--r--drivers/clk/clk-s2mps11.c273
-rw-r--r--drivers/clk/clk-u300.c4
-rw-r--r--drivers/clk/clk-wm831x.c16
-rw-r--r--drivers/clk/clk.c450
-rw-r--r--drivers/clk/mmp/clk-mmp2.c39
-rw-r--r--drivers/clk/mmp/clk-pxa168.c40
-rw-r--r--drivers/clk/mmp/clk-pxa910.c31
-rw-r--r--drivers/clk/mvebu/armada-370.c14
-rw-r--r--drivers/clk/mvebu/armada-xp.c12
-rw-r--r--drivers/clk/mvebu/clk-cpu.c4
-rw-r--r--drivers/clk/mvebu/common.c18
-rw-r--r--drivers/clk/mvebu/dove.c12
-rw-r--r--drivers/clk/mvebu/kirkwood.c14
-rw-r--r--drivers/clk/mxs/clk-imx23.c1
-rw-r--r--drivers/clk/mxs/clk.h4
-rw-r--r--drivers/clk/samsung/Makefile3
-rw-r--r--drivers/clk/samsung/clk-exynos-audss.c8
-rw-r--r--drivers/clk/samsung/clk-exynos4.c605
-rw-r--r--drivers/clk/samsung/clk-exynos5250.c129
-rw-r--r--drivers/clk/samsung/clk-exynos5420.c123
-rw-r--r--drivers/clk/samsung/clk-exynos5440.c18
-rw-r--r--drivers/clk/samsung/clk-pll.c701
-rw-r--r--drivers/clk/samsung/clk-pll.h85
-rw-r--r--drivers/clk/samsung/clk-s3c64xx.c473
-rw-r--r--drivers/clk/samsung/clk.c10
-rw-r--r--drivers/clk/samsung/clk.h55
-rw-r--r--drivers/clk/spear/spear1310_clock.c179
-rw-r--r--drivers/clk/spear/spear1340_clock.c97
-rw-r--r--drivers/clk/spear/spear3xx_clock.c57
-rw-r--r--drivers/clk/spear/spear6xx_clock.c35
-rw-r--r--drivers/clk/sunxi/clk-sunxi.c270
-rw-r--r--drivers/clk/tegra/clk-tegra114.c38
-rw-r--r--drivers/clk/tegra/clk-tegra20.c8
-rw-r--r--drivers/clk/tegra/clk-tegra30.c37
-rw-r--r--drivers/clk/versatile/clk-vexpress.c4
-rw-r--r--drivers/clk/zynq/clkc.c82
-rw-r--r--drivers/clk/zynq/pll.c19
-rw-r--r--drivers/clocksource/em_sti.c49
-rw-r--r--drivers/clocksource/nomadik-mtu.c3
-rw-r--r--drivers/clocksource/samsung_pwm_timer.c12
-rw-r--r--drivers/clocksource/sh_cmt.c50
-rw-r--r--drivers/clocksource/time-armada-370-xp.c131
-rw-r--r--drivers/cpufreq/cpufreq.c152
-rw-r--r--drivers/cpufreq/cpufreq_stats.c2
-rw-r--r--drivers/cpufreq/intel_pstate.c5
-rw-r--r--drivers/cpuidle/Kconfig.arm10
-rw-r--r--drivers/cpuidle/Makefile1
-rw-r--r--drivers/cpuidle/cpuidle-big_little.c209
-rw-r--r--drivers/cpuidle/driver.c3
-rw-r--r--drivers/dma/Kconfig9
-rw-r--r--drivers/dma/Makefile1
-rw-r--r--drivers/dma/acpi-dma.c4
-rw-r--r--drivers/dma/amba-pl08x.c524
-rw-r--r--drivers/dma/dmaengine.c83
-rw-r--r--drivers/dma/dmatest.c182
-rw-r--r--drivers/dma/dw/Kconfig1
-rw-r--r--drivers/dma/dw/core.c39
-rw-r--r--drivers/dma/dw/platform.c1
-rw-r--r--drivers/dma/edma.c164
-rw-r--r--drivers/dma/ep93xx_dma.c10
-rw-r--r--drivers/dma/fsldma.c10
-rw-r--r--drivers/dma/imx-dma.c6
-rw-r--r--drivers/dma/imx-sdma.c179
-rw-r--r--drivers/dma/ioat/dma_v3.c26
-rw-r--r--drivers/dma/iop-adma.c6
-rw-r--r--drivers/dma/ipu/ipu_idmac.c6
-rw-r--r--drivers/dma/k3dma.c837
-rw-r--r--drivers/dma/mmp_pdma.c320
-rw-r--r--drivers/dma/mmp_tdma.c6
-rw-r--r--drivers/dma/mpc512x_dma.c10
-rw-r--r--drivers/dma/mv_xor.c57
-rw-r--r--drivers/dma/mv_xor.h28
-rw-r--r--drivers/dma/mxs-dma.c27
-rw-r--r--drivers/dma/of-dma.c3
-rw-r--r--drivers/dma/pch_dma.c10
-rw-r--r--drivers/dma/pl330.c179
-rw-r--r--drivers/dma/sh/Kconfig10
-rw-r--r--drivers/dma/sh/Makefile6
-rw-r--r--drivers/dma/sh/rcar-hpbdma.c655
-rw-r--r--drivers/dma/sh/shdma-arm.h51
-rw-r--r--drivers/dma/sh/shdma-base.c26
-rw-r--r--drivers/dma/sh/shdma-of.c5
-rw-r--r--drivers/dma/sh/shdma-r8a73a4.c77
-rw-r--r--drivers/dma/sh/shdma.h16
-rw-r--r--drivers/dma/sh/shdmac.c (renamed from drivers/dma/sh/shdma.c)160
-rw-r--r--drivers/dma/sh/sudmac.c22
-rw-r--r--drivers/dma/sirf-dma.c134
-rw-r--r--drivers/dma/ste_dma40.c23
-rw-r--r--drivers/dma/tegra20-apb-dma.c8
-rw-r--r--drivers/dma/timb_dma.c2
-rw-r--r--drivers/dma/txx9dmac.c22
-rw-r--r--drivers/firewire/core-cdev.c2
-rw-r--r--drivers/firewire/core-transaction.c3
-rw-r--r--drivers/firewire/ohci.c50
-rw-r--r--drivers/firmware/dmi_scan.c73
-rw-r--r--drivers/firmware/google/gsmi.c2
-rw-r--r--drivers/gpio/Kconfig14
-rw-r--r--drivers/gpio/Makefile1
-rw-r--r--drivers/gpio/gpio-octeon.c157
-rw-r--r--drivers/gpio/gpiolib-of.c4
-rw-r--r--drivers/gpu/drm/ast/ast_drv.h2
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c8
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c11
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c19
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c164
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h7
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c130
-rw-r--r--drivers/gpu/drm/i915/i915_gem_dmabuf.c41
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c3
-rw-r--r--drivers/gpu/drm/i915/i915_gem_stolen.c3
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c2
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c23
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h34
-rw-r--r--drivers/gpu/drm/i915/i915_sysfs.c36
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c2
-rw-r--r--drivers/gpu/drm/i915/intel_display.c83
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h3
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c8
-rw-r--r--drivers/gpu/drm/i915/intel_opregion.c2
-rw-r--r--drivers/gpu/drm/i915/intel_panel.c14
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c14
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c99
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.h6
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c17
-rw-r--r--drivers/gpu/drm/i915/intel_sprite.c7
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c9
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/init.c21
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c35
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c9
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sgdma.c4
-rw-r--r--drivers/gpu/drm/radeon/atombios_encoders.c23
-rw-r--r--drivers/gpu/drm/radeon/btc_dpm.c6
-rw-r--r--drivers/gpu/drm/radeon/ci_dpm.c6
-rw-r--r--drivers/gpu/drm/radeon/ci_smc.c39
-rw-r--r--drivers/gpu/drm/radeon/cik.c36
-rw-r--r--drivers/gpu/drm/radeon/cypress_dpm.c6
-rw-r--r--drivers/gpu/drm/radeon/dce6_afmt.c12
-rw-r--r--drivers/gpu/drm/radeon/kv_dpm.c164
-rw-r--r--drivers/gpu/drm/radeon/kv_dpm.h1
-rw-r--r--drivers/gpu/drm/radeon/kv_smc.c8
-rw-r--r--drivers/gpu/drm/radeon/ni_dpm.c6
-rw-r--r--drivers/gpu/drm/radeon/ppsmc.h2
-rw-r--r--drivers/gpu/drm/radeon/r100.c7
-rw-r--r--drivers/gpu/drm/radeon/r420.c7
-rw-r--r--drivers/gpu/drm/radeon/r600.c19
-rw-r--r--drivers/gpu/drm/radeon/r600_dpm.c38
-rw-r--r--drivers/gpu/drm/radeon/r600d.h2
-rw-r--r--drivers/gpu/drm/radeon/radeon.h82
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.c11
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.h5
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c69
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c11
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c11
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c12
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h9
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c81
-rw-r--r--drivers/gpu/drm/radeon/radeon_trace.h27
-rw-r--r--drivers/gpu/drm/radeon/rs400.c7
-rw-r--r--drivers/gpu/drm/radeon/rs600.c12
-rw-r--r--drivers/gpu/drm/radeon/rs690.c7
-rw-r--r--drivers/gpu/drm/radeon/rs780_dpm.c112
-rw-r--r--drivers/gpu/drm/radeon/rv515.c8
-rw-r--r--drivers/gpu/drm/radeon/rv6xx_dpm.c2
-rw-r--r--drivers/gpu/drm/radeon/rv770_dpm.c16
-rw-r--r--drivers/gpu/drm/radeon/rv770_smc.c44
-rw-r--r--drivers/gpu/drm/radeon/rv770_smc.h2
-rw-r--r--drivers/gpu/drm/radeon/rv770d.h2
-rw-r--r--drivers/gpu/drm/radeon/si.c21
-rw-r--r--drivers/gpu/drm/radeon/si_dpm.c6
-rw-r--r--drivers/gpu/drm/radeon/si_smc.c43
-rw-r--r--drivers/gpu/drm/radeon/sumo_dpm.c2
-rw-r--r--drivers/gpu/drm/radeon/trinity_dpm.c17
-rw-r--r--drivers/gpu/drm/radeon/trinity_dpm.h2
-rw-r--r--drivers/gpu/drm/radeon/trinity_smc.c8
-rw-r--r--drivers/gpu/drm/ttm/ttm_object.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_page_alloc.c44
-rw-r--r--drivers/gpu/drm/ttm/ttm_page_alloc_dma.c51
-rw-r--r--drivers/gpu/drm/ttm/ttm_tt.c2
-rw-r--r--drivers/gpu/drm/udl/udl_gem.c1
-rw-r--r--drivers/gpu/vga/vgaarb.c51
-rw-r--r--drivers/hid/Kconfig2
-rw-r--r--drivers/hid/hid-core.c74
-rw-r--r--drivers/hid/hid-input.c11
-rw-r--r--drivers/hid/hid-lenovo-tpkbd.c25
-rw-r--r--drivers/hid/hid-lg2ff.c19
-rw-r--r--drivers/hid/hid-lg3ff.c29
-rw-r--r--drivers/hid/hid-lg4ff.c20
-rw-r--r--drivers/hid/hid-lgff.c17
-rw-r--r--drivers/hid/hid-logitech-dj.c10
-rw-r--r--drivers/hid/hid-multitouch.c26
-rw-r--r--drivers/hid/hid-sony.c4
-rw-r--r--drivers/hid/hid-steelseries.c5
-rw-r--r--drivers/hid/hid-zpff.c18
-rw-r--r--drivers/hwmon/amc6821.c7
-rw-r--r--drivers/hwmon/emc2103.c10
-rw-r--r--drivers/hwmon/hwmon-vid.c2
-rw-r--r--drivers/hwmon/ibmaem.c2
-rw-r--r--drivers/hwmon/ina2xx.c3
-rw-r--r--drivers/hwmon/k10temp.c1
-rw-r--r--drivers/hwmon/tmp421.c2
-rw-r--r--drivers/i2c/Kconfig1
-rw-r--r--drivers/i2c/busses/Kconfig6
-rw-r--r--drivers/i2c/busses/i2c-davinci.c2
-rw-r--r--drivers/iio/Kconfig1
-rw-r--r--drivers/infiniband/hw/qib/Kconfig2
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.c747
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.h26
-rw-r--r--drivers/input/evdev.c37
-rw-r--r--drivers/input/keyboard/Kconfig4
-rw-r--r--drivers/input/serio/Kconfig1
-rw-r--r--drivers/input/touchscreen/Kconfig4
-rw-r--r--drivers/iommu/Kconfig10
-rw-r--r--drivers/iommu/Makefile1
-rw-r--r--drivers/iommu/amd_iommu.c4
-rw-r--r--drivers/iommu/amd_iommu_init.c2
-rw-r--r--drivers/iommu/arm-smmu.c93
-rw-r--r--drivers/iommu/exynos-iommu.c44
-rw-r--r--drivers/iommu/fsl_pamu.c1309
-rw-r--r--drivers/iommu/fsl_pamu.h410
-rw-r--r--drivers/iommu/fsl_pamu_domain.c1172
-rw-r--r--drivers/iommu/fsl_pamu_domain.h85
-rw-r--r--drivers/iommu/intel-iommu.c72
-rw-r--r--drivers/iommu/msm_iommu_dev.c2
-rw-r--r--drivers/iommu/omap-iommu.c2
-rw-r--r--drivers/irqchip/Kconfig5
-rw-r--r--drivers/irqchip/Makefile2
-rw-r--r--drivers/irqchip/irq-gic.c6
-rw-r--r--drivers/irqchip/irq-imgpdc.c499
-rw-r--r--drivers/irqchip/irq-mmp.c495
-rw-r--r--drivers/isdn/hardware/mISDN/hfcpci.c4
-rw-r--r--drivers/isdn/hisax/amd7930_fn.c4
-rw-r--r--drivers/isdn/hisax/avm_pci.c4
-rw-r--r--drivers/isdn/hisax/config.c2
-rw-r--r--drivers/isdn/hisax/diva.c4
-rw-r--r--drivers/isdn/hisax/elsa.c2
-rw-r--r--drivers/isdn/hisax/elsa_ser.c2
-rw-r--r--drivers/isdn/hisax/hfc_pci.c2
-rw-r--r--drivers/isdn/hisax/hfc_sx.c2
-rw-r--r--drivers/isdn/hisax/hscx_irq.c4
-rw-r--r--drivers/isdn/hisax/icc.c4
-rw-r--r--drivers/isdn/hisax/ipacx.c8
-rw-r--r--drivers/isdn/hisax/isac.c4
-rw-r--r--drivers/isdn/hisax/isar.c6
-rw-r--r--drivers/isdn/hisax/jade.c18
-rw-r--r--drivers/isdn/hisax/jade_irq.c4
-rw-r--r--drivers/isdn/hisax/l3_1tr6.c50
-rw-r--r--drivers/isdn/hisax/netjet.c2
-rw-r--r--drivers/isdn/hisax/q931.c6
-rw-r--r--drivers/isdn/hisax/w6692.c8
-rw-r--r--drivers/leds/Kconfig34
-rw-r--r--drivers/leds/Makefile3
-rw-r--r--drivers/leds/leds-88pm860x.c2
-rw-r--r--drivers/leds/leds-adp5520.c6
-rw-r--r--drivers/leds/leds-asic3.c4
-rw-r--r--drivers/leds/leds-atmel-pwm.c4
-rw-r--r--drivers/leds/leds-bd2802.c2
-rw-r--r--drivers/leds/leds-clevo-mail.c2
-rw-r--r--drivers/leds/leds-da903x.c2
-rw-r--r--drivers/leds/leds-da9052.c4
-rw-r--r--drivers/leds/leds-gpio.c2
-rw-r--r--drivers/leds/leds-lm3530.c2
-rw-r--r--drivers/leds/leds-lm3533.c2
-rw-r--r--drivers/leds/leds-lm355x.c2
-rw-r--r--drivers/leds/leds-lm3642.c2
-rw-r--r--drivers/leds/leds-lp3944.c7
-rw-r--r--drivers/leds/leds-lp5521.c118
-rw-r--r--drivers/leds/leds-lp5523.c325
-rw-r--r--drivers/leds/leds-lp5562.c8
-rw-r--r--drivers/leds/leds-lp55xx-common.c3
-rw-r--r--drivers/leds/leds-lp55xx-common.h66
-rw-r--r--drivers/leds/leds-lp8501.c410
-rw-r--r--drivers/leds/leds-lt3593.c4
-rw-r--r--drivers/leds/leds-netxbig.c6
-rw-r--r--drivers/leds/leds-ns2.c2
-rw-r--r--drivers/leds/leds-pca9532.c3
-rw-r--r--drivers/leds/leds-pca955x.c2
-rw-r--r--drivers/leds/leds-pca9633.c194
-rw-r--r--drivers/leds/leds-pca963x.c461
-rw-r--r--drivers/leds/leds-pwm.c2
-rw-r--r--drivers/leds/leds-regulator.c3
-rw-r--r--drivers/leds/leds-s3c24xx.c2
-rw-r--r--drivers/leds/leds-ss4200.c4
-rw-r--r--drivers/leds/leds-tca6507.c2
-rw-r--r--drivers/leds/leds-wm831x-status.c8
-rw-r--r--drivers/leds/leds-wm8350.c2
-rw-r--r--drivers/leds/trigger/ledtrig-backlight.c30
-rw-r--r--drivers/lguest/interrupts_and_traps.c10
-rw-r--r--drivers/lguest/page_tables.c4
-rw-r--r--drivers/md/Makefile2
-rw-r--r--drivers/md/bcache/btree.c43
-rw-r--r--drivers/md/bcache/sysfs.c2
-rw-r--r--drivers/md/dm-bufio.c64
-rw-r--r--drivers/md/dm-cache-target.c59
-rw-r--r--drivers/md/dm-crypt.c10
-rw-r--r--drivers/md/dm-ioctl.c60
-rw-r--r--drivers/md/dm-kcopyd.c3
-rw-r--r--drivers/md/dm-raid1.c3
-rw-r--r--drivers/md/dm-stats.c969
-rw-r--r--drivers/md/dm-stats.h40
-rw-r--r--drivers/md/dm-stripe.c1
-rw-r--r--drivers/md/dm-table.c20
-rw-r--r--drivers/md/dm-target.c9
-rw-r--r--drivers/md/dm-thin.c122
-rw-r--r--drivers/md/dm.c70
-rw-r--r--drivers/md/dm.h27
-rw-r--r--drivers/md/md.c54
-rw-r--r--drivers/md/md.h8
-rw-r--r--drivers/md/persistent-data/dm-block-manager.c5
-rw-r--r--drivers/md/persistent-data/dm-block-manager.h5
-rw-r--r--drivers/md/persistent-data/dm-btree.c28
-rw-r--r--drivers/md/persistent-data/dm-space-map-common.c77
-rw-r--r--drivers/md/raid5.c362
-rw-r--r--drivers/md/raid5.h22
-rw-r--r--drivers/media/platform/Kconfig2
-rw-r--r--drivers/media/radio/Kconfig2
-rw-r--r--drivers/memstick/core/Kconfig12
-rw-r--r--drivers/memstick/core/Makefile2
-rw-r--r--drivers/memstick/core/ms_block.c2385
-rw-r--r--drivers/memstick/core/ms_block.h290
-rw-r--r--drivers/memstick/host/rtsx_pci_ms.c5
-rw-r--r--drivers/mfd/88pm800.c12
-rw-r--r--drivers/mfd/88pm805.c4
-rw-r--r--drivers/mfd/88pm860x-core.c2
-rw-r--r--drivers/mfd/Kconfig138
-rw-r--r--drivers/mfd/Makefile3
-rw-r--r--drivers/mfd/aat2870-core.c2
-rw-r--r--drivers/mfd/ab3100-core.c2
-rw-r--r--drivers/mfd/ab8500-debugfs.c14
-rw-r--r--drivers/mfd/ab8500-gpadc.c4
-rw-r--r--drivers/mfd/adp5520.c2
-rw-r--r--drivers/mfd/arizona-core.c2
-rw-r--r--drivers/mfd/as3711.c2
-rw-r--r--drivers/mfd/asic3.c2
-rw-r--r--drivers/mfd/da903x.c2
-rw-r--r--drivers/mfd/da9052-core.c2
-rw-r--r--drivers/mfd/da9055-core.c7
-rw-r--r--drivers/mfd/da9055-i2c.c2
-rw-r--r--drivers/mfd/da9063-core.c185
-rw-r--r--drivers/mfd/da9063-i2c.c182
-rw-r--r--drivers/mfd/da9063-irq.c193
-rw-r--r--drivers/mfd/davinci_voicecodec.c23
-rw-r--r--drivers/mfd/db8500-prcmu.c4
-rw-r--r--drivers/mfd/dm355evm_msp.c4
-rw-r--r--drivers/mfd/ezx-pcap.c6
-rw-r--r--drivers/mfd/htc-egpio.c2
-rw-r--r--drivers/mfd/htc-i2cpld.c10
-rw-r--r--drivers/mfd/htc-pasic3.c2
-rw-r--r--drivers/mfd/intel_msic.c4
-rw-r--r--drivers/mfd/kempld-core.c30
-rw-r--r--drivers/mfd/lm3533-core.c8
-rw-r--r--drivers/mfd/lp8788.c2
-rw-r--r--drivers/mfd/lpc_ich.c2
-rw-r--r--drivers/mfd/max77686.c2
-rw-r--r--drivers/mfd/max77693.c2
-rw-r--r--drivers/mfd/max8925-i2c.c2
-rw-r--r--drivers/mfd/max8997.c18
-rw-r--r--drivers/mfd/max8998.c7
-rw-r--r--drivers/mfd/mcp-sa11x0.c2
-rw-r--r--drivers/mfd/menelaus.c22
-rw-r--r--drivers/mfd/mfd-core.c2
-rw-r--r--drivers/mfd/omap-usb-host.c6
-rw-r--r--drivers/mfd/palmas.c130
-rw-r--r--drivers/mfd/pcf50633-adc.c3
-rw-r--r--drivers/mfd/pcf50633-core.c2
-rw-r--r--drivers/mfd/pm8921-core.c14
-rw-r--r--drivers/mfd/rc5t583.c2
-rw-r--r--drivers/mfd/rtl8411.c94
-rw-r--r--drivers/mfd/rts5209.c63
-rw-r--r--drivers/mfd/rts5227.c115
-rw-r--r--drivers/mfd/rts5229.c53
-rw-r--r--drivers/mfd/rts5249.c110
-rw-r--r--drivers/mfd/rtsx_pcr.c76
-rw-r--r--drivers/mfd/rtsx_pcr.h32
-rw-r--r--drivers/mfd/sec-core.c40
-rw-r--r--drivers/mfd/si476x-i2c.c2
-rw-r--r--drivers/mfd/sm501.c6
-rw-r--r--drivers/mfd/sta2x11-mfd.c4
-rw-r--r--drivers/mfd/stmpe.c3
-rw-r--r--drivers/mfd/syscon.c8
-rw-r--r--drivers/mfd/t7l66xb.c8
-rw-r--r--drivers/mfd/tc3589x.c2
-rw-r--r--drivers/mfd/tc6387xb.c6
-rw-r--r--drivers/mfd/tc6393xb.c8
-rw-r--r--drivers/mfd/ti-ssp.c2
-rw-r--r--drivers/mfd/ti_am335x_tscadc.c32
-rw-r--r--drivers/mfd/timberdale.c34
-rw-r--r--drivers/mfd/tps6105x.c2
-rw-r--r--drivers/mfd/tps65010.c24
-rw-r--r--drivers/mfd/tps65090.c2
-rw-r--r--drivers/mfd/tps6586x.c2
-rw-r--r--drivers/mfd/tps65912-core.c2
-rw-r--r--drivers/mfd/tps80031.c2
-rw-r--r--drivers/mfd/twl-core.c2
-rw-r--r--drivers/mfd/twl4030-audio.c2
-rw-r--r--drivers/mfd/twl4030-madc.c2
-rw-r--r--drivers/mfd/twl4030-power.c8
-rw-r--r--drivers/mfd/twl6030-irq.c377
-rw-r--r--drivers/mfd/twl6040.c133
-rw-r--r--drivers/mfd/ucb1400_core.c2
-rw-r--r--drivers/mfd/ucb1x00-core.c34
-rw-r--r--drivers/mfd/wl1273-core.c8
-rw-r--r--drivers/mfd/wm5110-tables.c18
-rw-r--r--drivers/mfd/wm831x-core.c2
-rw-r--r--drivers/mfd/wm831x-irq.c2
-rw-r--r--drivers/mfd/wm831x-spi.c1
-rw-r--r--drivers/mfd/wm8350-i2c.c3
-rw-r--r--drivers/mfd/wm8400-core.c2
-rw-r--r--drivers/mfd/wm8994-core.c44
-rw-r--r--drivers/mfd/wm8994-irq.c2
-rw-r--r--drivers/misc/cb710/Kconfig2
-rw-r--r--drivers/mmc/card/block.c47
-rw-r--r--drivers/mmc/card/mmc_test.c14
-rw-r--r--drivers/mmc/core/core.c44
-rw-r--r--drivers/mmc/core/host.c2
-rw-r--r--drivers/mmc/core/mmc_ops.c1
-rw-r--r--drivers/mmc/core/sd.c13
-rw-r--r--drivers/mmc/core/slot-gpio.c14
-rw-r--r--drivers/mmc/host/Kconfig12
-rw-r--r--drivers/mmc/host/Makefile2
-rw-r--r--drivers/mmc/host/atmel-mci.c34
-rw-r--r--drivers/mmc/host/dw_mmc-exynos.c9
-rw-r--r--drivers/mmc/host/dw_mmc-pci.c4
-rw-r--r--drivers/mmc/host/dw_mmc-pltfm.c1
-rw-r--r--drivers/mmc/host/dw_mmc.c21
-rw-r--r--drivers/mmc/host/jz4740_mmc.c7
-rw-r--r--drivers/mmc/host/mmc_spi.c49
-rw-r--r--drivers/mmc/host/mvsdio.c3
-rw-r--r--drivers/mmc/host/mxs-mmc.c22
-rw-r--r--drivers/mmc/host/of_mmc_spi.c46
-rw-r--r--drivers/mmc/host/omap_hsmmc.c3
-rw-r--r--drivers/mmc/host/rtsx_pci_sdmmc.c70
-rw-r--r--drivers/mmc/host/sdhci-bcm2835.c2
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c2
-rw-r--r--drivers/mmc/host/sdhci-of-esdhc.c1
-rw-r--r--drivers/mmc/host/sdhci-pxav3.c3
-rw-r--r--drivers/mmc/host/sdhci-s3c.c8
-rw-r--r--drivers/mmc/host/sdhci-sirf.c2
-rw-r--r--drivers/mmc/host/sdhci.c7
-rw-r--r--drivers/mmc/host/sh_mmcif.c63
-rw-r--r--drivers/mmc/host/sh_mobile_sdhi.c23
-rw-r--r--drivers/mmc/host/tmio_mmc_dma.c4
-rw-r--r--drivers/mmc/host/tmio_mmc_pio.c24
-rw-r--r--drivers/mmc/host/vub300.c2
-rw-r--r--drivers/mtd/bcm63xxpart.c9
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0002.c4
-rw-r--r--drivers/mtd/chips/gen_probe.c4
-rw-r--r--drivers/mtd/chips/jedec_probe.c13
-rw-r--r--drivers/mtd/devices/Kconfig55
-rw-r--r--drivers/mtd/devices/bcm47xxsflash.c275
-rw-r--r--drivers/mtd/devices/bcm47xxsflash.h2
-rw-r--r--drivers/mtd/devices/block2mtd.c58
-rw-r--r--drivers/mtd/devices/elm.c129
-rw-r--r--drivers/mtd/devices/m25p80.c81
-rw-r--r--drivers/mtd/devices/mtd_dataflash.c10
-rw-r--r--drivers/mtd/devices/spear_smi.c19
-rw-r--r--drivers/mtd/devices/sst25l.c8
-rw-r--r--drivers/mtd/maps/Kconfig18
-rw-r--r--drivers/mtd/maps/Makefile2
-rw-r--r--drivers/mtd/maps/bfin-async-flash.c2
-rw-r--r--drivers/mtd/maps/cfi_flagadm.c10
-rw-r--r--drivers/mtd/maps/gpio-addr-flash.c2
-rw-r--r--drivers/mtd/maps/impa7.c10
-rw-r--r--drivers/mtd/maps/ixp4xx.c6
-rw-r--r--drivers/mtd/maps/latch-addr-flash.c5
-rw-r--r--drivers/mtd/maps/octagon-5066.c246
-rw-r--r--drivers/mtd/maps/physmap.c7
-rw-r--r--drivers/mtd/maps/plat-ram.c6
-rw-r--r--drivers/mtd/maps/pxa2xx-flash.c4
-rw-r--r--drivers/mtd/maps/rbtx4939-flash.c5
-rw-r--r--drivers/mtd/maps/sa1100-flash.c5
-rw-r--r--drivers/mtd/maps/vmax301.c196
-rw-r--r--drivers/mtd/mtdcore.c11
-rw-r--r--drivers/mtd/mtdpart.c1
-rw-r--r--drivers/mtd/mtdswap.c2
-rw-r--r--drivers/mtd/nand/Kconfig12
-rw-r--r--drivers/mtd/nand/Makefile1
-rw-r--r--drivers/mtd/nand/alauda.c723
-rw-r--r--drivers/mtd/nand/ams-delta.c1
-rw-r--r--drivers/mtd/nand/atmel_nand.c923
-rw-r--r--drivers/mtd/nand/atmel_nand_nfc.h98
-rw-r--r--drivers/mtd/nand/au1550nd.c2
-rw-r--r--drivers/mtd/nand/bf5xx_nand.c5
-rw-r--r--drivers/mtd/nand/cs553x_nand.c6
-rw-r--r--drivers/mtd/nand/davinci_nand.c19
-rw-r--r--drivers/mtd/nand/denali.c2
-rw-r--r--drivers/mtd/nand/diskonchip.c4
-rw-r--r--drivers/mtd/nand/docg4.c8
-rw-r--r--drivers/mtd/nand/fsl_ifc_nand.c3
-rw-r--r--drivers/mtd/nand/fsmc_nand.c38
-rw-r--r--drivers/mtd/nand/gpio.c231
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-nand.c268
-rw-r--r--drivers/mtd/nand/jz4740_nand.c6
-rw-r--r--drivers/mtd/nand/lpc32xx_mlc.c4
-rw-r--r--drivers/mtd/nand/lpc32xx_slc.c4
-rw-r--r--drivers/mtd/nand/mxc_nand.c12
-rw-r--r--drivers/mtd/nand/nand_base.c296
-rw-r--r--drivers/mtd/nand/nand_bbt.c194
-rw-r--r--drivers/mtd/nand/nand_ids.c8
-rw-r--r--drivers/mtd/nand/nandsim.c39
-rw-r--r--drivers/mtd/nand/nuc900_nand.c2
-rw-r--r--drivers/mtd/nand/omap2.c5
-rw-r--r--drivers/mtd/nand/orion_nand.c6
-rw-r--r--drivers/mtd/nand/plat_nand.c5
-rw-r--r--drivers/mtd/nand/pxa3xx_nand.c384
-rw-r--r--drivers/mtd/nand/r852.c49
-rw-r--r--drivers/mtd/nand/s3c2410.c4
-rw-r--r--drivers/mtd/nand/sh_flctl.c4
-rw-r--r--drivers/mtd/nand/sharpsl.c5
-rw-r--r--drivers/mtd/nand/sm_common.c9
-rw-r--r--drivers/mtd/nand/tmio_nand.c2
-rw-r--r--drivers/mtd/nand/txx9ndfmc.c13
-rw-r--r--drivers/mtd/ofpart.c18
-rw-r--r--drivers/mtd/onenand/generic.c4
-rw-r--r--drivers/mtd/onenand/omap2.c3
-rw-r--r--drivers/mtd/onenand/onenand_bbt.c1
-rw-r--r--drivers/mtd/onenand/samsung.c3
-rw-r--r--drivers/mtd/sm_ftl.c26
-rw-r--r--drivers/mtd/tests/Makefile9
-rw-r--r--drivers/mtd/tests/mtd_test.c114
-rw-r--r--drivers/mtd/tests/mtd_test.h11
-rw-r--r--drivers/mtd/tests/nandbiterrs.c (renamed from drivers/mtd/tests/mtd_nandbiterrs.c)41
-rw-r--r--drivers/mtd/tests/oobtest.c (renamed from drivers/mtd/tests/mtd_oobtest.c)102
-rw-r--r--drivers/mtd/tests/pagetest.c (renamed from drivers/mtd/tests/mtd_pagetest.c)271
-rw-r--r--drivers/mtd/tests/readtest.c (renamed from drivers/mtd/tests/mtd_readtest.c)61
-rw-r--r--drivers/mtd/tests/speedtest.c (renamed from drivers/mtd/tests/mtd_speedtest.c)210
-rw-r--r--drivers/mtd/tests/stresstest.c (renamed from drivers/mtd/tests/mtd_stresstest.c)101
-rw-r--r--drivers/mtd/tests/subpagetest.c (renamed from drivers/mtd/tests/mtd_subpagetest.c)97
-rw-r--r--drivers/mtd/tests/torturetest.c (renamed from drivers/mtd/tests/mtd_torturetest.c)66
-rw-r--r--drivers/mtd/ubi/fastmap.c5
-rw-r--r--drivers/mtd/ubi/wl.c3
-rw-r--r--drivers/net/bonding/bond_alb.c2
-rw-r--r--drivers/net/bonding/bond_alb.h9
-rw-r--r--drivers/net/bonding/bond_main.c5
-rw-r--r--drivers/net/bonding/bond_sysfs.c70
-rw-r--r--drivers/net/bonding/bonding.h2
-rw-r--r--drivers/net/ethernet/adi/bfin_mac.c4
-rw-r--r--drivers/net/ethernet/amd/declance.c1
-rw-r--r--drivers/net/ethernet/amd/sun3lance.c2
-rw-r--r--drivers/net/ethernet/atheros/alx/main.c9
-rw-r--r--drivers/net/ethernet/broadcom/bcm63xx_enet.c8
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.c44
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.h4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h37
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c46
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c15
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c3
-rw-r--r--drivers/net/ethernet/broadcom/cnic.c6
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c6
-rw-r--r--drivers/net/ethernet/broadcom/tg3.h1
-rw-r--r--drivers/net/ethernet/cadence/Kconfig2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c4
-rw-r--r--drivers/net/ethernet/dec/tulip/de4x5.c2
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c2
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c2
-rw-r--r--drivers/net/ethernet/hp/hp100.c2
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_main.c21
-rw-r--r--drivers/net/ethernet/intel/Kconfig18
-rw-r--r--drivers/net/ethernet/intel/Makefile1
-rw-r--r--drivers/net/ethernet/intel/e1000e/ethtool.c8
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.c13
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.h2
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/Makefile44
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h558
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq.c983
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq.h112
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h2076
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_alloc.h59
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_common.c2041
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_debugfs.c2076
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_diag.c131
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_diag.h52
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c1449
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_hmc.c366
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_hmc.h245
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c1006
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h169
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c7375
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_nvm.c391
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_osdep.h82
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_prototype.h239
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_register.h4688
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_status.h101
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c1817
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.h259
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_type.h1154
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl.h368
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c2335
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h120
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.c4
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mac.c10
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c25
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c19
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type.h1
-rw-r--r--drivers/net/ethernet/korina.c8
-rw-r--r--drivers/net/ethernet/lantiq_etop.c6
-rw-r--r--drivers/net/ethernet/marvell/pxa168_eth.c3
-rw-r--r--drivers/net/ethernet/marvell/skge.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c16
-rw-r--r--drivers/net/ethernet/micrel/ks8851_mll.c2
-rw-r--r--drivers/net/ethernet/natsemi/jazzsonic.c3
-rw-r--r--drivers/net/ethernet/natsemi/xtsonic.c3
-rw-r--r--drivers/net/ethernet/pasemi/pasemi_mac.c4
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic.h2
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c1
-rw-r--r--drivers/net/ethernet/realtek/r8169.c1
-rw-r--r--drivers/net/ethernet/sfc/Kconfig2
-rw-r--r--drivers/net/ethernet/sfc/ef10.c58
-rw-r--r--drivers/net/ethernet/sfc/mcdi_port.c2
-rw-r--r--drivers/net/ethernet/sfc/nic.h3
-rw-r--r--drivers/net/ethernet/smsc/smc91x.h2
-rw-r--r--drivers/net/ethernet/smsc/smsc9420.c3
-rw-r--r--drivers/net/ethernet/tile/tilegx.c6
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_net.c2
-rw-r--r--drivers/net/irda/donauboe.c6
-rw-r--r--drivers/net/irda/mcs7780.c40
-rw-r--r--drivers/net/irda/vlsi_ir.c2
-rw-r--r--drivers/net/loopback.c1
-rw-r--r--drivers/net/macvlan.c10
-rw-r--r--drivers/net/netconsole.c5
-rw-r--r--drivers/net/phy/cicada.c4
-rw-r--r--drivers/net/ppp/pptp.c2
-rw-r--r--drivers/net/tun.c11
-rw-r--r--drivers/net/usb/cdc_ether.c115
-rw-r--r--drivers/net/usb/qmi_wwan.c130
-rw-r--r--drivers/net/vxlan.c40
-rw-r--r--drivers/net/wireless/adm8211.c1
-rw-r--r--drivers/net/wireless/airo.c1
-rw-r--r--drivers/net/wireless/ath/Kconfig1
-rw-r--r--drivers/net/wireless/ath/Makefile1
-rw-r--r--drivers/net/wireless/ath/ar5523/ar5523.c1
-rw-r--r--drivers/net/wireless/ath/ath10k/bmi.c42
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.c382
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.h120
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c70
-rw-r--r--drivers/net/wireless/ath/ath10k/core.h35
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.c144
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.h14
-rw-r--r--drivers/net/wireless/ath/ath10k/htc.c241
-rw-r--r--drivers/net/wireless/ath/ath10k/htc.h5
-rw-r--r--drivers/net/wireless/ath/ath10k/htt.c19
-rw-r--r--drivers/net/wireless/ath/ath10k/htt.h13
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_rx.c314
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_tx.c285
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.h25
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c244
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.c446
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.h73
-rw-r--r--drivers/net/wireless/ath/ath10k/rx_desc.h24
-rw-r--r--drivers/net/wireless/ath/ath10k/trace.h32
-rw-r--r--drivers/net/wireless/ath/ath10k/txrx.c67
-rw-r--r--drivers/net/wireless/ath/ath10k/txrx.h5
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c232
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.h71
-rw-r--r--drivers/net/wireless/ath/ath5k/ahb.c15
-rw-r--r--drivers/net/wireless/ath/ath9k/ahb.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/ani.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/antenna.c36
-rw-r--r--drivers/net/wireless/ath/ath9k/ar5008_phy.c48
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_calib.c11
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_hw.c26
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_phy.c3
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_calib.c92
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.c34
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_hw.c5
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mci.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.c145
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_rtt.c58
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9485_initvals.h218
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h24
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h32
-rw-r--r--drivers/net/wireless/ath/ath9k/beacon.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/calib.c23
-rw-r--r--drivers/net/wireless/ath/ath9k/common.c91
-rw-r--r--drivers/net/wireless/ath/ath9k/common.h7
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c446
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.h12
-rw-r--r--drivers/net/wireless/ath/ath9k/dfs_debug.c25
-rw-r--r--drivers/net/wireless/ath/ath9k/dfs_pri_detector.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_4k.c10
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_9287.c8
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_def.c12
-rw-r--r--drivers/net/wireless/ath/ath9k/gpio.c22
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_debug.c456
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_main.c32
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c126
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h108
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c107
-rw-r--r--drivers/net/wireless/ath/ath9k/link.c12
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c167
-rw-r--r--drivers/net/wireless/ath/ath9k/mci.c8
-rw-r--r--drivers/net/wireless/ath/ath9k/pci.c195
-rw-r--r--drivers/net/wireless/ath/ath9k/rc.c32
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c48
-rw-r--r--drivers/net/wireless/ath/ath9k/wmi.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c14
-rw-r--r--drivers/net/wireless/ath/wcn36xx/Kconfig16
-rw-r--r--drivers/net/wireless/ath/wcn36xx/Makefile7
-rw-r--r--drivers/net/wireless/ath/wcn36xx/debug.c181
-rw-r--r--drivers/net/wireless/ath/wcn36xx/debug.h49
-rw-r--r--drivers/net/wireless/ath/wcn36xx/dxe.c805
-rw-r--r--drivers/net/wireless/ath/wcn36xx/dxe.h284
-rw-r--r--drivers/net/wireless/ath/wcn36xx/hal.h4657
-rw-r--r--drivers/net/wireless/ath/wcn36xx/main.c1036
-rw-r--r--drivers/net/wireless/ath/wcn36xx/pmc.c62
-rw-r--r--drivers/net/wireless/ath/wcn36xx/pmc.h33
-rw-r--r--drivers/net/wireless/ath/wcn36xx/smd.c2126
-rw-r--r--drivers/net/wireless/ath/wcn36xx/smd.h127
-rw-r--r--drivers/net/wireless/ath/wcn36xx/txrx.c284
-rw-r--r--drivers/net/wireless/ath/wcn36xx/txrx.h160
-rw-r--r--drivers/net/wireless/ath/wcn36xx/wcn36xx.h238
-rw-r--r--drivers/net/wireless/ath/wil6210/pcie_bus.c1
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c13
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd.h2
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h2
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c38
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c234
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/fweh.h5
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c2
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c28
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.h8
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/usb.c5
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/main.c2
-rw-r--r--drivers/net/wireless/brcm80211/include/brcm_hw_ids.h1
-rw-r--r--drivers/net/wireless/cw1200/cw1200_spi.c4
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2200.c2
-rw-r--r--drivers/net/wireless/iwlegacy/3945-mac.c2
-rw-r--r--drivers/net/wireless/iwlegacy/4965-mac.c2
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/trans.c8
-rw-r--r--drivers/net/wireless/libertas/if_spi.c2
-rw-r--r--drivers/net/wireless/mwifiex/cmdevt.c2
-rw-r--r--drivers/net/wireless/mwifiex/join.c2
-rw-r--r--drivers/net/wireless/mwifiex/main.c4
-rw-r--r--drivers/net/wireless/mwifiex/pcie.c6
-rw-r--r--drivers/net/wireless/mwifiex/sta_cmd.c2
-rw-r--r--drivers/net/wireless/mwifiex/wmm.c2
-rw-r--r--drivers/net/wireless/mwl8k.c2
-rw-r--r--drivers/net/wireless/orinoco/orinoco_nortel.c2
-rw-r--r--drivers/net/wireless/orinoco/orinoco_pci.c2
-rw-r--r--drivers/net/wireless/orinoco/orinoco_plx.c2
-rw-r--r--drivers/net/wireless/orinoco/orinoco_tmd.c2
-rw-r--r--drivers/net/wireless/p54/Kconfig2
-rw-r--r--drivers/net/wireless/p54/p54pci.c1
-rw-r--r--drivers/net/wireless/p54/p54spi.c2
-rw-r--r--drivers/net/wireless/rt2x00/Kconfig1
-rw-r--r--drivers/net/wireless/rt2x00/rt2800.h44
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.c222
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.c17
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00.h103
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00crypto.c4
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00debug.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00dev.c8
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00link.c74
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00mac.c6
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00pci.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.c39
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00usb.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt61pci.c20
-rw-r--r--drivers/net/wireless/rt2x00/rt73usb.c18
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/dev.c1
-rw-r--r--drivers/net/wireless/rtlwifi/base.c29
-rw-r--r--drivers/net/wireless/rtlwifi/base.h2
-rw-r--r--drivers/net/wireless/rtlwifi/efuse.c18
-rw-r--r--drivers/net/wireless/rtlwifi/pci.c4
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8188ee/hw.c1
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8188ee/phy.c28
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8188ee/phy.h2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8188ee/sw.c3
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8188ee/trx.c1
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c25
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c30
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/phy_common.h4
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/def.h2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/phy.h5
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/reg.h20
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/sw.c3
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/trx.c1
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/mac.c187
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/sw.c3
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/trx.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/dm.c8
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/hw.c18
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/phy.c28
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/phy.h5
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/sw.c3
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/trx.c1
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/reg.h5
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/trx.c1
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/phy.c29
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/phy.h2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/sw.c3
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/trx.c1
-rw-r--r--drivers/net/wireless/rtlwifi/usb.c6
-rw-r--r--drivers/net/wireless/rtlwifi/wifi.h2
-rw-r--r--drivers/net/wireless/ti/wl1251/Kconfig2
-rw-r--r--drivers/net/wireless/ti/wl1251/spi.c2
-rw-r--r--drivers/net/wireless/ti/wl1251/wl1251.h4
-rw-r--r--drivers/net/wireless/ti/wl12xx/main.c2
-rw-r--r--drivers/net/wireless/ti/wl18xx/main.c95
-rw-r--r--drivers/net/wireless/ti/wl18xx/reg.h33
-rw-r--r--drivers/net/wireless/ti/wlcore/Kconfig2
-rw-r--r--drivers/net/wireless/ti/wlcore/cmd.c58
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c158
-rw-r--r--drivers/net/wireless/ti/wlcore/ps.c4
-rw-r--r--drivers/net/wireless/ti/wlcore/scan.c27
-rw-r--r--drivers/net/wireless/ti/wlcore/spi.c2
-rw-r--r--drivers/net/wireless/ti/wlcore/testmode.c13
-rw-r--r--drivers/net/wireless/ti/wlcore/tx.c27
-rw-r--r--drivers/net/wireless/ti/wlcore/tx.h3
-rw-r--r--drivers/net/wireless/ti/wlcore/wlcore.h2
-rw-r--r--drivers/net/wireless/ti/wlcore/wlcore_i.h11
-rw-r--r--drivers/net/xen-netback/common.h1
-rw-r--r--drivers/net/xen-netback/interface.c28
-rw-r--r--drivers/net/xen-netback/netback.c94
-rw-r--r--drivers/net/xen-netback/xenbus.c17
-rw-r--r--drivers/ntb/Kconfig2
-rw-r--r--drivers/ntb/ntb_hw.c501
-rw-r--r--drivers/ntb/ntb_hw.h105
-rw-r--r--drivers/ntb/ntb_regs.h50
-rw-r--r--drivers/ntb/ntb_transport.c422
-rw-r--r--drivers/of/Kconfig6
-rw-r--r--drivers/of/Makefile1
-rw-r--r--drivers/of/base.c200
-rw-r--r--drivers/of/fdt.c174
-rw-r--r--drivers/of/irq.c2
-rw-r--r--drivers/of/of_net.c2
-rw-r--r--drivers/of/of_reserved_mem.c173
-rw-r--r--drivers/of/platform.c25
-rw-r--r--drivers/pci/hotplug/acpiphp_glue.c61
-rw-r--r--drivers/pci/msi.c22
-rw-r--r--drivers/platform/x86/Kconfig3
-rw-r--r--drivers/platform/x86/amilo-rfkill.c7
-rw-r--r--drivers/platform/x86/apple-gmux.c18
-rw-r--r--drivers/platform/x86/classmate-laptop.c2
-rw-r--r--drivers/platform/x86/compal-laptop.c7
-rw-r--r--drivers/platform/x86/hp-wmi.c16
-rw-r--r--drivers/platform/x86/intel-rst.c13
-rw-r--r--drivers/platform/x86/intel-smartconnect.c13
-rw-r--r--drivers/platform/x86/intel_mid_powerbtn.c1
-rw-r--r--drivers/platform/x86/intel_mid_thermal.c1
-rw-r--r--drivers/platform/x86/panasonic-laptop.c25
-rw-r--r--drivers/platform/x86/samsung-q10.c65
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c23
-rw-r--r--drivers/platform/x86/wmi.c4
-rw-r--r--drivers/pnp/driver.c13
-rw-r--r--drivers/power/Kconfig16
-rw-r--r--drivers/power/Makefile2
-rw-r--r--drivers/power/ab8500_charger.c1
-rw-r--r--drivers/power/bq24190_charger.c1549
-rw-r--r--drivers/power/collie_battery.c2
-rw-r--r--drivers/power/max8925_power.c1
-rw-r--r--drivers/power/power_supply_core.c38
-rw-r--r--drivers/power/power_supply_sysfs.c2
-rw-r--r--drivers/power/reset/Kconfig15
-rw-r--r--drivers/power/reset/Makefile2
-rw-r--r--drivers/power/reset/msm-poweroff.c73
-rw-r--r--drivers/power/reset/xgene-reboot.c103
-rw-r--r--drivers/power/rx51_battery.c14
-rw-r--r--drivers/power/tosa_battery.c2
-rw-r--r--drivers/power/twl4030_charger.c7
-rw-r--r--drivers/power/twl4030_madc_battery.c245
-rw-r--r--drivers/pps/clients/Kconfig2
-rw-r--r--drivers/pps/clients/pps-gpio.c1
-rw-r--r--drivers/rtc/Kconfig9
-rw-r--r--drivers/rtc/Makefile1
-rw-r--r--drivers/rtc/rtc-cmos.c24
-rw-r--r--drivers/rtc/rtc-ds1511.c17
-rw-r--r--drivers/rtc/rtc-ds1553.c13
-rw-r--r--drivers/rtc/rtc-ds1742.c26
-rw-r--r--drivers/rtc/rtc-ep93xx.c14
-rw-r--r--drivers/rtc/rtc-hid-sensor-time.c22
-rw-r--r--drivers/rtc/rtc-imxdi.c16
-rw-r--r--drivers/rtc/rtc-lpc32xx.c24
-rw-r--r--drivers/rtc/rtc-max77686.c4
-rw-r--r--drivers/rtc/rtc-moxart.c330
-rw-r--r--drivers/rtc/rtc-mv.c17
-rw-r--r--drivers/rtc/rtc-mxc.c14
-rw-r--r--drivers/rtc/rtc-nuc900.c2
-rw-r--r--drivers/rtc/rtc-omap.c60
-rw-r--r--drivers/rtc/rtc-palmas.c35
-rw-r--r--drivers/rtc/rtc-pcf2127.c6
-rw-r--r--drivers/rtc/rtc-sirfsoc.c13
-rw-r--r--drivers/rtc/rtc-stk17ta8.c15
-rw-r--r--drivers/rtc/rtc-tx4939.c14
-rw-r--r--drivers/s390/block/dasd_diag.c4
-rw-r--r--drivers/s390/char/fs3270.c6
-rw-r--r--drivers/s390/char/sclp.c6
-rw-r--r--drivers/s390/char/tty3270.c6
-rw-r--r--drivers/s390/char/zcore.c6
-rw-r--r--drivers/s390/crypto/ap_bus.c2
-rw-r--r--drivers/s390/kvm/kvm_virtio.c2
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_pci.c2
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc.h2
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_hwi.c3
-rw-r--r--drivers/scsi/bnx2i/bnx2i.h2
-rw-r--r--drivers/scsi/bnx2i/bnx2i_hwi.c3
-rw-r--r--drivers/scsi/esas2r/esas2r_flash.c11
-rw-r--r--drivers/scsi/esas2r/esas2r_init.c8
-rw-r--r--drivers/scsi/esas2r/esas2r_ioctl.c2
-rw-r--r--drivers/scsi/esas2r/esas2r_vda.c7
-rw-r--r--drivers/scsi/fnic/fnic.h8
-rw-r--r--drivers/scsi/fnic/fnic_main.c145
-rw-r--r--drivers/scsi/fnic/fnic_scsi.c141
-rw-r--r--drivers/scsi/fnic/vnic_scsi.h4
-rw-r--r--drivers/scsi/hpsa.c54
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.c15
-rw-r--r--drivers/scsi/ibmvscsi/ibmvscsi.c154
-rw-r--r--drivers/scsi/ibmvscsi/viosrp.h46
-rw-r--r--drivers/scsi/lpfc/lpfc.h1
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c19
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.c90
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c11
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c92
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c55
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c33
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.h4
-rw-r--r--drivers/scsi/lpfc/lpfc_sli4.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.h117
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c417
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fp.c145
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.c200
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.h31
-rw-r--r--drivers/scsi/mpt3sas/Makefile2
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c2
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.c59
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.h1
-rw-r--r--drivers/scsi/sd.c11
-rw-r--r--drivers/scsi/ufs/ufs.h1
-rw-r--r--drivers/scsi/ufs/ufshcd.c328
-rw-r--r--drivers/scsi/ufs/ufshcd.h54
-rw-r--r--drivers/scsi/ufs/ufshci.h22
-rw-r--r--drivers/scsi/ufs/unipro.h151
-rw-r--r--drivers/spi/Kconfig3
-rw-r--r--drivers/staging/android/ashmem.c44
-rw-r--r--drivers/staging/android/logger.c2
-rw-r--r--drivers/staging/android/lowmemorykiller.c43
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/linux/linux-mem.h38
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_pool.c148
-rw-r--r--drivers/staging/lustre/lustre/llite/file.c4
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lu_object.c98
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c76
-rw-r--r--drivers/staging/octeon/ethernet-mem.c7
-rw-r--r--drivers/staging/octeon/ethernet-rgmii.c4
-rw-r--r--drivers/staging/octeon/ethernet-rx.c5
-rw-r--r--drivers/target/Makefile3
-rw-r--r--drivers/target/iscsi/iscsi_target.c83
-rw-r--r--drivers/target/iscsi/iscsi_target.h7
-rw-r--r--drivers/target/iscsi/iscsi_target_auth.c4
-rw-r--r--drivers/target/iscsi/iscsi_target_configfs.c16
-rw-r--r--drivers/target/iscsi/iscsi_target_core.h26
-rw-r--r--drivers/target/iscsi/iscsi_target_datain_values.c4
-rw-r--r--drivers/target/iscsi/iscsi_target_device.c4
-rw-r--r--drivers/target/iscsi/iscsi_target_erl0.c4
-rw-r--r--drivers/target/iscsi/iscsi_target_erl1.c4
-rw-r--r--drivers/target/iscsi/iscsi_target_erl2.c4
-rw-r--r--drivers/target/iscsi/iscsi_target_login.c186
-rw-r--r--drivers/target/iscsi/iscsi_target_login.h3
-rw-r--r--drivers/target/iscsi/iscsi_target_nego.c367
-rw-r--r--drivers/target/iscsi/iscsi_target_nodeattrib.c4
-rw-r--r--drivers/target/iscsi/iscsi_target_parameters.c6
-rw-r--r--drivers/target/iscsi/iscsi_target_seq_pdu_list.c4
-rw-r--r--drivers/target/iscsi/iscsi_target_stat.c14
-rw-r--r--drivers/target/iscsi/iscsi_target_tmr.c4
-rw-r--r--drivers/target/iscsi/iscsi_target_tpg.c29
-rw-r--r--drivers/target/iscsi/iscsi_target_tpg.h4
-rw-r--r--drivers/target/iscsi/iscsi_target_tq.c167
-rw-r--r--drivers/target/iscsi/iscsi_target_tq.h5
-rw-r--r--drivers/target/iscsi/iscsi_target_util.c45
-rw-r--r--drivers/target/loopback/tcm_loop.c2
-rw-r--r--drivers/target/target_core_alua.c39
-rw-r--r--drivers/target/target_core_configfs.c60
-rw-r--r--drivers/target/target_core_device.c44
-rw-r--r--drivers/target/target_core_fabric_configfs.c18
-rw-r--r--drivers/target/target_core_fabric_lib.c2
-rw-r--r--drivers/target/target_core_file.c12
-rw-r--r--drivers/target/target_core_hba.c2
-rw-r--r--drivers/target/target_core_iblock.c12
-rw-r--r--drivers/target/target_core_internal.h2
-rw-r--r--drivers/target/target_core_pr.c4
-rw-r--r--drivers/target/target_core_pscsi.c7
-rw-r--r--drivers/target/target_core_rd.c8
-rw-r--r--drivers/target/target_core_sbc.c257
-rw-r--r--drivers/target/target_core_spc.c27
-rw-r--r--drivers/target/target_core_stat.c2
-rw-r--r--drivers/target/target_core_tmr.c2
-rw-r--r--drivers/target/target_core_tpg.c2
-rw-r--r--drivers/target/target_core_transport.c170
-rw-r--r--drivers/target/target_core_ua.c2
-rw-r--r--drivers/target/target_core_xcopy.c1081
-rw-r--r--drivers/target/target_core_xcopy.h62
-rw-r--r--drivers/target/tcm_fc/tfc_conf.c6
-rw-r--r--drivers/thermal/Kconfig33
-rw-r--r--drivers/thermal/Makefile6
-rw-r--r--drivers/thermal/cpu_cooling.c8
-rw-r--r--drivers/thermal/exynos_thermal.c1059
-rw-r--r--drivers/thermal/imx_thermal.c541
-rw-r--r--drivers/thermal/samsung/Kconfig18
-rw-r--r--drivers/thermal/samsung/Makefile7
-rw-r--r--drivers/thermal/samsung/exynos_thermal_common.c432
-rw-r--r--drivers/thermal/samsung/exynos_thermal_common.h107
-rw-r--r--drivers/thermal/samsung/exynos_tmu.c762
-rw-r--r--drivers/thermal/samsung/exynos_tmu.h311
-rw-r--r--drivers/thermal/samsung/exynos_tmu_data.c250
-rw-r--r--drivers/thermal/samsung/exynos_tmu_data.h155
-rw-r--r--drivers/thermal/step_wise.c32
-rw-r--r--drivers/thermal/thermal_core.c282
-rw-r--r--drivers/thermal/thermal_hwmon.c269
-rw-r--r--drivers/thermal/thermal_hwmon.h49
-rw-r--r--drivers/thermal/ti-soc-thermal/dra752-thermal-data.c5
-rw-r--r--drivers/thermal/ti-soc-thermal/ti-bandgap.c6
-rw-r--r--drivers/thermal/ti-soc-thermal/ti-thermal-common.c7
-rw-r--r--drivers/tty/serial/Kconfig2
-rw-r--r--drivers/tty/tty_io.c3
-rw-r--r--drivers/usb/dwc3/Kconfig2
-rw-r--r--drivers/usb/gadget/Kconfig4
-rw-r--r--drivers/usb/gadget/inode.c9
-rw-r--r--drivers/usb/host/Kconfig1
-rw-r--r--drivers/usb/musb/Kconfig1
-rw-r--r--drivers/usb/renesas_usbhs/Kconfig2
-rw-r--r--drivers/vfio/pci/vfio_pci.c286
-rw-r--r--drivers/vfio/pci/vfio_pci_config.c11
-rw-r--r--drivers/vfio/pci/vfio_pci_intrs.c35
-rw-r--r--drivers/vfio/vfio.c64
-rw-r--r--drivers/vhost/scsi.c136
-rw-r--r--drivers/video/acornfb.c266
-rw-r--r--drivers/video/acornfb.h29
-rw-r--r--drivers/video/logo/logo_linux_clut224.ppm2481
-rw-r--r--drivers/video/ps3fb.c2
-rw-r--r--drivers/virtio/virtio_pci.c4
-rw-r--r--drivers/w1/masters/Kconfig2
-rw-r--r--drivers/w1/masters/mxc_w1.c2
-rw-r--r--drivers/w1/w1.c12
-rw-r--r--drivers/watchdog/Kconfig10
-rw-r--r--drivers/watchdog/Makefile1
-rw-r--r--drivers/watchdog/ar7_wdt.c5
-rw-r--r--drivers/watchdog/hpwdt.c6
-rw-r--r--drivers/watchdog/nuc900_wdt.c5
-rw-r--r--drivers/watchdog/s3c2410_wdt.c228
-rw-r--r--drivers/watchdog/sunxi_wdt.c237
-rw-r--r--drivers/watchdog/ts72xx_wdt.c10
-rw-r--r--drivers/xen/balloon.c13
1086 files changed, 83532 insertions, 17364 deletions
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index 6a38218..fb78bb9 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -257,12 +257,13 @@ static int acpi_lpss_create_device(struct acpi_device *adev,
pdata->mmio_size = resource_size(&rentry->res);
pdata->mmio_base = ioremap(rentry->res.start,
pdata->mmio_size);
- pdata->dev_desc = dev_desc;
break;
}
acpi_dev_free_resource_list(&resource_list);
+ pdata->dev_desc = dev_desc;
+
if (dev_desc->clk_required) {
ret = register_device_clock(adev, pdata);
if (ret) {
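A condensed sketch of the flow this hunk produces in acpi_lpss_create_device() (paraphrased, not verbatim driver code; the loop condition is an assumption). The point of the reordering is that pdata->dev_desc is now assigned on every path, including the one where the resource walk finds no MMIO entry, so the clk_required handling below never sees an unset descriptor:

/* After the patch: the descriptor assignment is hoisted out of the loop. */
list_for_each_entry(rentry, &resource_list, node) {
	if (resource_type(&rentry->res) == IORESOURCE_MEM) {	/* assumed condition */
		pdata->mmio_size = resource_size(&rentry->res);
		pdata->mmio_base = ioremap(rentry->res.start,
					   pdata->mmio_size);
		break;	/* dev_desc is no longer set only inside this branch */
	}
}
acpi_dev_free_resource_list(&resource_list);

pdata->dev_desc = dev_desc;	/* unconditional: runs whether or not MMIO was found */

if (dev_desc->clk_required)
	ret = register_device_clock(adev, pdata);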
diff --git a/drivers/acpi/acpica/exstore.c b/drivers/acpi/acpica/exstore.c
index 2bdba6f..f0b09bf 100644
--- a/drivers/acpi/acpica/exstore.c
+++ b/drivers/acpi/acpica/exstore.c
@@ -57,6 +57,11 @@ acpi_ex_store_object_to_index(union acpi_operand_object *val_desc,
union acpi_operand_object *dest_desc,
struct acpi_walk_state *walk_state);
+static acpi_status
+acpi_ex_store_direct_to_node(union acpi_operand_object *source_desc,
+ struct acpi_namespace_node *node,
+ struct acpi_walk_state *walk_state);
+
/*******************************************************************************
*
* FUNCTION: acpi_ex_store
@@ -375,7 +380,11 @@ acpi_ex_store_object_to_index(union acpi_operand_object *source_desc,
* When storing into an object the data is converted to the
* target object type then stored in the object. This means
* that the target object type (for an initialized target) will
- * not be changed by a store operation.
+ * not be changed by a store operation. A copy_object can change
+ * the target type, however.
+ *
+ * The implicit_conversion flag is set to NO/FALSE only when
+ * storing to an arg_x -- as per the rules of the ACPI spec.
*
* Assumes parameters are already validated.
*
@@ -399,7 +408,7 @@ acpi_ex_store_object_to_node(union acpi_operand_object *source_desc,
target_type = acpi_ns_get_type(node);
target_desc = acpi_ns_get_attached_object(node);
- ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Storing %p(%s) into node %p(%s)\n",
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Storing %p (%s) to node %p (%s)\n",
source_desc,
acpi_ut_get_object_type_name(source_desc), node,
acpi_ut_get_type_name(target_type)));
@@ -413,45 +422,30 @@ acpi_ex_store_object_to_node(union acpi_operand_object *source_desc,
return_ACPI_STATUS(status);
}
- /* If no implicit conversion, drop into the default case below */
-
- if ((!implicit_conversion) ||
- ((walk_state->opcode == AML_COPY_OP) &&
- (target_type != ACPI_TYPE_LOCAL_REGION_FIELD) &&
- (target_type != ACPI_TYPE_LOCAL_BANK_FIELD) &&
- (target_type != ACPI_TYPE_LOCAL_INDEX_FIELD))) {
- /*
- * Force execution of default (no implicit conversion). Note:
- * copy_object does not perform an implicit conversion, as per the ACPI
- * spec -- except in case of region/bank/index fields -- because these
- * objects must retain their original type permanently.
- */
- target_type = ACPI_TYPE_ANY;
- }
-
/* Do the actual store operation */
switch (target_type) {
- case ACPI_TYPE_BUFFER_FIELD:
- case ACPI_TYPE_LOCAL_REGION_FIELD:
- case ACPI_TYPE_LOCAL_BANK_FIELD:
- case ACPI_TYPE_LOCAL_INDEX_FIELD:
-
- /* For fields, copy the source data to the target field. */
-
- status = acpi_ex_write_data_to_field(source_desc, target_desc,
- &walk_state->result_obj);
- break;
-
case ACPI_TYPE_INTEGER:
case ACPI_TYPE_STRING:
case ACPI_TYPE_BUFFER:
/*
- * These target types are all of type Integer/String/Buffer, and
- * therefore support implicit conversion before the store.
- *
- * Copy and/or convert the source object to a new target object
+ * The simple data types all support implicit source operand
+ * conversion before the store.
*/
+
+ if ((walk_state->opcode == AML_COPY_OP) || !implicit_conversion) {
+ /*
+ * However, copy_object and Stores to arg_x do not perform
+ * an implicit conversion, as per the ACPI specification.
+ * A direct store is performed instead.
+ */
+ status = acpi_ex_store_direct_to_node(source_desc, node,
+ walk_state);
+ break;
+ }
+
+ /* Store with implicit source operand conversion support */
+
status =
acpi_ex_store_object_to_object(source_desc, target_desc,
&new_desc, walk_state);
@@ -465,13 +459,12 @@ acpi_ex_store_object_to_node(union acpi_operand_object *source_desc,
* the Name's type to that of the value being stored in it.
* source_desc reference count is incremented by attach_object.
*
- * Note: This may change the type of the node if an explicit store
- * has been performed such that the node/object type has been
- * changed.
+ * Note: This may change the type of the node if an explicit
+ * store has been performed such that the node/object type
+ * has been changed.
*/
- status =
- acpi_ns_attach_object(node, new_desc,
- new_desc->common.type);
+ status = acpi_ns_attach_object(node, new_desc,
+ new_desc->common.type);
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
"Store %s into %s via Convert/Attach\n",
@@ -482,38 +475,83 @@ acpi_ex_store_object_to_node(union acpi_operand_object *source_desc,
}
break;
- default:
-
- ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
- "Storing [%s] (%p) directly into node [%s] (%p)"
- " with no implicit conversion\n",
- acpi_ut_get_object_type_name(source_desc),
- source_desc,
- acpi_ut_get_object_type_name(target_desc),
- node));
+ case ACPI_TYPE_BUFFER_FIELD:
+ case ACPI_TYPE_LOCAL_REGION_FIELD:
+ case ACPI_TYPE_LOCAL_BANK_FIELD:
+ case ACPI_TYPE_LOCAL_INDEX_FIELD:
+ /*
+ * For all fields, always write the source data to the target
+ * field. Any required implicit source operand conversion is
+ * performed in the function below as necessary. Note, field
+ * objects must retain their original type permanently.
+ */
+ status = acpi_ex_write_data_to_field(source_desc, target_desc,
+ &walk_state->result_obj);
+ break;
+ default:
/*
* No conversions for all other types. Directly store a copy of
- * the source object. NOTE: This is a departure from the ACPI
- * spec, which states "If conversion is impossible, abort the
- * running control method".
+ * the source object. This is the ACPI spec-defined behavior for
+ * the copy_object operator.
*
- * This code implements "If conversion is impossible, treat the
- * Store operation as a CopyObject".
+ * NOTE: For the Store operator, this is a departure from the
+ * ACPI spec, which states "If conversion is impossible, abort
+ * the running control method". Instead, this code implements
+ * "If conversion is impossible, treat the Store operation as
+ * a CopyObject".
*/
- status =
- acpi_ut_copy_iobject_to_iobject(source_desc, &new_desc,
- walk_state);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
-
- status =
- acpi_ns_attach_object(node, new_desc,
- new_desc->common.type);
- acpi_ut_remove_reference(new_desc);
+ status = acpi_ex_store_direct_to_node(source_desc, node,
+ walk_state);
break;
}
return_ACPI_STATUS(status);
}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_store_direct_to_node
+ *
+ * PARAMETERS: source_desc - Value to be stored
+ * node - Named object to receive the value
+ * walk_state - Current walk state
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: "Store" an object directly to a node. This involves a copy
+ * and an attach.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ex_store_direct_to_node(union acpi_operand_object *source_desc,
+ struct acpi_namespace_node *node,
+ struct acpi_walk_state *walk_state)
+{
+ acpi_status status;
+ union acpi_operand_object *new_desc;
+
+ ACPI_FUNCTION_TRACE(ex_store_direct_to_node);
+
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+ "Storing [%s] (%p) directly into node [%s] (%p)"
+ " with no implicit conversion\n",
+ acpi_ut_get_object_type_name(source_desc),
+ source_desc, acpi_ut_get_type_name(node->type),
+ node));
+
+ /* Copy the source object to a new object */
+
+ status =
+ acpi_ut_copy_iobject_to_iobject(source_desc, &new_desc, walk_state);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Attach the new object to the node */
+
+ status = acpi_ns_attach_object(node, new_desc, new_desc->common.type);
+ acpi_ut_remove_reference(new_desc);
+ return_ACPI_STATUS(status);
+}
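Reassembling the hunks above, the post-patch dispatch in acpi_ex_store_object_to_node() reads roughly as follows. This is a consolidated sketch, not verbatim source: error paths and debug output are elided, and the success/identity guard before the attach is paraphrased rather than taken from the dump:

switch (target_type) {
case ACPI_TYPE_INTEGER:
case ACPI_TYPE_STRING:
case ACPI_TYPE_BUFFER:
	if ((walk_state->opcode == AML_COPY_OP) || !implicit_conversion) {
		/* CopyObject and stores to ArgX bypass implicit conversion */
		status = acpi_ex_store_direct_to_node(source_desc, node,
						      walk_state);
		break;
	}
	/* Plain Store: convert the source to the target type, then attach */
	status = acpi_ex_store_object_to_object(source_desc, target_desc,
						&new_desc, walk_state);
	if (ACPI_SUCCESS(status) && (new_desc != target_desc))
		status = acpi_ns_attach_object(node, new_desc,
					       new_desc->common.type);
	break;

case ACPI_TYPE_BUFFER_FIELD:
case ACPI_TYPE_LOCAL_REGION_FIELD:
case ACPI_TYPE_LOCAL_BANK_FIELD:
case ACPI_TYPE_LOCAL_INDEX_FIELD:
	/* Fields keep their type; any needed conversion happens in the writer */
	status = acpi_ex_write_data_to_field(source_desc, target_desc,
					     &walk_state->result_obj);
	break;

default:
	/* All other types: direct copy + attach, i.e. CopyObject semantics */
	status = acpi_ex_store_direct_to_node(source_desc, node, walk_state);
	break;
}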
diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c
index 9467229..10f0f40 100644
--- a/drivers/acpi/glue.c
+++ b/drivers/acpi/glue.c
@@ -79,6 +79,9 @@ static struct acpi_bus_type *acpi_get_bus_type(struct device *dev)
return ret;
}
+#define FIND_CHILD_MIN_SCORE 1
+#define FIND_CHILD_MAX_SCORE 2
+
static acpi_status acpi_dev_present(acpi_handle handle, u32 lvl_not_used,
void *not_used, void **ret_p)
{
@@ -92,14 +95,17 @@ static acpi_status acpi_dev_present(acpi_handle handle, u32 lvl_not_used,
return AE_OK;
}
-static bool acpi_extra_checks_passed(acpi_handle handle, bool is_bridge)
+static int do_find_child_checks(acpi_handle handle, bool is_bridge)
{
+ bool sta_present = true;
unsigned long long sta;
acpi_status status;
- status = acpi_bus_get_status_handle(handle, &sta);
- if (ACPI_FAILURE(status) || !(sta & ACPI_STA_DEVICE_ENABLED))
- return false;
+ status = acpi_evaluate_integer(handle, "_STA", NULL, &sta);
+ if (status == AE_NOT_FOUND)
+ sta_present = false;
+ else if (ACPI_FAILURE(status) || !(sta & ACPI_STA_DEVICE_ENABLED))
+ return -ENODEV;
if (is_bridge) {
void *test = NULL;
@@ -107,16 +113,17 @@ static bool acpi_extra_checks_passed(acpi_handle handle, bool is_bridge)
/* Check if this object has at least one child device. */
acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
acpi_dev_present, NULL, NULL, &test);
- return !!test;
+ if (!test)
+ return -ENODEV;
}
- return true;
+ return sta_present ? FIND_CHILD_MAX_SCORE : FIND_CHILD_MIN_SCORE;
}
struct find_child_context {
u64 addr;
bool is_bridge;
acpi_handle ret;
- bool ret_checked;
+ int ret_score;
};
static acpi_status do_find_child(acpi_handle handle, u32 lvl_not_used,
@@ -125,6 +132,7 @@ static acpi_status do_find_child(acpi_handle handle, u32 lvl_not_used,
struct find_child_context *context = data;
unsigned long long addr;
acpi_status status;
+ int score;
status = acpi_evaluate_integer(handle, METHOD_NAME__ADR, NULL, &addr);
if (ACPI_FAILURE(status) || addr != context->addr)
@@ -144,15 +152,20 @@ static acpi_status do_find_child(acpi_handle handle, u32 lvl_not_used,
* its handle if so. Second, check the same for the object that we've
* just found.
*/
- if (!context->ret_checked) {
- if (acpi_extra_checks_passed(context->ret, context->is_bridge))
+ if (!context->ret_score) {
+ score = do_find_child_checks(context->ret, context->is_bridge);
+ if (score == FIND_CHILD_MAX_SCORE)
return AE_CTRL_TERMINATE;
else
- context->ret_checked = true;
+ context->ret_score = score;
}
- if (acpi_extra_checks_passed(handle, context->is_bridge)) {
+ score = do_find_child_checks(handle, context->is_bridge);
+ if (score == FIND_CHILD_MAX_SCORE) {
context->ret = handle;
return AE_CTRL_TERMINATE;
+ } else if (score > context->ret_score) {
+ context->ret = handle;
+ context->ret_score = score;
}
return AE_OK;
}
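
[Editorial note: the integer scores replace the old boolean check. A candidate whose _STA reports the device enabled is a definitive match; a candidate with no _STA at all is kept only as a fallback. In table form (sketch of the policy above):]

	/*
	 * do_find_child_checks() outcomes:
	 *   _STA present, device enabled        -> FIND_CHILD_MAX_SCORE, stop walk
	 *   _STA absent                         -> FIND_CHILD_MIN_SCORE, fallback
	 *   _STA present but device not enabled -> -ENODEV, never matched
	 *   bridge with no child devices        -> -ENODEV, never matched
	 */
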
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 61d090b..fbdb82e 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -204,8 +204,6 @@ static int acpi_scan_hot_remove(struct acpi_device *device)
return -EINVAL;
}
- lock_device_hotplug();
-
/*
* Carry out two passes here and ignore errors in the first pass,
* because if the devices in question are memory blocks and
@@ -236,9 +234,6 @@ static int acpi_scan_hot_remove(struct acpi_device *device)
ACPI_UINT32_MAX,
acpi_bus_online_companions, NULL,
NULL, NULL);
-
- unlock_device_hotplug();
-
put_device(&device->dev);
return -EBUSY;
}
@@ -249,8 +244,6 @@ static int acpi_scan_hot_remove(struct acpi_device *device)
acpi_bus_trim(device);
- unlock_device_hotplug();
-
/* Device node has been unregistered. */
put_device(&device->dev);
device = NULL;
@@ -289,6 +282,7 @@ static void acpi_bus_device_eject(void *context)
u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE;
int error;
+ lock_device_hotplug();
mutex_lock(&acpi_scan_lock);
acpi_bus_get_device(handle, &device);
@@ -312,6 +306,7 @@ static void acpi_bus_device_eject(void *context)
out:
mutex_unlock(&acpi_scan_lock);
+ unlock_device_hotplug();
return;
err_out:
@@ -326,8 +321,8 @@ static void acpi_scan_bus_device_check(acpi_handle handle, u32 ost_source)
u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE;
int error;
- mutex_lock(&acpi_scan_lock);
lock_device_hotplug();
+ mutex_lock(&acpi_scan_lock);
if (ost_source != ACPI_NOTIFY_BUS_CHECK) {
acpi_bus_get_device(handle, &device);
@@ -353,9 +348,9 @@ static void acpi_scan_bus_device_check(acpi_handle handle, u32 ost_source)
kobject_uevent(&device->dev.kobj, KOBJ_ONLINE);
out:
- unlock_device_hotplug();
acpi_evaluate_hotplug_ost(handle, ost_source, ost_code, NULL);
mutex_unlock(&acpi_scan_lock);
+ unlock_device_hotplug();
}
static void acpi_scan_bus_check(void *context)
@@ -446,6 +441,7 @@ void acpi_bus_hot_remove_device(void *context)
acpi_handle handle = device->handle;
int error;
+ lock_device_hotplug();
mutex_lock(&acpi_scan_lock);
error = acpi_scan_hot_remove(device);
@@ -455,6 +451,7 @@ void acpi_bus_hot_remove_device(void *context)
NULL);
mutex_unlock(&acpi_scan_lock);
+ unlock_device_hotplug();
kfree(context);
}
EXPORT_SYMBOL(acpi_bus_hot_remove_device);
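
[Editorial note: the scan.c hunks move lock_device_hotplug() out of acpi_scan_hot_remove() and into its callers, so every hot-add and hot-remove path now follows one consistent lock ordering. A sketch of that pattern:]

	lock_device_hotplug();
	mutex_lock(&acpi_scan_lock);
	/* ... hot-add or hot-remove work ... */
	mutex_unlock(&acpi_scan_lock);
	unlock_device_hotplug();
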
diff --git a/drivers/atm/he.c b/drivers/atm/he.c
index 449f629..8557adc 100644
--- a/drivers/atm/he.c
+++ b/drivers/atm/he.c
@@ -2865,15 +2865,4 @@ static struct pci_driver he_driver = {
.id_table = he_pci_tbl,
};
-static int __init he_init(void)
-{
- return pci_register_driver(&he_driver);
-}
-
-static void __exit he_cleanup(void)
-{
- pci_unregister_driver(&he_driver);
-}
-
-module_init(he_init);
-module_exit(he_cleanup);
+module_pci_driver(he_driver);
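
[Editorial note: module_pci_driver() generates the boilerplate deleted above; the macro expands to roughly the following, with the init/exit names derived from the driver symbol:]

	static int __init he_driver_init(void)
	{
		return pci_register_driver(&he_driver);
	}
	module_init(he_driver_init);

	static void __exit he_driver_exit(void)
	{
		pci_unregister_driver(&he_driver);
	}
	module_exit(he_driver_exit);
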
diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
index 409502a..5aca5f4 100644
--- a/drivers/atm/nicstar.c
+++ b/drivers/atm/nicstar.c
@@ -778,7 +778,7 @@ static int ns_init_card(int i, struct pci_dev *pcidev)
return error;
}
- if (mac[i] == NULL || mac_pton(mac[i], card->atmdev->esi)) {
+ if (mac[i] == NULL || !mac_pton(mac[i], card->atmdev->esi)) {
nicstar_read_eprom(card->membase, NICSTAR_EPROM_MAC_ADDR_OFFSET,
card->atmdev->esi, 6);
if (memcmp(card->atmdev->esi, "\x00\x00\x00\x00\x00\x00", 6) ==
diff --git a/drivers/base/dma-buf.c b/drivers/base/dma-buf.c
index 1219ab7..1e16cbd 100644
--- a/drivers/base/dma-buf.c
+++ b/drivers/base/dma-buf.c
@@ -77,9 +77,36 @@ static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
return dmabuf->ops->mmap(dmabuf, vma);
}
+static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
+{
+ struct dma_buf *dmabuf;
+ loff_t base;
+
+ if (!is_dma_buf_file(file))
+ return -EBADF;
+
+ dmabuf = file->private_data;
+
+	/*
+	 * Only support discovering the end of the buffer, but also
+	 * allow SEEK_SET to maintain the idiomatic SEEK_END(0),
+	 * SEEK_CUR(0) pattern.
+	 */
+ if (whence == SEEK_END)
+ base = dmabuf->size;
+ else if (whence == SEEK_SET)
+ base = 0;
+ else
+ return -EINVAL;
+
+ if (offset != 0)
+ return -EINVAL;
+
+ return base + offset;
+}
+
static const struct file_operations dma_buf_fops = {
.release = dma_buf_release,
.mmap = dma_buf_mmap_internal,
+ .llseek = dma_buf_llseek,
};
/*
@@ -133,7 +160,12 @@ struct dma_buf *dma_buf_export_named(void *priv, const struct dma_buf_ops *ops,
dmabuf->exp_name = exp_name;
file = anon_inode_getfile("dmabuf", &dma_buf_fops, dmabuf, flags);
+ if (IS_ERR(file)) {
+ kfree(dmabuf);
+ return ERR_CAST(file);
+ }
+ file->f_mode |= FMODE_LSEEK;
dmabuf->file = file;
mutex_init(&dmabuf->lock);
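
[Editorial note: with FMODE_LSEEK set on the anon inode, userspace can discover a dma-buf's size without a dedicated ioctl. A minimal sketch, assuming fd is a valid dma-buf file descriptor:]

	#include <unistd.h>	/* lseek */

	off_t size = lseek(fd, 0, SEEK_END);	/* returns dmabuf->size */
	if (size < 0)
		perror("lseek");
	lseek(fd, 0, SEEK_SET);			/* any non-zero offset -> -EINVAL */
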
diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c
index 6c9cdaa..99802d6f 100644
--- a/drivers/base/dma-contiguous.c
+++ b/drivers/base/dma-contiguous.c
@@ -96,7 +96,7 @@ static inline __maybe_unused phys_addr_t cma_early_percent_memory(void)
#endif
/**
- * dma_contiguous_reserve() - reserve area for contiguous memory handling
+ * dma_contiguous_reserve() - reserve area(s) for contiguous memory handling
* @limit: End address of the reserved memory (optional, 0 for any).
*
* This function reserves memory from early allocator. It should be
@@ -124,22 +124,29 @@ void __init dma_contiguous_reserve(phys_addr_t limit)
#endif
}
- if (selected_size) {
+ if (selected_size && !dma_contiguous_default_area) {
pr_debug("%s: reserving %ld MiB for global area\n", __func__,
(unsigned long)selected_size / SZ_1M);
- dma_declare_contiguous(NULL, selected_size, 0, limit);
+ dma_contiguous_reserve_area(selected_size, 0, limit,
+ &dma_contiguous_default_area);
}
};
static DEFINE_MUTEX(cma_mutex);
-static int __init cma_activate_area(unsigned long base_pfn, unsigned long count)
+static int __init cma_activate_area(struct cma *cma)
{
- unsigned long pfn = base_pfn;
- unsigned i = count >> pageblock_order;
+ int bitmap_size = BITS_TO_LONGS(cma->count) * sizeof(long);
+ unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
+ unsigned i = cma->count >> pageblock_order;
struct zone *zone;
+ cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+
+ if (!cma->bitmap)
+ return -ENOMEM;
+
WARN_ON_ONCE(!pfn_valid(pfn));
zone = page_zone(pfn_to_page(pfn));
@@ -153,92 +160,53 @@ static int __init cma_activate_area(unsigned long base_pfn, unsigned long count)
}
init_cma_reserved_pageblock(pfn_to_page(base_pfn));
} while (--i);
- return 0;
-}
-
-static struct cma * __init cma_create_area(unsigned long base_pfn,
- unsigned long count)
-{
- int bitmap_size = BITS_TO_LONGS(count) * sizeof(long);
- struct cma *cma;
- int ret = -ENOMEM;
-
- pr_debug("%s(base %08lx, count %lx)\n", __func__, base_pfn, count);
-
- cma = kmalloc(sizeof *cma, GFP_KERNEL);
- if (!cma)
- return ERR_PTR(-ENOMEM);
-
- cma->base_pfn = base_pfn;
- cma->count = count;
- cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
- if (!cma->bitmap)
- goto no_mem;
-
- ret = cma_activate_area(base_pfn, count);
- if (ret)
- goto error;
-
- pr_debug("%s: returned %p\n", __func__, (void *)cma);
- return cma;
-
-error:
- kfree(cma->bitmap);
-no_mem:
- kfree(cma);
- return ERR_PTR(ret);
+ return 0;
}
-static struct cma_reserved {
- phys_addr_t start;
- unsigned long size;
- struct device *dev;
-} cma_reserved[MAX_CMA_AREAS] __initdata;
-static unsigned cma_reserved_count __initdata;
+static struct cma cma_areas[MAX_CMA_AREAS];
+static unsigned cma_area_count;
static int __init cma_init_reserved_areas(void)
{
- struct cma_reserved *r = cma_reserved;
- unsigned i = cma_reserved_count;
-
- pr_debug("%s()\n", __func__);
+ int i;
- for (; i; --i, ++r) {
- struct cma *cma;
- cma = cma_create_area(PFN_DOWN(r->start),
- r->size >> PAGE_SHIFT);
- if (!IS_ERR(cma))
- dev_set_cma_area(r->dev, cma);
+ for (i = 0; i < cma_area_count; i++) {
+ int ret = cma_activate_area(&cma_areas[i]);
+ if (ret)
+ return ret;
}
+
return 0;
}
core_initcall(cma_init_reserved_areas);
/**
- * dma_declare_contiguous() - reserve area for contiguous memory handling
- * for particular device
- * @dev: Pointer to device structure.
- * @size: Size of the reserved memory.
- * @base: Start address of the reserved memory (optional, 0 for any).
+ * dma_contiguous_reserve_area() - reserve custom contiguous area
+ * @size: Size of the reserved area (in bytes),
+ * @base: Base address of the reserved area (optional, use 0 for any),
* @limit: End address of the reserved memory (optional, 0 for any).
+ * @res_cma: Pointer to store the created cma region.
*
- * This function reserves memory for specified device. It should be
- * called by board specific code when early allocator (memblock or bootmem)
- * is still activate.
+ * This function reserves memory from the early allocator. It should be
+ * called by arch specific code once the early allocator (memblock or bootmem)
+ * has been activated and all other subsystems have already allocated/reserved
+ * memory. It allows the creation of custom reserved areas for specific
+ * devices.
*/
-int __init dma_declare_contiguous(struct device *dev, phys_addr_t size,
- phys_addr_t base, phys_addr_t limit)
+int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
+ phys_addr_t limit, struct cma **res_cma)
{
- struct cma_reserved *r = &cma_reserved[cma_reserved_count];
+ struct cma *cma = &cma_areas[cma_area_count];
phys_addr_t alignment;
+ int ret = 0;
pr_debug("%s(size %lx, base %08lx, limit %08lx)\n", __func__,
(unsigned long)size, (unsigned long)base,
(unsigned long)limit);
/* Sanity checks */
- if (cma_reserved_count == ARRAY_SIZE(cma_reserved)) {
+ if (cma_area_count == ARRAY_SIZE(cma_areas)) {
pr_err("Not enough slots for CMA reserved regions!\n");
return -ENOSPC;
}
@@ -256,7 +224,7 @@ int __init dma_declare_contiguous(struct device *dev, phys_addr_t size,
if (base) {
if (memblock_is_region_reserved(base, size) ||
memblock_reserve(base, size) < 0) {
- base = -EBUSY;
+ ret = -EBUSY;
goto err;
}
} else {
@@ -266,7 +234,7 @@ int __init dma_declare_contiguous(struct device *dev, phys_addr_t size,
*/
phys_addr_t addr = __memblock_alloc_base(size, alignment, limit);
if (!addr) {
- base = -ENOMEM;
+ ret = -ENOMEM;
goto err;
} else {
base = addr;
@@ -277,10 +245,11 @@ int __init dma_declare_contiguous(struct device *dev, phys_addr_t size,
* Each reserved area must be initialised later, when more kernel
* subsystems (like slab allocator) are available.
*/
- r->start = base;
- r->size = size;
- r->dev = dev;
- cma_reserved_count++;
+ cma->base_pfn = PFN_DOWN(base);
+ cma->count = size >> PAGE_SHIFT;
+ *res_cma = cma;
+ cma_area_count++;
+
pr_info("CMA: reserved %ld MiB at %08lx\n", (unsigned long)size / SZ_1M,
(unsigned long)base);
@@ -289,7 +258,7 @@ int __init dma_declare_contiguous(struct device *dev, phys_addr_t size,
return 0;
err:
pr_err("CMA: failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
- return base;
+ return ret;
}
/**
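
[Editorial note: a hypothetical caller of the new interface, from arch setup code while memblock is still active; the names board_cma and board_reserve_cma are illustrative only:]

	#include <linux/dma-contiguous.h>
	#include <linux/sizes.h>

	static struct cma *board_cma;

	void __init board_reserve_cma(void)
	{
		/* 16 MiB, any base address, any limit */
		if (dma_contiguous_reserve_area(SZ_16M, 0, 0, &board_cma))
			pr_warn("board: CMA reservation failed\n");
	}
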
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 7616a77..bc9f43b 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -125,13 +125,7 @@ static ssize_t node_read_meminfo(struct device *dev,
nid, K(node_page_state(nid, NR_WRITEBACK)),
nid, K(node_page_state(nid, NR_FILE_PAGES)),
nid, K(node_page_state(nid, NR_FILE_MAPPED)),
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- nid, K(node_page_state(nid, NR_ANON_PAGES)
- + node_page_state(nid, NR_ANON_TRANSPARENT_HUGEPAGES) *
- HPAGE_PMD_NR),
-#else
nid, K(node_page_state(nid, NR_ANON_PAGES)),
-#endif
nid, K(node_page_state(nid, NR_SHMEM)),
nid, node_page_state(nid, NR_KERNEL_STACK) *
THREAD_SIZE / 1024,
diff --git a/drivers/bcma/host_pci.c b/drivers/bcma/host_pci.c
index a355e63..6fb98b5 100644
--- a/drivers/bcma/host_pci.c
+++ b/drivers/bcma/host_pci.c
@@ -188,8 +188,11 @@ static int bcma_host_pci_probe(struct pci_dev *dev,
pci_write_config_dword(dev, 0x40, val & 0xffff00ff);
/* SSB needed additional powering up, do we have any AMBA PCI cards? */
- if (!pci_is_pcie(dev))
- bcma_err(bus, "PCI card detected, report problems.\n");
+ if (!pci_is_pcie(dev)) {
+ bcma_err(bus, "PCI card detected; PCI cards are not supported.\n");
+ err = -ENXIO;
+ goto err_pci_release_regions;
+ }
/* Map MMIO */
err = -ENOMEM;
@@ -269,6 +272,7 @@ static SIMPLE_DEV_PM_OPS(bcma_pm_ops, bcma_host_pci_suspend,
static DEFINE_PCI_DEVICE_TABLE(bcma_pci_bridge_tbl) = {
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x0576) },
+ { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4313) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 43224) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4331) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4353) },
diff --git a/drivers/block/aoe/aoe.h b/drivers/block/aoe/aoe.h
index 025c41d..14a9d19 100644
--- a/drivers/block/aoe/aoe.h
+++ b/drivers/block/aoe/aoe.h
@@ -1,5 +1,5 @@
/* Copyright (c) 2013 Coraid, Inc. See COPYING for GPL terms. */
-#define VERSION "83"
+#define VERSION "85"
#define AOE_MAJOR 152
#define DEVICE_NAME "aoe"
@@ -169,6 +169,7 @@ struct aoedev {
ulong ref;
struct work_struct work;/* disk create work struct */
struct gendisk *gd;
+ struct dentry *debugfs;
struct request_queue *blkq;
struct hd_geometry geo;
sector_t ssize;
@@ -206,6 +207,7 @@ struct ktstate {
int aoeblk_init(void);
void aoeblk_exit(void);
void aoeblk_gdalloc(void *);
+void aoedisk_rm_debugfs(struct aoedev *d);
void aoedisk_rm_sysfs(struct aoedev *d);
int aoechr_init(void);
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index 916d9ed..dd73e1f 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012 Coraid, Inc. See COPYING for GPL terms. */
+/* Copyright (c) 2013 Coraid, Inc. See COPYING for GPL terms. */
/*
* aoeblk.c
* block device routines
@@ -17,11 +17,13 @@
#include <linux/mutex.h>
#include <linux/export.h>
#include <linux/moduleparam.h>
+#include <linux/debugfs.h>
#include <scsi/sg.h>
#include "aoe.h"
static DEFINE_MUTEX(aoeblk_mutex);
static struct kmem_cache *buf_pool_cache;
+static struct dentry *aoe_debugfs_dir;
/* GPFS needs a larger value than the default. */
static int aoe_maxsectors;
@@ -108,6 +110,55 @@ static ssize_t aoedisk_show_payload(struct device *dev,
return snprintf(page, PAGE_SIZE, "%lu\n", d->maxbcnt);
}
+static int aoedisk_debugfs_show(struct seq_file *s, void *ignored)
+{
+ struct aoedev *d;
+ struct aoetgt **t, **te;
+ struct aoeif *ifp, *ife;
+ unsigned long flags;
+ char c;
+
+ d = s->private;
+ seq_printf(s, "rttavg: %d rttdev: %d\n",
+ d->rttavg >> RTTSCALE,
+ d->rttdev >> RTTDSCALE);
+ seq_printf(s, "nskbpool: %d\n", skb_queue_len(&d->skbpool));
+ seq_printf(s, "kicked: %ld\n", d->kicked);
+ seq_printf(s, "maxbcnt: %ld\n", d->maxbcnt);
+ seq_printf(s, "ref: %ld\n", d->ref);
+
+ spin_lock_irqsave(&d->lock, flags);
+ t = d->targets;
+ te = t + d->ntargets;
+ for (; t < te && *t; t++) {
+ c = '\t';
+ seq_printf(s, "falloc: %ld\n", (*t)->falloc);
+ seq_printf(s, "ffree: %p\n",
+ list_empty(&(*t)->ffree) ? NULL : (*t)->ffree.next);
+ seq_printf(s, "%pm:%d:%d:%d\n", (*t)->addr, (*t)->nout,
+ (*t)->maxout, (*t)->nframes);
+ seq_printf(s, "\tssthresh:%d\n", (*t)->ssthresh);
+ seq_printf(s, "\ttaint:%d\n", (*t)->taint);
+ seq_printf(s, "\tr:%d\n", (*t)->rpkts);
+ seq_printf(s, "\tw:%d\n", (*t)->wpkts);
+ ifp = (*t)->ifs;
+ ife = ifp + ARRAY_SIZE((*t)->ifs);
+ for (; ifp < ife && ifp->nd; ifp++) {
+ seq_printf(s, "%c%s", c, ifp->nd->name);
+ c = ',';
+ }
+ seq_puts(s, "\n");
+ }
+ spin_unlock_irqrestore(&d->lock, flags);
+
+ return 0;
+}
+
+static int aoe_debugfs_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, aoedisk_debugfs_show, inode->i_private);
+}
+
static DEVICE_ATTR(state, S_IRUGO, aoedisk_show_state, NULL);
static DEVICE_ATTR(mac, S_IRUGO, aoedisk_show_mac, NULL);
static DEVICE_ATTR(netif, S_IRUGO, aoedisk_show_netif, NULL);
@@ -130,6 +181,44 @@ static const struct attribute_group attr_group = {
.attrs = aoe_attrs,
};
+static const struct file_operations aoe_debugfs_fops = {
+ .open = aoe_debugfs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void
+aoedisk_add_debugfs(struct aoedev *d)
+{
+ struct dentry *entry;
+ char *p;
+
+ if (aoe_debugfs_dir == NULL)
+ return;
+ p = strchr(d->gd->disk_name, '/');
+ if (p == NULL)
+ p = d->gd->disk_name;
+ else
+ p++;
+ BUG_ON(*p == '\0');
+ entry = debugfs_create_file(p, 0444, aoe_debugfs_dir, d,
+ &aoe_debugfs_fops);
+ if (IS_ERR_OR_NULL(entry)) {
+ pr_info("aoe: cannot create debugfs file for %s\n",
+ d->gd->disk_name);
+ return;
+ }
+ BUG_ON(d->debugfs);
+ d->debugfs = entry;
+}
+void
+aoedisk_rm_debugfs(struct aoedev *d)
+{
+ debugfs_remove(d->debugfs);
+ d->debugfs = NULL;
+}
+
static int
aoedisk_add_sysfs(struct aoedev *d)
{
@@ -330,6 +419,7 @@ aoeblk_gdalloc(void *vp)
add_disk(gd);
aoedisk_add_sysfs(d);
+ aoedisk_add_debugfs(d);
spin_lock_irqsave(&d->lock, flags);
WARN_ON(!(d->flags & DEVFL_GD_NOW));
@@ -351,6 +441,8 @@ err:
void
aoeblk_exit(void)
{
+ debugfs_remove_recursive(aoe_debugfs_dir);
+ aoe_debugfs_dir = NULL;
kmem_cache_destroy(buf_pool_cache);
}
@@ -362,7 +454,11 @@ aoeblk_init(void)
0, 0, NULL);
if (buf_pool_cache == NULL)
return -ENOMEM;
-
+ aoe_debugfs_dir = debugfs_create_dir("aoe", NULL);
+ if (IS_ERR_OR_NULL(aoe_debugfs_dir)) {
+ pr_info("aoe: cannot create debugfs directory\n");
+ aoe_debugfs_dir = NULL;
+ }
return 0;
}
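
[Editorial note: the per-device file lands under the new "aoe" debugfs directory. A hypothetical userspace read, assuming debugfs is mounted at the usual path and a device named e1.1 exists:]

	#include <stdio.h>

	int main(void)
	{
		char line[256];
		FILE *f = fopen("/sys/kernel/debug/aoe/e1.1", "r");

		if (!f)
			return 1;
		while (fgets(line, sizeof(line), f))
			fputs(line, stdout);	/* rttavg, targets, interfaces */
		fclose(f);
		return 0;
	}
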
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index 4d45dba..d251543 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -380,7 +380,6 @@ aoecmd_ata_rw(struct aoedev *d)
{
struct frame *f;
struct buf *buf;
- struct aoetgt *t;
struct sk_buff *skb;
struct sk_buff_head queue;
ulong bcnt, fbcnt;
@@ -391,7 +390,6 @@ aoecmd_ata_rw(struct aoedev *d)
f = newframe(d);
if (f == NULL)
return 0;
- t = *d->tgt;
bcnt = d->maxbcnt;
if (bcnt == 0)
bcnt = DEFAULTBCNT;
@@ -485,7 +483,6 @@ resend(struct aoedev *d, struct frame *f)
struct sk_buff *skb;
struct sk_buff_head queue;
struct aoe_hdr *h;
- struct aoe_atahdr *ah;
struct aoetgt *t;
char buf[128];
u32 n;
@@ -500,7 +497,6 @@ resend(struct aoedev *d, struct frame *f)
return;
}
h = (struct aoe_hdr *) skb_mac_header(skb);
- ah = (struct aoe_atahdr *) (h+1);
if (!(f->flags & FFL_PROBE)) {
snprintf(buf, sizeof(buf),
diff --git a/drivers/block/aoe/aoedev.c b/drivers/block/aoe/aoedev.c
index 784c92e..e774c50 100644
--- a/drivers/block/aoe/aoedev.c
+++ b/drivers/block/aoe/aoedev.c
@@ -12,6 +12,7 @@
#include <linux/bitmap.h>
#include <linux/kdev_t.h>
#include <linux/moduleparam.h>
+#include <linux/string.h>
#include "aoe.h"
static void dummy_timer(ulong);
@@ -241,16 +242,12 @@ aoedev_downdev(struct aoedev *d)
static int
user_req(char *s, size_t slen, struct aoedev *d)
{
- char *p;
+ const char *p;
size_t lim;
if (!d->gd)
return 0;
- p = strrchr(d->gd->disk_name, '/');
- if (!p)
- p = d->gd->disk_name;
- else
- p += 1;
+ p = kbasename(d->gd->disk_name);
lim = sizeof(d->gd->disk_name);
lim -= p - d->gd->disk_name;
if (slen < lim)
@@ -278,6 +275,7 @@ freedev(struct aoedev *d)
del_timer_sync(&d->timer);
if (d->gd) {
+ aoedisk_rm_debugfs(d);
aoedisk_rm_sysfs(d);
del_gendisk(d->gd);
put_disk(d->gd);
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 62b6c2c..d2d95ff 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -4258,6 +4258,13 @@ static void cciss_find_board_params(ctlr_info_t *h)
h->nr_cmds = h->max_commands - 4 - cciss_tape_cmds;
h->maxsgentries = readl(&(h->cfgtable->MaxSGElements));
/*
+ * The P600 may exhibit poor performance under some workloads
+ * if we use the value in the configuration table. Limit this
+ * controller to MAXSGENTRIES (32) instead.
+ */
+ if (h->board_id == 0x3225103C)
+ h->maxsgentries = MAXSGENTRIES;
+ /*
* Limit in-command s/g elements to 32 to save dma'able memory.
* However, the spec says if 0, use 31
*/
diff --git a/drivers/block/mg_disk.c b/drivers/block/mg_disk.c
index a56cfcd..77a60be 100644
--- a/drivers/block/mg_disk.c
+++ b/drivers/block/mg_disk.c
@@ -636,7 +636,7 @@ ok_to_write:
mg_request(host->breq);
}
-void mg_times_out(unsigned long data)
+static void mg_times_out(unsigned long data)
{
struct mg_host *host = (struct mg_host *)data;
char *name;
diff --git a/drivers/block/mtip32xx/Kconfig b/drivers/block/mtip32xx/Kconfig
index 1fca1f99..0ba837f 100644
--- a/drivers/block/mtip32xx/Kconfig
+++ b/drivers/block/mtip32xx/Kconfig
@@ -4,6 +4,6 @@
config BLK_DEV_PCIESSD_MTIP32XX
tristate "Block Device Driver for Micron PCIe SSDs"
- depends on PCI && GENERIC_HARDIRQS
+ depends on PCI
help
This enables the block driver for Micron PCIe SSDs.
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index ce79a59..da52092 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -36,6 +36,7 @@
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/poison.h>
+#include <linux/ptrace.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/types.h>
@@ -79,7 +80,9 @@ struct nvme_queue {
u16 sq_head;
u16 sq_tail;
u16 cq_head;
- u16 cq_phase;
+ u8 cq_phase;
+ u8 cqe_seen;
+ u8 q_suspended;
unsigned long cmdid_data[];
};
@@ -115,6 +118,11 @@ static struct nvme_cmd_info *nvme_cmd_info(struct nvme_queue *nvmeq)
return (void *)&nvmeq->cmdid_data[BITS_TO_LONGS(nvmeq->q_depth)];
}
+static unsigned nvme_queue_extra(int depth)
+{
+ return DIV_ROUND_UP(depth, 8) + (depth * sizeof(struct nvme_cmd_info));
+}
+
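
[Editorial note: nvme_queue_extra() sizes the trailing cmdid_data region, one bit per command for the ID bitmap plus one nvme_cmd_info per command. A worked example (sketch):]

	/* depth == 64: DIV_ROUND_UP(64, 8) = 8 bitmap bytes, plus
	 * 64 * sizeof(struct nvme_cmd_info) of per-command state. */
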
/**
* alloc_cmdid() - Allocate a Command ID
* @nvmeq: The queue that will be used for this command
@@ -285,6 +293,7 @@ nvme_alloc_iod(unsigned nseg, unsigned nbytes, gfp_t gfp)
iod->npages = -1;
iod->length = nbytes;
iod->nents = 0;
+ iod->start_time = jiffies;
}
return iod;
@@ -308,6 +317,30 @@ void nvme_free_iod(struct nvme_dev *dev, struct nvme_iod *iod)
kfree(iod);
}
+static void nvme_start_io_acct(struct bio *bio)
+{
+ struct gendisk *disk = bio->bi_bdev->bd_disk;
+ const int rw = bio_data_dir(bio);
+ int cpu = part_stat_lock();
+ part_round_stats(cpu, &disk->part0);
+ part_stat_inc(cpu, &disk->part0, ios[rw]);
+ part_stat_add(cpu, &disk->part0, sectors[rw], bio_sectors(bio));
+ part_inc_in_flight(&disk->part0, rw);
+ part_stat_unlock();
+}
+
+static void nvme_end_io_acct(struct bio *bio, unsigned long start_time)
+{
+ struct gendisk *disk = bio->bi_bdev->bd_disk;
+ const int rw = bio_data_dir(bio);
+ unsigned long duration = jiffies - start_time;
+ int cpu = part_stat_lock();
+ part_stat_add(cpu, &disk->part0, ticks[rw], duration);
+ part_round_stats(cpu, &disk->part0);
+ part_dec_in_flight(&disk->part0, rw);
+ part_stat_unlock();
+}
+
static void bio_completion(struct nvme_dev *dev, void *ctx,
struct nvme_completion *cqe)
{
@@ -315,9 +348,11 @@ static void bio_completion(struct nvme_dev *dev, void *ctx,
struct bio *bio = iod->private;
u16 status = le16_to_cpup(&cqe->status) >> 1;
- if (iod->nents)
+ if (iod->nents) {
dma_unmap_sg(&dev->pci_dev->dev, iod->sg, iod->nents,
bio_data_dir(bio) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ nvme_end_io_acct(bio, iod->start_time);
+ }
nvme_free_iod(dev, iod);
if (status)
bio_endio(bio, -EIO);
@@ -422,10 +457,8 @@ static void nvme_bio_pair_endio(struct bio *bio, int err)
if (atomic_dec_and_test(&bp->cnt)) {
bio_endio(bp->parent, bp->err);
- if (bp->bv1)
- kfree(bp->bv1);
- if (bp->bv2)
- kfree(bp->bv2);
+ kfree(bp->bv1);
+ kfree(bp->bv2);
kfree(bp);
}
}
@@ -695,6 +728,7 @@ static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
cmnd->rw.control = cpu_to_le16(control);
cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
+ nvme_start_io_acct(bio);
if (++nvmeq->sq_tail == nvmeq->q_depth)
nvmeq->sq_tail = 0;
writel(nvmeq->sq_tail, nvmeq->q_db);
@@ -709,26 +743,7 @@ static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
return result;
}
-static void nvme_make_request(struct request_queue *q, struct bio *bio)
-{
- struct nvme_ns *ns = q->queuedata;
- struct nvme_queue *nvmeq = get_nvmeq(ns->dev);
- int result = -EBUSY;
-
- spin_lock_irq(&nvmeq->q_lock);
- if (bio_list_empty(&nvmeq->sq_cong))
- result = nvme_submit_bio_queue(nvmeq, ns, bio);
- if (unlikely(result)) {
- if (bio_list_empty(&nvmeq->sq_cong))
- add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
- bio_list_add(&nvmeq->sq_cong, bio);
- }
-
- spin_unlock_irq(&nvmeq->q_lock);
- put_nvmeq(nvmeq);
-}
-
-static irqreturn_t nvme_process_cq(struct nvme_queue *nvmeq)
+static int nvme_process_cq(struct nvme_queue *nvmeq)
{
u16 head, phase;
@@ -758,13 +773,40 @@ static irqreturn_t nvme_process_cq(struct nvme_queue *nvmeq)
* a big problem.
*/
if (head == nvmeq->cq_head && phase == nvmeq->cq_phase)
- return IRQ_NONE;
+ return 0;
writel(head, nvmeq->q_db + (1 << nvmeq->dev->db_stride));
nvmeq->cq_head = head;
nvmeq->cq_phase = phase;
- return IRQ_HANDLED;
+ nvmeq->cqe_seen = 1;
+ return 1;
+}
+
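
[Editorial note: nvme_process_cq() now returns whether it consumed any entries, and the IRQ handler derives IRQ_HANDLED from the cqe_seen flag instead. The validity test it relies on is the NVMe phase bit: the controller inverts the bit on each pass through the ring, so an entry is new exactly when its phase matches the expected one. A sketch of that existing logic, not new code:]

	if ((le16_to_cpu(cqe.status) & 1) != phase)
		break;			/* stale entry: stop consuming */
	if (++head == nvmeq->q_depth) {
		head = 0;
		phase = !phase;		/* expected phase flips per lap */
	}
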
+static void nvme_make_request(struct request_queue *q, struct bio *bio)
+{
+ struct nvme_ns *ns = q->queuedata;
+ struct nvme_queue *nvmeq = get_nvmeq(ns->dev);
+ int result = -EBUSY;
+
+ if (!nvmeq) {
+ put_nvmeq(NULL);
+ bio_endio(bio, -EIO);
+ return;
+ }
+
+ spin_lock_irq(&nvmeq->q_lock);
+ if (!nvmeq->q_suspended && bio_list_empty(&nvmeq->sq_cong))
+ result = nvme_submit_bio_queue(nvmeq, ns, bio);
+ if (unlikely(result)) {
+ if (bio_list_empty(&nvmeq->sq_cong))
+ add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
+ bio_list_add(&nvmeq->sq_cong, bio);
+ }
+
+ nvme_process_cq(nvmeq);
+ spin_unlock_irq(&nvmeq->q_lock);
+ put_nvmeq(nvmeq);
}
static irqreturn_t nvme_irq(int irq, void *data)
@@ -772,7 +814,9 @@ static irqreturn_t nvme_irq(int irq, void *data)
irqreturn_t result;
struct nvme_queue *nvmeq = data;
spin_lock(&nvmeq->q_lock);
- result = nvme_process_cq(nvmeq);
+ nvme_process_cq(nvmeq);
+ result = nvmeq->cqe_seen ? IRQ_HANDLED : IRQ_NONE;
+ nvmeq->cqe_seen = 0;
spin_unlock(&nvmeq->q_lock);
return result;
}
@@ -986,8 +1030,15 @@ static void nvme_cancel_ios(struct nvme_queue *nvmeq, bool timeout)
}
}
-static void nvme_free_queue_mem(struct nvme_queue *nvmeq)
+static void nvme_free_queue(struct nvme_queue *nvmeq)
{
+ spin_lock_irq(&nvmeq->q_lock);
+ while (bio_list_peek(&nvmeq->sq_cong)) {
+ struct bio *bio = bio_list_pop(&nvmeq->sq_cong);
+ bio_endio(bio, -EIO);
+ }
+ spin_unlock_irq(&nvmeq->q_lock);
+
dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
(void *)nvmeq->cqes, nvmeq->cq_dma_addr);
dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
@@ -995,17 +1046,28 @@ static void nvme_free_queue_mem(struct nvme_queue *nvmeq)
kfree(nvmeq);
}
-static void nvme_free_queue(struct nvme_dev *dev, int qid)
+static void nvme_free_queues(struct nvme_dev *dev)
+{
+ int i;
+
+ for (i = dev->queue_count - 1; i >= 0; i--) {
+ nvme_free_queue(dev->queues[i]);
+ dev->queue_count--;
+ dev->queues[i] = NULL;
+ }
+}
+
+static void nvme_disable_queue(struct nvme_dev *dev, int qid)
{
struct nvme_queue *nvmeq = dev->queues[qid];
int vector = dev->entry[nvmeq->cq_vector].vector;
spin_lock_irq(&nvmeq->q_lock);
- nvme_cancel_ios(nvmeq, false);
- while (bio_list_peek(&nvmeq->sq_cong)) {
- struct bio *bio = bio_list_pop(&nvmeq->sq_cong);
- bio_endio(bio, -EIO);
+ if (nvmeq->q_suspended) {
+ spin_unlock_irq(&nvmeq->q_lock);
+ return;
}
+ nvmeq->q_suspended = 1;
spin_unlock_irq(&nvmeq->q_lock);
irq_set_affinity_hint(vector, NULL);
@@ -1017,15 +1079,17 @@ static void nvme_free_queue(struct nvme_dev *dev, int qid)
adapter_delete_cq(dev, qid);
}
- nvme_free_queue_mem(nvmeq);
+ spin_lock_irq(&nvmeq->q_lock);
+ nvme_process_cq(nvmeq);
+ nvme_cancel_ios(nvmeq, false);
+ spin_unlock_irq(&nvmeq->q_lock);
}
static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
int depth, int vector)
{
struct device *dmadev = &dev->pci_dev->dev;
- unsigned extra = DIV_ROUND_UP(depth, 8) + (depth *
- sizeof(struct nvme_cmd_info));
+ unsigned extra = nvme_queue_extra(depth);
struct nvme_queue *nvmeq = kzalloc(sizeof(*nvmeq) + extra, GFP_KERNEL);
if (!nvmeq)
return NULL;
@@ -1052,6 +1116,8 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
nvmeq->q_db = &dev->dbs[qid << (dev->db_stride + 1)];
nvmeq->q_depth = depth;
nvmeq->cq_vector = vector;
+ nvmeq->q_suspended = 1;
+ dev->queue_count++;
return nvmeq;
@@ -1075,18 +1141,29 @@ static int queue_request_irq(struct nvme_dev *dev, struct nvme_queue *nvmeq,
IRQF_DISABLED | IRQF_SHARED, name, nvmeq);
}
-static struct nvme_queue *nvme_create_queue(struct nvme_dev *dev, int qid,
- int cq_size, int vector)
+static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
{
- int result;
- struct nvme_queue *nvmeq = nvme_alloc_queue(dev, qid, cq_size, vector);
+ struct nvme_dev *dev = nvmeq->dev;
+ unsigned extra = nvme_queue_extra(nvmeq->q_depth);
- if (!nvmeq)
- return ERR_PTR(-ENOMEM);
+ nvmeq->sq_tail = 0;
+ nvmeq->cq_head = 0;
+ nvmeq->cq_phase = 1;
+ nvmeq->q_db = &dev->dbs[qid << (dev->db_stride + 1)];
+ memset(nvmeq->cmdid_data, 0, extra);
+ memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq->q_depth));
+ nvme_cancel_ios(nvmeq, false);
+ nvmeq->q_suspended = 0;
+}
+
+static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
+{
+ struct nvme_dev *dev = nvmeq->dev;
+ int result;
result = adapter_alloc_cq(dev, qid, nvmeq);
if (result < 0)
- goto free_nvmeq;
+ return result;
result = adapter_alloc_sq(dev, qid, nvmeq);
if (result < 0)
@@ -1096,19 +1173,17 @@ static struct nvme_queue *nvme_create_queue(struct nvme_dev *dev, int qid,
if (result < 0)
goto release_sq;
- return nvmeq;
+ spin_lock(&nvmeq->q_lock);
+ nvme_init_queue(nvmeq, qid);
+ spin_unlock(&nvmeq->q_lock);
+
+ return result;
release_sq:
adapter_delete_sq(dev, qid);
release_cq:
adapter_delete_cq(dev, qid);
- free_nvmeq:
- dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
- (void *)nvmeq->cqes, nvmeq->cq_dma_addr);
- dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
- nvmeq->sq_cmds, nvmeq->sq_dma_addr);
- kfree(nvmeq);
- return ERR_PTR(result);
+ return result;
}
static int nvme_wait_ready(struct nvme_dev *dev, u64 cap, bool enabled)
@@ -1152,6 +1227,30 @@ static int nvme_enable_ctrl(struct nvme_dev *dev, u64 cap)
return nvme_wait_ready(dev, cap, true);
}
+static int nvme_shutdown_ctrl(struct nvme_dev *dev)
+{
+ unsigned long timeout;
+ u32 cc;
+
+ cc = (readl(&dev->bar->cc) & ~NVME_CC_SHN_MASK) | NVME_CC_SHN_NORMAL;
+ writel(cc, &dev->bar->cc);
+
+ timeout = 2 * HZ + jiffies;
+ while ((readl(&dev->bar->csts) & NVME_CSTS_SHST_MASK) !=
+ NVME_CSTS_SHST_CMPLT) {
+ msleep(100);
+ if (fatal_signal_pending(current))
+ return -EINTR;
+ if (time_after(jiffies, timeout)) {
+ dev_err(&dev->pci_dev->dev,
+ "Device shutdown incomplete; abort shutdown\n");
+ return -ENODEV;
+ }
+ }
+
+ return 0;
+}
+
static int nvme_configure_admin_queue(struct nvme_dev *dev)
{
int result;
@@ -1159,16 +1258,17 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
u64 cap = readq(&dev->bar->cap);
struct nvme_queue *nvmeq;
- dev->dbs = ((void __iomem *)dev->bar) + 4096;
- dev->db_stride = NVME_CAP_STRIDE(cap);
-
result = nvme_disable_ctrl(dev, cap);
if (result < 0)
return result;
- nvmeq = nvme_alloc_queue(dev, 0, 64, 0);
- if (!nvmeq)
- return -ENOMEM;
+ nvmeq = dev->queues[0];
+ if (!nvmeq) {
+ nvmeq = nvme_alloc_queue(dev, 0, 64, 0);
+ if (!nvmeq)
+ return -ENOMEM;
+ dev->queues[0] = nvmeq;
+ }
aqa = nvmeq->q_depth - 1;
aqa |= aqa << 16;
@@ -1185,17 +1285,15 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
result = nvme_enable_ctrl(dev, cap);
if (result)
- goto free_q;
+ return result;
result = queue_request_irq(dev, nvmeq, "nvme admin");
if (result)
- goto free_q;
-
- dev->queues[0] = nvmeq;
- return result;
+ return result;
- free_q:
- nvme_free_queue_mem(nvmeq);
+ spin_lock(&nvmeq->q_lock);
+ nvme_init_queue(nvmeq, 0);
+ spin_unlock(&nvmeq->q_lock);
return result;
}
@@ -1314,7 +1412,8 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
c.rw.appmask = cpu_to_le16(io.appmask);
if (meta_len) {
- meta_iod = nvme_map_user_pages(dev, io.opcode & 1, io.metadata, meta_len);
+ meta_iod = nvme_map_user_pages(dev, io.opcode & 1, io.metadata,
+ meta_len);
if (IS_ERR(meta_iod)) {
status = PTR_ERR(meta_iod);
meta_iod = NULL;
@@ -1356,6 +1455,8 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
put_nvmeq(nvmeq);
if (length != (io.nblocks + 1) << ns->lba_shift)
status = -ENOMEM;
+ else if (!nvmeq || nvmeq->q_suspended)
+ status = -EBUSY;
else
status = nvme_submit_sync_cmd(nvmeq, &c, NULL, NVME_IO_TIMEOUT);
@@ -1453,6 +1554,7 @@ static int nvme_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
switch (cmd) {
case NVME_IOCTL_ID:
+ force_successful_syscall_return();
return ns->ns_id;
case NVME_IOCTL_ADMIN_CMD:
return nvme_user_admin_cmd(ns->dev, (void __user *)arg);
@@ -1506,10 +1608,12 @@ static int nvme_kthread(void *data)
if (!nvmeq)
continue;
spin_lock_irq(&nvmeq->q_lock);
- if (nvme_process_cq(nvmeq))
- printk("process_cq did something\n");
+ if (nvmeq->q_suspended)
+ goto unlock;
+ nvme_process_cq(nvmeq);
nvme_cancel_ios(nvmeq, true);
nvme_resubmit_bios(nvmeq);
+ unlock:
spin_unlock_irq(&nvmeq->q_lock);
}
}
@@ -1556,7 +1660,7 @@ static void nvme_config_discard(struct nvme_ns *ns)
queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue);
}
-static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, int nsid,
+static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid,
struct nvme_id_ns *id, struct nvme_lba_range_type *rt)
{
struct nvme_ns *ns;
@@ -1631,14 +1735,19 @@ static int set_queue_count(struct nvme_dev *dev, int count)
status = nvme_set_features(dev, NVME_FEAT_NUM_QUEUES, q_count, 0,
&result);
if (status)
- return -EIO;
+ return status < 0 ? -EIO : -EBUSY;
return min(result & 0xffff, result >> 16) + 1;
}
+static size_t db_bar_size(struct nvme_dev *dev, unsigned nr_io_queues)
+{
+ return 4096 + ((nr_io_queues + 1) << (dev->db_stride + 3));
+}
+
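
[Editorial note: db_bar_size() accounts for one submission/completion doorbell pair per queue, each doorbell occupying (4 << db_stride) bytes past the 4 KiB register page. A worked example, assuming db_stride == 0:]

	/* 7 I/O queues + 1 admin queue = 8 pairs, 2 doorbells each:
	 * 4096 + (8 << 3) = 4160 bytes must be mapped. */
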
static int nvme_setup_io_queues(struct nvme_dev *dev)
{
struct pci_dev *pdev = dev->pci_dev;
- int result, cpu, i, nr_io_queues, db_bar_size, q_depth, q_count;
+ int result, cpu, i, vecs, nr_io_queues, size, q_depth;
nr_io_queues = num_online_cpus();
result = set_queue_count(dev, nr_io_queues);
@@ -1647,53 +1756,80 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
if (result < nr_io_queues)
nr_io_queues = result;
- q_count = nr_io_queues;
- /* Deregister the admin queue's interrupt */
- free_irq(dev->entry[0].vector, dev->queues[0]);
-
- db_bar_size = 4096 + ((nr_io_queues + 1) << (dev->db_stride + 3));
- if (db_bar_size > 8192) {
+ size = db_bar_size(dev, nr_io_queues);
+ if (size > 8192) {
iounmap(dev->bar);
- dev->bar = ioremap(pci_resource_start(pdev, 0), db_bar_size);
+ do {
+ dev->bar = ioremap(pci_resource_start(pdev, 0), size);
+ if (dev->bar)
+ break;
+ if (!--nr_io_queues)
+ return -ENOMEM;
+ size = db_bar_size(dev, nr_io_queues);
+ } while (1);
dev->dbs = ((void __iomem *)dev->bar) + 4096;
dev->queues[0]->q_db = dev->dbs;
}
- for (i = 0; i < nr_io_queues; i++)
+ /* Deregister the admin queue's interrupt */
+ free_irq(dev->entry[0].vector, dev->queues[0]);
+
+ vecs = nr_io_queues;
+ for (i = 0; i < vecs; i++)
dev->entry[i].entry = i;
for (;;) {
- result = pci_enable_msix(pdev, dev->entry, nr_io_queues);
- if (result == 0) {
- break;
- } else if (result > 0) {
- nr_io_queues = result;
- continue;
- } else {
- nr_io_queues = 0;
+ result = pci_enable_msix(pdev, dev->entry, vecs);
+ if (result <= 0)
break;
- }
+ vecs = result;
}
- if (nr_io_queues == 0) {
- nr_io_queues = q_count;
+ if (result < 0) {
+ vecs = nr_io_queues;
+ if (vecs > 32)
+ vecs = 32;
for (;;) {
- result = pci_enable_msi_block(pdev, nr_io_queues);
+ result = pci_enable_msi_block(pdev, vecs);
if (result == 0) {
- for (i = 0; i < nr_io_queues; i++)
+ for (i = 0; i < vecs; i++)
dev->entry[i].vector = i + pdev->irq;
break;
- } else if (result > 0) {
- nr_io_queues = result;
- continue;
- } else {
- nr_io_queues = 1;
+ } else if (result < 0) {
+ vecs = 1;
break;
}
+ vecs = result;
}
}
+ /*
+ * Should investigate if there's a performance win from allocating
+ * more queues than interrupt vectors; it might allow the submission
+ * path to scale better, even if the receive path is limited by the
+ * number of interrupts.
+ */
+ nr_io_queues = vecs;
+
result = queue_request_irq(dev, dev->queues[0], "nvme admin");
- /* XXX: handle failure here */
+ if (result) {
+ dev->queues[0]->q_suspended = 1;
+ goto free_queues;
+ }
+
+ /* Free previously allocated queues that are no longer usable */
+ spin_lock(&dev_list_lock);
+ for (i = dev->queue_count - 1; i > nr_io_queues; i--) {
+ struct nvme_queue *nvmeq = dev->queues[i];
+
+ spin_lock(&nvmeq->q_lock);
+ nvme_cancel_ios(nvmeq, false);
+ spin_unlock(&nvmeq->q_lock);
+
+ nvme_free_queue(nvmeq);
+ dev->queue_count--;
+ dev->queues[i] = NULL;
+ }
+ spin_unlock(&dev_list_lock);
cpu = cpumask_first(cpu_online_mask);
for (i = 0; i < nr_io_queues; i++) {
@@ -1703,11 +1839,12 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
q_depth = min_t(int, NVME_CAP_MQES(readq(&dev->bar->cap)) + 1,
NVME_Q_DEPTH);
- for (i = 0; i < nr_io_queues; i++) {
- dev->queues[i + 1] = nvme_create_queue(dev, i + 1, q_depth, i);
- if (IS_ERR(dev->queues[i + 1]))
- return PTR_ERR(dev->queues[i + 1]);
- dev->queue_count++;
+ for (i = dev->queue_count - 1; i < nr_io_queues; i++) {
+ dev->queues[i + 1] = nvme_alloc_queue(dev, i + 1, q_depth, i);
+ if (!dev->queues[i + 1]) {
+ result = -ENOMEM;
+ goto free_queues;
+ }
}
for (; i < num_possible_cpus(); i++) {
@@ -1715,15 +1852,20 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
dev->queues[i + 1] = dev->queues[target + 1];
}
- return 0;
-}
+ for (i = 1; i < dev->queue_count; i++) {
+ result = nvme_create_queue(dev->queues[i], i);
+ if (result) {
+ for (--i; i > 0; i--)
+ nvme_disable_queue(dev, i);
+ goto free_queues;
+ }
+ }
-static void nvme_free_queues(struct nvme_dev *dev)
-{
- int i;
+ return 0;
- for (i = dev->queue_count - 1; i >= 0; i--)
- nvme_free_queue(dev, i);
+ free_queues:
+ nvme_free_queues(dev);
+ return result;
}
/*
@@ -1734,7 +1876,8 @@ static void nvme_free_queues(struct nvme_dev *dev)
*/
static int nvme_dev_add(struct nvme_dev *dev)
{
- int res, nn, i;
+ int res;
+ unsigned nn, i;
struct nvme_ns *ns;
struct nvme_id_ctrl *ctrl;
struct nvme_id_ns *id_ns;
@@ -1742,10 +1885,6 @@ static int nvme_dev_add(struct nvme_dev *dev)
dma_addr_t dma_addr;
int shift = NVME_CAP_MPSMIN(readq(&dev->bar->cap)) + 12;
- res = nvme_setup_io_queues(dev);
- if (res)
- return res;
-
mem = dma_alloc_coherent(&dev->pci_dev->dev, 8192, &dma_addr,
GFP_KERNEL);
if (!mem)
@@ -1796,23 +1935,86 @@ static int nvme_dev_add(struct nvme_dev *dev)
return res;
}
-static int nvme_dev_remove(struct nvme_dev *dev)
+static int nvme_dev_map(struct nvme_dev *dev)
{
- struct nvme_ns *ns, *next;
+ int bars, result = -ENOMEM;
+ struct pci_dev *pdev = dev->pci_dev;
+
+ if (pci_enable_device_mem(pdev))
+ return result;
+
+ dev->entry[0].vector = pdev->irq;
+ pci_set_master(pdev);
+ bars = pci_select_bars(pdev, IORESOURCE_MEM);
+ if (pci_request_selected_regions(pdev, bars, "nvme"))
+ goto disable_pci;
+
+ if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))
+ dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
+ else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
+ dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+ else
+ goto disable_pci;
+
+ pci_set_drvdata(pdev, dev);
+ dev->bar = ioremap(pci_resource_start(pdev, 0), 8192);
+ if (!dev->bar)
+ goto disable;
+
+ dev->db_stride = NVME_CAP_STRIDE(readq(&dev->bar->cap));
+ dev->dbs = ((void __iomem *)dev->bar) + 4096;
+
+ return 0;
+
+ disable:
+ pci_release_regions(pdev);
+ disable_pci:
+ pci_disable_device(pdev);
+ return result;
+}
+
+static void nvme_dev_unmap(struct nvme_dev *dev)
+{
+ if (dev->pci_dev->msi_enabled)
+ pci_disable_msi(dev->pci_dev);
+ else if (dev->pci_dev->msix_enabled)
+ pci_disable_msix(dev->pci_dev);
+
+ if (dev->bar) {
+ iounmap(dev->bar);
+ dev->bar = NULL;
+ }
+
+ pci_release_regions(dev->pci_dev);
+ if (pci_is_enabled(dev->pci_dev))
+ pci_disable_device(dev->pci_dev);
+}
+
+static void nvme_dev_shutdown(struct nvme_dev *dev)
+{
+ int i;
+
+ for (i = dev->queue_count - 1; i >= 0; i--)
+ nvme_disable_queue(dev, i);
spin_lock(&dev_list_lock);
- list_del(&dev->node);
+ list_del_init(&dev->node);
spin_unlock(&dev_list_lock);
+ if (dev->bar)
+ nvme_shutdown_ctrl(dev);
+ nvme_dev_unmap(dev);
+}
+
+static void nvme_dev_remove(struct nvme_dev *dev)
+{
+ struct nvme_ns *ns, *next;
+
list_for_each_entry_safe(ns, next, &dev->namespaces, list) {
list_del(&ns->list);
del_gendisk(ns->disk);
nvme_ns_free(ns);
}
-
- nvme_free_queues(dev);
-
- return 0;
}
static int nvme_setup_prp_pools(struct nvme_dev *dev)
@@ -1872,15 +2074,10 @@ static void nvme_free_dev(struct kref *kref)
{
struct nvme_dev *dev = container_of(kref, struct nvme_dev, kref);
nvme_dev_remove(dev);
- if (dev->pci_dev->msi_enabled)
- pci_disable_msi(dev->pci_dev);
- else if (dev->pci_dev->msix_enabled)
- pci_disable_msix(dev->pci_dev);
- iounmap(dev->bar);
+ nvme_dev_shutdown(dev);
+ nvme_free_queues(dev);
nvme_release_instance(dev);
nvme_release_prp_pools(dev);
- pci_disable_device(dev->pci_dev);
- pci_release_regions(dev->pci_dev);
kfree(dev->queues);
kfree(dev->entry);
kfree(dev);
@@ -1921,9 +2118,40 @@ static const struct file_operations nvme_dev_fops = {
.compat_ioctl = nvme_dev_ioctl,
};
+static int nvme_dev_start(struct nvme_dev *dev)
+{
+ int result;
+
+ result = nvme_dev_map(dev);
+ if (result)
+ return result;
+
+ result = nvme_configure_admin_queue(dev);
+ if (result)
+ goto unmap;
+
+ spin_lock(&dev_list_lock);
+ list_add(&dev->node, &dev_list);
+ spin_unlock(&dev_list_lock);
+
+ result = nvme_setup_io_queues(dev);
+ if (result && result != -EBUSY)
+ goto disable;
+
+ return result;
+
+ disable:
+ spin_lock(&dev_list_lock);
+ list_del_init(&dev->node);
+ spin_unlock(&dev_list_lock);
+ unmap:
+ nvme_dev_unmap(dev);
+ return result;
+}
+
static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
- int bars, result = -ENOMEM;
+ int result = -ENOMEM;
struct nvme_dev *dev;
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
@@ -1938,53 +2166,28 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (!dev->queues)
goto free;
- if (pci_enable_device_mem(pdev))
- goto free;
- pci_set_master(pdev);
- bars = pci_select_bars(pdev, IORESOURCE_MEM);
- if (pci_request_selected_regions(pdev, bars, "nvme"))
- goto disable;
-
INIT_LIST_HEAD(&dev->namespaces);
dev->pci_dev = pdev;
- pci_set_drvdata(pdev, dev);
-
- if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))
- dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
- else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
- dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
- else
- goto disable;
-
result = nvme_set_instance(dev);
if (result)
- goto disable;
-
- dev->entry[0].vector = pdev->irq;
+ goto free;
result = nvme_setup_prp_pools(dev);
if (result)
- goto disable_msix;
+ goto release;
- dev->bar = ioremap(pci_resource_start(pdev, 0), 8192);
- if (!dev->bar) {
- result = -ENOMEM;
- goto disable_msix;
+ result = nvme_dev_start(dev);
+ if (result) {
+ if (result == -EBUSY)
+ goto create_cdev;
+ goto release_pools;
}
- result = nvme_configure_admin_queue(dev);
- if (result)
- goto unmap;
- dev->queue_count++;
-
- spin_lock(&dev_list_lock);
- list_add(&dev->node, &dev_list);
- spin_unlock(&dev_list_lock);
-
result = nvme_dev_add(dev);
if (result)
- goto delete;
+ goto shutdown;
+ create_cdev:
scnprintf(dev->name, sizeof(dev->name), "nvme%d", dev->instance);
dev->miscdev.minor = MISC_DYNAMIC_MINOR;
dev->miscdev.parent = &pdev->dev;
@@ -1999,24 +2202,13 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
remove:
nvme_dev_remove(dev);
- delete:
- spin_lock(&dev_list_lock);
- list_del(&dev->node);
- spin_unlock(&dev_list_lock);
-
+ shutdown:
+ nvme_dev_shutdown(dev);
+ release_pools:
nvme_free_queues(dev);
- unmap:
- iounmap(dev->bar);
- disable_msix:
- if (dev->pci_dev->msi_enabled)
- pci_disable_msi(dev->pci_dev);
- else if (dev->pci_dev->msix_enabled)
- pci_disable_msix(dev->pci_dev);
- nvme_release_instance(dev);
nvme_release_prp_pools(dev);
- disable:
- pci_disable_device(pdev);
- pci_release_regions(pdev);
+ release:
+ nvme_release_instance(dev);
free:
kfree(dev->queues);
kfree(dev->entry);
@@ -2037,8 +2229,30 @@ static void nvme_remove(struct pci_dev *pdev)
#define nvme_link_reset NULL
#define nvme_slot_reset NULL
#define nvme_error_resume NULL
-#define nvme_suspend NULL
-#define nvme_resume NULL
+
+static int nvme_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct nvme_dev *ndev = pci_get_drvdata(pdev);
+
+ nvme_dev_shutdown(ndev);
+ return 0;
+}
+
+static int nvme_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct nvme_dev *ndev = pci_get_drvdata(pdev);
+ int ret;
+
+ ret = nvme_dev_start(ndev);
+ /* XXX: should remove gendisks if resume fails */
+ if (ret)
+ nvme_free_queues(ndev);
+ return ret;
+}
+
+static SIMPLE_DEV_PM_OPS(nvme_dev_pm_ops, nvme_suspend, nvme_resume);
static const struct pci_error_handlers nvme_err_handler = {
.error_detected = nvme_error_detected,
@@ -2062,8 +2276,9 @@ static struct pci_driver nvme_driver = {
.id_table = nvme_id_table,
.probe = nvme_probe,
.remove = nvme_remove,
- .suspend = nvme_suspend,
- .resume = nvme_resume,
+ .driver = {
+ .pm = &nvme_dev_pm_ops,
+ },
.err_handler = &nvme_err_handler,
};
diff --git a/drivers/block/nvme-scsi.c b/drivers/block/nvme-scsi.c
index 102de2f..4a4ff4e 100644
--- a/drivers/block/nvme-scsi.c
+++ b/drivers/block/nvme-scsi.c
@@ -933,13 +933,12 @@ static int nvme_trans_bdev_char_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
int res = SNTI_TRANSLATION_SUCCESS;
int xfer_len;
- inq_response = kmalloc(EXTENDED_INQUIRY_DATA_PAGE_LENGTH, GFP_KERNEL);
+ inq_response = kzalloc(EXTENDED_INQUIRY_DATA_PAGE_LENGTH, GFP_KERNEL);
if (inq_response == NULL) {
res = -ENOMEM;
goto out_mem;
}
- memset(inq_response, 0, EXTENDED_INQUIRY_DATA_PAGE_LENGTH);
inq_response[1] = INQ_BDEV_CHARACTERISTICS_PAGE; /* Page Code */
inq_response[2] = 0x00; /* Page Length MSB */
inq_response[3] = 0x3C; /* Page Length LSB */
@@ -964,12 +963,11 @@ static int nvme_trans_log_supp_pages(struct nvme_ns *ns, struct sg_io_hdr *hdr,
int xfer_len;
u8 *log_response;
- log_response = kmalloc(LOG_PAGE_SUPPORTED_LOG_PAGES_LENGTH, GFP_KERNEL);
+ log_response = kzalloc(LOG_PAGE_SUPPORTED_LOG_PAGES_LENGTH, GFP_KERNEL);
if (log_response == NULL) {
res = -ENOMEM;
goto out_mem;
}
- memset(log_response, 0, LOG_PAGE_SUPPORTED_LOG_PAGES_LENGTH);
log_response[0] = LOG_PAGE_SUPPORTED_LOG_PAGES_PAGE;
/* Subpage=0x00, Page Length MSB=0 */
@@ -1000,12 +998,11 @@ static int nvme_trans_log_info_exceptions(struct nvme_ns *ns,
u8 temp_c;
u16 temp_k;
- log_response = kmalloc(LOG_INFO_EXCP_PAGE_LENGTH, GFP_KERNEL);
+ log_response = kzalloc(LOG_INFO_EXCP_PAGE_LENGTH, GFP_KERNEL);
if (log_response == NULL) {
res = -ENOMEM;
goto out_mem;
}
- memset(log_response, 0, LOG_INFO_EXCP_PAGE_LENGTH);
mem = dma_alloc_coherent(&dev->pci_dev->dev,
sizeof(struct nvme_smart_log),
@@ -1069,12 +1066,11 @@ static int nvme_trans_log_temperature(struct nvme_ns *ns, struct sg_io_hdr *hdr,
u8 temp_c_cur, temp_c_thresh;
u16 temp_k;
- log_response = kmalloc(LOG_TEMP_PAGE_LENGTH, GFP_KERNEL);
+ log_response = kzalloc(LOG_TEMP_PAGE_LENGTH, GFP_KERNEL);
if (log_response == NULL) {
res = -ENOMEM;
goto out_mem;
}
- memset(log_response, 0, LOG_TEMP_PAGE_LENGTH);
mem = dma_alloc_coherent(&dev->pci_dev->dev,
sizeof(struct nvme_smart_log),
@@ -1380,12 +1376,11 @@ static int nvme_trans_mode_page_create(struct nvme_ns *ns,
blk_desc_offset = mph_size;
mode_pages_offset_1 = blk_desc_offset + blk_desc_len;
- response = kmalloc(resp_size, GFP_KERNEL);
+ response = kzalloc(resp_size, GFP_KERNEL);
if (response == NULL) {
res = -ENOMEM;
goto out_mem;
}
- memset(response, 0, resp_size);
res = nvme_trans_fill_mode_parm_hdr(&response[0], mph_size, cdb10,
llbaa, mode_data_length, blk_desc_len);
@@ -2480,12 +2475,11 @@ static int nvme_trans_read_capacity(struct nvme_ns *ns, struct sg_io_hdr *hdr,
}
id_ns = mem;
- response = kmalloc(resp_size, GFP_KERNEL);
+ response = kzalloc(resp_size, GFP_KERNEL);
if (response == NULL) {
res = -ENOMEM;
goto out_dma;
}
- memset(response, 0, resp_size);
nvme_trans_fill_read_cap(response, id_ns, cdb16);
xfer_len = min(alloc_len, resp_size);
@@ -2554,12 +2548,11 @@ static int nvme_trans_report_luns(struct nvme_ns *ns, struct sg_io_hdr *hdr,
goto out_dma;
}
- response = kmalloc(resp_size, GFP_KERNEL);
+ response = kzalloc(resp_size, GFP_KERNEL);
if (response == NULL) {
res = -ENOMEM;
goto out_dma;
}
- memset(response, 0, resp_size);
/* The first LUN ID will always be 0 per the SAM spec */
for (lun_id = 0; lun_id < le32_to_cpu(id_ctrl->nn); lun_id++) {
@@ -2600,12 +2593,11 @@ static int nvme_trans_request_sense(struct nvme_ns *ns, struct sg_io_hdr *hdr,
resp_size = ((desc_format) ? (DESC_FMT_SENSE_DATA_SIZE) :
(FIXED_FMT_SENSE_DATA_SIZE));
- response = kmalloc(resp_size, GFP_KERNEL);
+ response = kzalloc(resp_size, GFP_KERNEL);
if (response == NULL) {
res = -ENOMEM;
goto out;
}
- memset(response, 0, resp_size);
if (desc_format == DESCRIPTOR_FORMAT_SENSE_DATA_TYPE) {
/* Descriptor Format Sense Data */
diff --git a/drivers/block/osdblk.c b/drivers/block/osdblk.c
index 1bbc681..79aa179 100644
--- a/drivers/block/osdblk.c
+++ b/drivers/block/osdblk.c
@@ -598,7 +598,7 @@ static ssize_t class_osdblk_remove(struct class *c,
unsigned long ul;
struct list_head *tmp;
- rc = strict_strtoul(buf, 10, &ul);
+ rc = kstrtoul(buf, 10, &ul);
if (rc)
return rc;
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index f5d0ea1..5618847 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -44,6 +44,8 @@
*
*************************************************************************/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/pktcdvd.h>
#include <linux/module.h>
#include <linux/types.h>
@@ -69,23 +71,24 @@
#define DRIVER_NAME "pktcdvd"
-#if PACKET_DEBUG
-#define DPRINTK(fmt, args...) printk(KERN_NOTICE fmt, ##args)
-#else
-#define DPRINTK(fmt, args...)
-#endif
-
-#if PACKET_DEBUG > 1
-#define VPRINTK(fmt, args...) printk(KERN_NOTICE fmt, ##args)
-#else
-#define VPRINTK(fmt, args...)
-#endif
+#define pkt_err(pd, fmt, ...) \
+ pr_err("%s: " fmt, pd->name, ##__VA_ARGS__)
+#define pkt_notice(pd, fmt, ...) \
+ pr_notice("%s: " fmt, pd->name, ##__VA_ARGS__)
+#define pkt_info(pd, fmt, ...) \
+ pr_info("%s: " fmt, pd->name, ##__VA_ARGS__)
+
+#define pkt_dbg(level, pd, fmt, ...) \
+do { \
+ if (level == 2 && PACKET_DEBUG >= 2) \
+ pr_notice("%s: %s(): " fmt, \
+ pd->name, __func__, ##__VA_ARGS__); \
+ else if (level == 1 && PACKET_DEBUG >= 1) \
+ pr_notice("%s: " fmt, pd->name, ##__VA_ARGS__); \
+} while (0)
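
[Editorial note: call sites pick a verbosity level; since PACKET_DEBUG is a compile-time constant, disabled levels compile away entirely. Illustrative calls, the first taken from the conversions below:]

	pkt_dbg(2, pd, "queue empty\n");	/* emitted only if PACKET_DEBUG >= 2 */
	pkt_dbg(1, pd, "inserted device\n");	/* emitted only if PACKET_DEBUG >= 1 */
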
#define MAX_SPEED 0xffff
-#define ZONE(sector, pd) (((sector) + (pd)->offset) & \
- ~(sector_t)((pd)->settings.size - 1))
-
static DEFINE_MUTEX(pktcdvd_mutex);
static struct pktcdvd_device *pkt_devs[MAX_WRITERS];
static struct proc_dir_entry *pkt_proc;
@@ -103,7 +106,10 @@ static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev);
static int pkt_remove_dev(dev_t pkt_dev);
static int pkt_seq_show(struct seq_file *m, void *p);
-
+static sector_t get_zone(sector_t sector, struct pktcdvd_device *pd)
+{
+ return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1);
+}
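
[Editorial note: get_zone() rounds a sector down to the start of its packet-sized zone; settings.size is a power of two. For instance, assuming pd->offset == 0 and a 128-sector zone:]

	/* get_zone(300, pd) == 256: sector 300 lies in zone [256, 384). */
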
/*
* create and register a pktcdvd kernel object.
@@ -424,7 +430,7 @@ static int pkt_sysfs_init(void)
if (ret) {
kfree(class_pktcdvd);
class_pktcdvd = NULL;
- printk(DRIVER_NAME": failed to create class pktcdvd\n");
+ pr_err("failed to create class pktcdvd\n");
return ret;
}
return 0;
@@ -517,7 +523,7 @@ static void pkt_bio_finished(struct pktcdvd_device *pd)
{
BUG_ON(atomic_read(&pd->cdrw.pending_bios) <= 0);
if (atomic_dec_and_test(&pd->cdrw.pending_bios)) {
- VPRINTK(DRIVER_NAME": queue empty\n");
+ pkt_dbg(2, pd, "queue empty\n");
atomic_set(&pd->iosched.attention, 1);
wake_up(&pd->wqueue);
}
@@ -734,36 +740,33 @@ out:
return ret;
}
+static const char *sense_key_string(__u8 index)
+{
+ static const char * const info[] = {
+ "No sense", "Recovered error", "Not ready",
+ "Medium error", "Hardware error", "Illegal request",
+ "Unit attention", "Data protect", "Blank check",
+ };
+
+ return index < ARRAY_SIZE(info) ? info[index] : "INVALID";
+}
+
/*
* A generic sense dump / resolve mechanism should be implemented across
* all ATAPI + SCSI devices.
*/
-static void pkt_dump_sense(struct packet_command *cgc)
+static void pkt_dump_sense(struct pktcdvd_device *pd,
+ struct packet_command *cgc)
{
- static char *info[9] = { "No sense", "Recovered error", "Not ready",
- "Medium error", "Hardware error", "Illegal request",
- "Unit attention", "Data protect", "Blank check" };
- int i;
struct request_sense *sense = cgc->sense;
- printk(DRIVER_NAME":");
- for (i = 0; i < CDROM_PACKET_SIZE; i++)
- printk(" %02x", cgc->cmd[i]);
- printk(" - ");
-
- if (sense == NULL) {
- printk("no sense\n");
- return;
- }
-
- printk("sense %02x.%02x.%02x", sense->sense_key, sense->asc, sense->ascq);
-
- if (sense->sense_key > 8) {
- printk(" (INVALID)\n");
- return;
- }
-
- printk(" (%s)\n", info[sense->sense_key]);
+ if (sense)
+ pkt_err(pd, "%*ph - sense %02x.%02x.%02x (%s)\n",
+ CDROM_PACKET_SIZE, cgc->cmd,
+ sense->sense_key, sense->asc, sense->ascq,
+ sense_key_string(sense->sense_key));
+ else
+ pkt_err(pd, "%*ph - no sense\n", CDROM_PACKET_SIZE, cgc->cmd);
}
/*
@@ -806,7 +809,7 @@ static noinline_for_stack int pkt_set_speed(struct pktcdvd_device *pd,
cgc.cmd[5] = write_speed & 0xff;
if ((ret = pkt_generic_packet(pd, &cgc)))
- pkt_dump_sense(&cgc);
+ pkt_dump_sense(pd, &cgc);
return ret;
}
@@ -872,7 +875,7 @@ static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
need_write_seek = 0;
if (need_write_seek && reads_queued) {
if (atomic_read(&pd->cdrw.pending_bios) > 0) {
- VPRINTK(DRIVER_NAME": write, waiting\n");
+ pkt_dbg(2, pd, "write, waiting\n");
break;
}
pkt_flush_cache(pd);
@@ -881,7 +884,7 @@ static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
} else {
if (!reads_queued && writes_queued) {
if (atomic_read(&pd->cdrw.pending_bios) > 0) {
- VPRINTK(DRIVER_NAME": read, waiting\n");
+ pkt_dbg(2, pd, "read, waiting\n");
break;
}
pd->iosched.writing = 1;
@@ -943,7 +946,7 @@ static int pkt_set_segment_merging(struct pktcdvd_device *pd, struct request_que
set_bit(PACKET_MERGE_SEGS, &pd->flags);
return 0;
} else {
- printk(DRIVER_NAME": cdrom max_phys_segments too small\n");
+ pkt_err(pd, "cdrom max_phys_segments too small\n");
return -EIO;
}
}
@@ -987,8 +990,9 @@ static void pkt_end_io_read(struct bio *bio, int err)
struct pktcdvd_device *pd = pkt->pd;
BUG_ON(!pd);
- VPRINTK("pkt_end_io_read: bio=%p sec0=%llx sec=%llx err=%d\n", bio,
- (unsigned long long)pkt->sector, (unsigned long long)bio->bi_sector, err);
+ pkt_dbg(2, pd, "bio=%p sec0=%llx sec=%llx err=%d\n",
+ bio, (unsigned long long)pkt->sector,
+ (unsigned long long)bio->bi_sector, err);
if (err)
atomic_inc(&pkt->io_errors);
@@ -1005,7 +1009,7 @@ static void pkt_end_io_packet_write(struct bio *bio, int err)
struct pktcdvd_device *pd = pkt->pd;
BUG_ON(!pd);
- VPRINTK("pkt_end_io_packet_write: id=%d, err=%d\n", pkt->id, err);
+ pkt_dbg(2, pd, "id=%d, err=%d\n", pkt->id, err);
pd->stats.pkt_ended++;
@@ -1047,7 +1051,7 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
spin_unlock(&pkt->lock);
if (pkt->cache_valid) {
- VPRINTK("pkt_gather_data: zone %llx cached\n",
+ pkt_dbg(2, pd, "zone %llx cached\n",
(unsigned long long)pkt->sector);
goto out_account;
}
@@ -1070,7 +1074,7 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
p = (f * CD_FRAMESIZE) / PAGE_SIZE;
offset = (f * CD_FRAMESIZE) % PAGE_SIZE;
- VPRINTK("pkt_gather_data: Adding frame %d, page:%p offs:%d\n",
+ pkt_dbg(2, pd, "Adding frame %d, page:%p offs:%d\n",
f, pkt->pages[p], offset);
if (!bio_add_page(bio, pkt->pages[p], CD_FRAMESIZE, offset))
BUG();
@@ -1082,7 +1086,7 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
}
out_account:
- VPRINTK("pkt_gather_data: need %d frames for zone %llx\n",
+ pkt_dbg(2, pd, "need %d frames for zone %llx\n",
frames_read, (unsigned long long)pkt->sector);
pd->stats.pkt_started++;
pd->stats.secs_rg += frames_read * (CD_FRAMESIZE >> 9);
@@ -1183,7 +1187,8 @@ static inline void pkt_set_state(struct packet_data *pkt, enum packet_data_state
"IDLE", "WAITING", "READ_WAIT", "WRITE_WAIT", "RECOVERY", "FINISHED"
};
enum packet_data_state old_state = pkt->state;
- VPRINTK("pkt %2d : s=%6llx %s -> %s\n", pkt->id, (unsigned long long)pkt->sector,
+ pkt_dbg(2, pd, "pkt %2d : s=%6llx %s -> %s\n",
+ pkt->id, (unsigned long long)pkt->sector,
state_name[old_state], state_name[state]);
#endif
pkt->state = state;
@@ -1202,12 +1207,10 @@ static int pkt_handle_queue(struct pktcdvd_device *pd)
struct rb_node *n;
int wakeup;
- VPRINTK("handle_queue\n");
-
atomic_set(&pd->scan_queue, 0);
if (list_empty(&pd->cdrw.pkt_free_list)) {
- VPRINTK("handle_queue: no pkt\n");
+ pkt_dbg(2, pd, "no pkt\n");
return 0;
}
@@ -1224,7 +1227,7 @@ static int pkt_handle_queue(struct pktcdvd_device *pd)
node = first_node;
while (node) {
bio = node->bio;
- zone = ZONE(bio->bi_sector, pd);
+ zone = get_zone(bio->bi_sector, pd);
list_for_each_entry(p, &pd->cdrw.pkt_active_list, list) {
if (p->sector == zone) {
bio = NULL;
@@ -1244,7 +1247,7 @@ try_next_bio:
}
spin_unlock(&pd->lock);
if (!bio) {
- VPRINTK("handle_queue: no bio\n");
+ pkt_dbg(2, pd, "no bio\n");
return 0;
}
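
The ZONE() macro becomes a get_zone() helper function. Its definition sits outside these hunks; a plausible reconstruction from the old macro, rounding a sector down to the start of its packet-sized zone (treat the body as an assumption):

static sector_t get_zone(sector_t sector, struct pktcdvd_device *pd)
{
	return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1);
}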
@@ -1260,12 +1263,12 @@ try_next_bio:
* to this packet.
*/
spin_lock(&pd->lock);
- VPRINTK("pkt_handle_queue: looking for zone %llx\n", (unsigned long long)zone);
+ pkt_dbg(2, pd, "looking for zone %llx\n", (unsigned long long)zone);
while ((node = pkt_rbtree_find(pd, zone)) != NULL) {
bio = node->bio;
- VPRINTK("pkt_handle_queue: found zone=%llx\n",
- (unsigned long long)ZONE(bio->bi_sector, pd));
- if (ZONE(bio->bi_sector, pd) != zone)
+ pkt_dbg(2, pd, "found zone=%llx\n",
+ (unsigned long long)get_zone(bio->bi_sector, pd));
+ if (get_zone(bio->bi_sector, pd) != zone)
break;
pkt_rbtree_erase(pd, node);
spin_lock(&pkt->lock);
@@ -1316,7 +1319,7 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
if (!bio_add_page(pkt->w_bio, bvec[f].bv_page, CD_FRAMESIZE, bvec[f].bv_offset))
BUG();
}
- VPRINTK(DRIVER_NAME": vcnt=%d\n", pkt->w_bio->bi_vcnt);
+ pkt_dbg(2, pd, "vcnt=%d\n", pkt->w_bio->bi_vcnt);
/*
* Fill-in bvec with data from orig_bios.
@@ -1327,7 +1330,7 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
pkt_set_state(pkt, PACKET_WRITE_WAIT_STATE);
spin_unlock(&pkt->lock);
- VPRINTK("pkt_start_write: Writing %d frames for zone %llx\n",
+ pkt_dbg(2, pd, "Writing %d frames for zone %llx\n",
pkt->write_size, (unsigned long long)pkt->sector);
if (test_bit(PACKET_MERGE_SEGS, &pd->flags) || (pkt->write_size < pkt->frames)) {
@@ -1359,7 +1362,7 @@ static void pkt_run_state_machine(struct pktcdvd_device *pd, struct packet_data
{
int uptodate;
- VPRINTK("run_state_machine: pkt %d\n", pkt->id);
+ pkt_dbg(2, pd, "pkt %d\n", pkt->id);
for (;;) {
switch (pkt->state) {
@@ -1398,7 +1401,7 @@ static void pkt_run_state_machine(struct pktcdvd_device *pd, struct packet_data
if (pkt_start_recovery(pkt)) {
pkt_start_write(pd, pkt);
} else {
- VPRINTK("No recovery possible\n");
+ pkt_dbg(2, pd, "No recovery possible\n");
pkt_set_state(pkt, PACKET_FINISHED_STATE);
}
break;
@@ -1419,8 +1422,6 @@ static void pkt_handle_packets(struct pktcdvd_device *pd)
{
struct packet_data *pkt, *next;
- VPRINTK("pkt_handle_packets\n");
-
/*
* Run state machine for active packets
*/
@@ -1502,9 +1503,9 @@ static int kcdrwd(void *foobar)
if (PACKET_DEBUG > 1) {
int states[PACKET_NUM_STATES];
pkt_count_states(pd, states);
- VPRINTK("kcdrwd: i:%d ow:%d rw:%d ww:%d rec:%d fin:%d\n",
- states[0], states[1], states[2], states[3],
- states[4], states[5]);
+ pkt_dbg(2, pd, "i:%d ow:%d rw:%d ww:%d rec:%d fin:%d\n",
+ states[0], states[1], states[2],
+ states[3], states[4], states[5]);
}
min_sleep_time = MAX_SCHEDULE_TIMEOUT;
@@ -1513,9 +1514,9 @@ static int kcdrwd(void *foobar)
min_sleep_time = pkt->sleep_time;
}
- VPRINTK("kcdrwd: sleeping\n");
+ pkt_dbg(2, pd, "sleeping\n");
residue = schedule_timeout(min_sleep_time);
- VPRINTK("kcdrwd: wake up\n");
+ pkt_dbg(2, pd, "wake up\n");
/* make swsusp happy with our thread */
try_to_freeze();
@@ -1563,9 +1564,10 @@ work_to_do:
static void pkt_print_settings(struct pktcdvd_device *pd)
{
- printk(DRIVER_NAME": %s packets, ", pd->settings.fp ? "Fixed" : "Variable");
- printk("%u blocks, ", pd->settings.size >> 2);
- printk("Mode-%c disc\n", pd->settings.block_mode == 8 ? '1' : '2');
+ pkt_info(pd, "%s packets, %u blocks, Mode-%c disc\n",
+ pd->settings.fp ? "Fixed" : "Variable",
+ pd->settings.size >> 2,
+ pd->settings.block_mode == 8 ? '1' : '2');
}
static int pkt_mode_sense(struct pktcdvd_device *pd, struct packet_command *cgc, int page_code, int page_control)
@@ -1699,7 +1701,7 @@ static noinline_for_stack int pkt_set_write_settings(struct pktcdvd_device *pd)
init_cdrom_command(&cgc, buffer, sizeof(*wp), CGC_DATA_READ);
cgc.sense = &sense;
if ((ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0))) {
- pkt_dump_sense(&cgc);
+ pkt_dump_sense(pd, &cgc);
return ret;
}
@@ -1714,7 +1716,7 @@ static noinline_for_stack int pkt_set_write_settings(struct pktcdvd_device *pd)
init_cdrom_command(&cgc, buffer, size, CGC_DATA_READ);
cgc.sense = &sense;
if ((ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0))) {
- pkt_dump_sense(&cgc);
+ pkt_dump_sense(pd, &cgc);
return ret;
}
@@ -1749,14 +1751,14 @@ static noinline_for_stack int pkt_set_write_settings(struct pktcdvd_device *pd)
/*
* paranoia
*/
- printk(DRIVER_NAME": write mode wrong %d\n", wp->data_block_type);
+ pkt_err(pd, "write mode wrong %d\n", wp->data_block_type);
return 1;
}
wp->packet_size = cpu_to_be32(pd->settings.size >> 2);
cgc.buflen = cgc.cmd[8] = size;
if ((ret = pkt_mode_select(pd, &cgc))) {
- pkt_dump_sense(&cgc);
+ pkt_dump_sense(pd, &cgc);
return ret;
}
@@ -1793,7 +1795,7 @@ static int pkt_writable_track(struct pktcdvd_device *pd, track_information *ti)
if (ti->rt == 1 && ti->blank == 0)
return 1;
- printk(DRIVER_NAME": bad state %d-%d-%d\n", ti->rt, ti->blank, ti->packet);
+ pkt_err(pd, "bad state %d-%d-%d\n", ti->rt, ti->blank, ti->packet);
return 0;
}
@@ -1811,7 +1813,8 @@ static int pkt_writable_disc(struct pktcdvd_device *pd, disc_information *di)
case 0x12: /* DVD-RAM */
return 1;
default:
- VPRINTK(DRIVER_NAME": Wrong disc profile (%x)\n", pd->mmc3_profile);
+ pkt_dbg(2, pd, "Wrong disc profile (%x)\n",
+ pd->mmc3_profile);
return 0;
}
@@ -1820,22 +1823,22 @@ static int pkt_writable_disc(struct pktcdvd_device *pd, disc_information *di)
* but i'm not sure, should we leave this to user apps? probably.
*/
if (di->disc_type == 0xff) {
- printk(DRIVER_NAME": Unknown disc. No track?\n");
+ pkt_notice(pd, "unknown disc - no track?\n");
return 0;
}
if (di->disc_type != 0x20 && di->disc_type != 0) {
- printk(DRIVER_NAME": Wrong disc type (%x)\n", di->disc_type);
+ pkt_err(pd, "wrong disc type (%x)\n", di->disc_type);
return 0;
}
if (di->erasable == 0) {
- printk(DRIVER_NAME": Disc not erasable\n");
+ pkt_notice(pd, "disc not erasable\n");
return 0;
}
if (di->border_status == PACKET_SESSION_RESERVED) {
- printk(DRIVER_NAME": Can't write to last track (reserved)\n");
+ pkt_err(pd, "can't write to last track (reserved)\n");
return 0;
}
@@ -1860,7 +1863,7 @@ static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd)
memset(&ti, 0, sizeof(track_information));
if ((ret = pkt_get_disc_info(pd, &di))) {
- printk("failed get_disc\n");
+ pkt_err(pd, "failed get_disc\n");
return ret;
}
@@ -1871,12 +1874,12 @@ static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd)
track = 1; /* (di.last_track_msb << 8) | di.last_track_lsb; */
if ((ret = pkt_get_track_info(pd, track, 1, &ti))) {
- printk(DRIVER_NAME": failed get_track\n");
+ pkt_err(pd, "failed get_track\n");
return ret;
}
if (!pkt_writable_track(pd, &ti)) {
- printk(DRIVER_NAME": can't write to this track\n");
+ pkt_err(pd, "can't write to this track\n");
return -EROFS;
}
@@ -1886,11 +1889,11 @@ static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd)
*/
pd->settings.size = be32_to_cpu(ti.fixed_packet_size) << 2;
if (pd->settings.size == 0) {
- printk(DRIVER_NAME": detected zero packet size!\n");
+ pkt_notice(pd, "detected zero packet size!\n");
return -ENXIO;
}
if (pd->settings.size > PACKET_MAX_SECTORS) {
- printk(DRIVER_NAME": packet size is too big\n");
+ pkt_err(pd, "packet size is too big\n");
return -EROFS;
}
pd->settings.fp = ti.fp;
@@ -1932,7 +1935,7 @@ static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd)
pd->settings.block_mode = PACKET_BLOCK_MODE2;
break;
default:
- printk(DRIVER_NAME": unknown data mode\n");
+ pkt_err(pd, "unknown data mode\n");
return -EROFS;
}
return 0;
@@ -1966,10 +1969,10 @@ static noinline_for_stack int pkt_write_caching(struct pktcdvd_device *pd,
cgc.buflen = cgc.cmd[8] = 2 + ((buf[0] << 8) | (buf[1] & 0xff));
ret = pkt_mode_select(pd, &cgc);
if (ret) {
- printk(DRIVER_NAME": write caching control failed\n");
- pkt_dump_sense(&cgc);
+ pkt_err(pd, "write caching control failed\n");
+ pkt_dump_sense(pd, &cgc);
} else if (!ret && set)
- printk(DRIVER_NAME": enabled write caching on %s\n", pd->name);
+ pkt_notice(pd, "enabled write caching\n");
return ret;
}
@@ -2005,7 +2008,7 @@ static noinline_for_stack int pkt_get_max_speed(struct pktcdvd_device *pd,
sizeof(struct mode_page_header);
ret = pkt_mode_sense(pd, &cgc, GPMODE_CAPABILITIES_PAGE, 0);
if (ret) {
- pkt_dump_sense(&cgc);
+ pkt_dump_sense(pd, &cgc);
return ret;
}
}
@@ -2064,7 +2067,7 @@ static noinline_for_stack int pkt_media_speed(struct pktcdvd_device *pd,
cgc.cmd[8] = 2;
ret = pkt_generic_packet(pd, &cgc);
if (ret) {
- pkt_dump_sense(&cgc);
+ pkt_dump_sense(pd, &cgc);
return ret;
}
size = ((unsigned int) buf[0]<<8) + buf[1] + 2;
@@ -2079,16 +2082,16 @@ static noinline_for_stack int pkt_media_speed(struct pktcdvd_device *pd,
cgc.cmd[8] = size;
ret = pkt_generic_packet(pd, &cgc);
if (ret) {
- pkt_dump_sense(&cgc);
+ pkt_dump_sense(pd, &cgc);
return ret;
}
if (!(buf[6] & 0x40)) {
- printk(DRIVER_NAME": Disc type is not CD-RW\n");
+ pkt_notice(pd, "disc type is not CD-RW\n");
return 1;
}
if (!(buf[6] & 0x4)) {
- printk(DRIVER_NAME": A1 values on media are not valid, maybe not CDRW?\n");
+ pkt_notice(pd, "A1 values on media are not valid, maybe not CDRW?\n");
return 1;
}
@@ -2108,14 +2111,14 @@ static noinline_for_stack int pkt_media_speed(struct pktcdvd_device *pd,
*speed = us_clv_to_speed[sp];
break;
default:
- printk(DRIVER_NAME": Unknown disc sub-type %d\n",st);
+ pkt_notice(pd, "unknown disc sub-type %d\n", st);
return 1;
}
if (*speed) {
- printk(DRIVER_NAME": Max. media speed: %d\n",*speed);
+ pkt_info(pd, "maximum media speed: %d\n", *speed);
return 0;
} else {
- printk(DRIVER_NAME": Unknown speed %d for sub-type %d\n",sp,st);
+ pkt_notice(pd, "unknown speed %d for sub-type %d\n", sp, st);
return 1;
}
}
@@ -2126,7 +2129,7 @@ static noinline_for_stack int pkt_perform_opc(struct pktcdvd_device *pd)
struct request_sense sense;
int ret;
- VPRINTK(DRIVER_NAME": Performing OPC\n");
+ pkt_dbg(2, pd, "Performing OPC\n");
init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
cgc.sense = &sense;
@@ -2134,7 +2137,7 @@ static noinline_for_stack int pkt_perform_opc(struct pktcdvd_device *pd)
cgc.cmd[0] = GPCMD_SEND_OPC;
cgc.cmd[1] = 1;
if ((ret = pkt_generic_packet(pd, &cgc)))
- pkt_dump_sense(&cgc);
+ pkt_dump_sense(pd, &cgc);
return ret;
}
@@ -2144,12 +2147,12 @@ static int pkt_open_write(struct pktcdvd_device *pd)
unsigned int write_speed, media_write_speed, read_speed;
if ((ret = pkt_probe_settings(pd))) {
- VPRINTK(DRIVER_NAME": %s failed probe\n", pd->name);
+ pkt_dbg(2, pd, "failed probe\n");
return ret;
}
if ((ret = pkt_set_write_settings(pd))) {
- DPRINTK(DRIVER_NAME": %s failed saving write settings\n", pd->name);
+ pkt_dbg(1, pd, "failed saving write settings\n");
return -EIO;
}
@@ -2161,26 +2164,26 @@ static int pkt_open_write(struct pktcdvd_device *pd)
case 0x13: /* DVD-RW */
case 0x1a: /* DVD+RW */
case 0x12: /* DVD-RAM */
- DPRINTK(DRIVER_NAME": write speed %ukB/s\n", write_speed);
+ pkt_dbg(1, pd, "write speed %ukB/s\n", write_speed);
break;
default:
if ((ret = pkt_media_speed(pd, &media_write_speed)))
media_write_speed = 16;
write_speed = min(write_speed, media_write_speed * 177);
- DPRINTK(DRIVER_NAME": write speed %ux\n", write_speed / 176);
+ pkt_dbg(1, pd, "write speed %ux\n", write_speed / 176);
break;
}
read_speed = write_speed;
if ((ret = pkt_set_speed(pd, write_speed, read_speed))) {
- DPRINTK(DRIVER_NAME": %s couldn't set write speed\n", pd->name);
+ pkt_dbg(1, pd, "couldn't set write speed\n");
return -EIO;
}
pd->write_speed = write_speed;
pd->read_speed = read_speed;
if ((ret = pkt_perform_opc(pd))) {
- DPRINTK(DRIVER_NAME": %s Optimum Power Calibration failed\n", pd->name);
+ pkt_dbg(1, pd, "Optimum Power Calibration failed\n");
}
return 0;
@@ -2205,7 +2208,7 @@ static int pkt_open_dev(struct pktcdvd_device *pd, fmode_t write)
goto out;
if ((ret = pkt_get_last_written(pd, &lba))) {
- printk(DRIVER_NAME": pkt_get_last_written failed\n");
+ pkt_err(pd, "pkt_get_last_written failed\n");
goto out_putdev;
}
@@ -2235,11 +2238,11 @@ static int pkt_open_dev(struct pktcdvd_device *pd, fmode_t write)
if (write) {
if (!pkt_grow_pktlist(pd, CONFIG_CDROM_PKTCDVD_BUFFERS)) {
- printk(DRIVER_NAME": not enough memory for buffers\n");
+ pkt_err(pd, "not enough memory for buffers\n");
ret = -ENOMEM;
goto out_putdev;
}
- printk(DRIVER_NAME": %lukB available on disc\n", lba << 1);
+ pkt_info(pd, "%lukB available on disc\n", lba << 1);
}
return 0;
@@ -2257,7 +2260,7 @@ out:
static void pkt_release_dev(struct pktcdvd_device *pd, int flush)
{
if (flush && pkt_flush_cache(pd))
- DPRINTK(DRIVER_NAME": %s not flushing cache\n", pd->name);
+ pkt_dbg(1, pd, "not flushing cache\n");
pkt_lock_door(pd, 0);
@@ -2279,8 +2282,6 @@ static int pkt_open(struct block_device *bdev, fmode_t mode)
struct pktcdvd_device *pd = NULL;
int ret;
- VPRINTK(DRIVER_NAME": entering open\n");
-
mutex_lock(&pktcdvd_mutex);
mutex_lock(&ctl_mutex);
pd = pkt_find_dev_from_minor(MINOR(bdev->bd_dev));
@@ -2315,7 +2316,6 @@ static int pkt_open(struct block_device *bdev, fmode_t mode)
out_dec:
pd->refcnt--;
out:
- VPRINTK(DRIVER_NAME": failed open (%d)\n", ret);
mutex_unlock(&ctl_mutex);
mutex_unlock(&pktcdvd_mutex);
return ret;
@@ -2360,7 +2360,8 @@ static void pkt_make_request(struct request_queue *q, struct bio *bio)
pd = q->queuedata;
if (!pd) {
- printk(DRIVER_NAME": %s incorrect request queue\n", bdevname(bio->bi_bdev, b));
+ pr_err("%s incorrect request queue\n",
+ bdevname(bio->bi_bdev, b));
goto end_io;
}
@@ -2382,20 +2383,20 @@ static void pkt_make_request(struct request_queue *q, struct bio *bio)
}
if (!test_bit(PACKET_WRITABLE, &pd->flags)) {
- printk(DRIVER_NAME": WRITE for ro device %s (%llu)\n",
- pd->name, (unsigned long long)bio->bi_sector);
+ pkt_notice(pd, "WRITE for ro device (%llu)\n",
+ (unsigned long long)bio->bi_sector);
goto end_io;
}
if (!bio->bi_size || (bio->bi_size % CD_FRAMESIZE)) {
- printk(DRIVER_NAME": wrong bio size\n");
+ pkt_err(pd, "wrong bio size\n");
goto end_io;
}
blk_queue_bounce(q, &bio);
- zone = ZONE(bio->bi_sector, pd);
- VPRINTK("pkt_make_request: start = %6llx stop = %6llx\n",
+ zone = get_zone(bio->bi_sector, pd);
+ pkt_dbg(2, pd, "start = %6llx stop = %6llx\n",
(unsigned long long)bio->bi_sector,
(unsigned long long)bio_end_sector(bio));
@@ -2405,7 +2406,7 @@ static void pkt_make_request(struct request_queue *q, struct bio *bio)
sector_t last_zone;
int first_sectors;
- last_zone = ZONE(bio_end_sector(bio) - 1, pd);
+ last_zone = get_zone(bio_end_sector(bio) - 1, pd);
if (last_zone != zone) {
BUG_ON(last_zone != zone + pd->settings.size);
first_sectors = last_zone - bio->bi_sector;
@@ -2500,7 +2501,7 @@ static int pkt_merge_bvec(struct request_queue *q, struct bvec_merge_data *bmd,
struct bio_vec *bvec)
{
struct pktcdvd_device *pd = q->queuedata;
- sector_t zone = ZONE(bmd->bi_sector, pd);
+ sector_t zone = get_zone(bmd->bi_sector, pd);
int used = ((bmd->bi_sector - zone) << 9) + bmd->bi_size;
int remaining = (pd->settings.size << 9) - used;
int remaining2;
@@ -2609,7 +2610,7 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
struct block_device *bdev;
if (pd->pkt_dev == dev) {
- printk(DRIVER_NAME": Recursive setup not allowed\n");
+ pkt_err(pd, "recursive setup not allowed\n");
return -EBUSY;
}
for (i = 0; i < MAX_WRITERS; i++) {
@@ -2617,11 +2618,12 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
if (!pd2)
continue;
if (pd2->bdev->bd_dev == dev) {
- printk(DRIVER_NAME": %s already setup\n", bdevname(pd2->bdev, b));
+ pkt_err(pd, "%s already setup\n",
+ bdevname(pd2->bdev, b));
return -EBUSY;
}
if (pd2->pkt_dev == dev) {
- printk(DRIVER_NAME": Can't chain pktcdvd devices\n");
+ pkt_err(pd, "can't chain pktcdvd devices\n");
return -EBUSY;
}
}
@@ -2644,13 +2646,13 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
atomic_set(&pd->cdrw.pending_bios, 0);
pd->cdrw.thread = kthread_run(kcdrwd, pd, "%s", pd->name);
if (IS_ERR(pd->cdrw.thread)) {
- printk(DRIVER_NAME": can't start kernel thread\n");
+ pkt_err(pd, "can't start kernel thread\n");
ret = -ENOMEM;
goto out_mem;
}
proc_create_data(pd->name, 0, pkt_proc, &pkt_proc_fops, pd);
- DPRINTK(DRIVER_NAME": writer %s mapped to %s\n", pd->name, bdevname(bdev, b));
+ pkt_dbg(1, pd, "writer mapped to %s\n", bdevname(bdev, b));
return 0;
out_mem:
@@ -2665,8 +2667,8 @@ static int pkt_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
struct pktcdvd_device *pd = bdev->bd_disk->private_data;
int ret;
- VPRINTK("pkt_ioctl: cmd %x, dev %d:%d\n", cmd,
- MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev));
+ pkt_dbg(2, pd, "cmd %x, dev %d:%d\n",
+ cmd, MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev));
mutex_lock(&pktcdvd_mutex);
switch (cmd) {
@@ -2690,7 +2692,7 @@ static int pkt_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
break;
default:
- VPRINTK(DRIVER_NAME": Unknown ioctl for %s (%x)\n", pd->name, cmd);
+ pkt_dbg(2, pd, "Unknown ioctl (%x)\n", cmd);
ret = -ENOTTY;
}
mutex_unlock(&pktcdvd_mutex);
@@ -2743,7 +2745,7 @@ static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev)
if (!pkt_devs[idx])
break;
if (idx == MAX_WRITERS) {
- printk(DRIVER_NAME": max %d writers supported\n", MAX_WRITERS);
+ pr_err("max %d writers supported\n", MAX_WRITERS);
ret = -EBUSY;
goto out_mutex;
}
@@ -2818,7 +2820,7 @@ out_mem:
kfree(pd);
out_mutex:
mutex_unlock(&ctl_mutex);
- printk(DRIVER_NAME": setup of pktcdvd device failed\n");
+ pr_err("setup of pktcdvd device failed\n");
return ret;
}
@@ -2839,7 +2841,7 @@ static int pkt_remove_dev(dev_t pkt_dev)
break;
}
if (idx == MAX_WRITERS) {
- DPRINTK(DRIVER_NAME": dev not setup\n");
+ pr_debug("dev not setup\n");
ret = -ENXIO;
goto out;
}
@@ -2859,7 +2861,7 @@ static int pkt_remove_dev(dev_t pkt_dev)
blkdev_put(pd->bdev, FMODE_READ | FMODE_NDELAY);
remove_proc_entry(pd->name, pkt_proc);
- DPRINTK(DRIVER_NAME": writer %s unmapped\n", pd->name);
+ pkt_dbg(1, pd, "writer unmapped\n");
del_gendisk(pd->disk);
blk_cleanup_queue(pd->disk->queue);
@@ -2969,7 +2971,7 @@ static int __init pkt_init(void)
ret = register_blkdev(pktdev_major, DRIVER_NAME);
if (ret < 0) {
- printk(DRIVER_NAME": Unable to register block device\n");
+ pr_err("unable to register block device\n");
goto out2;
}
if (!pktdev_major)
@@ -2983,7 +2985,7 @@ static int __init pkt_init(void)
ret = misc_register(&pkt_misc);
if (ret) {
- printk(DRIVER_NAME": Unable to register misc device\n");
+ pr_err("unable to register misc device\n");
goto out_misc;
}
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 191cd17..cb1db29 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -931,12 +931,14 @@ static const char *rbd_dev_v1_snap_name(struct rbd_device *rbd_dev,
u64 snap_id)
{
u32 which;
+ const char *snap_name;
which = rbd_dev_snap_index(rbd_dev, snap_id);
if (which == BAD_SNAP_INDEX)
- return NULL;
+ return ERR_PTR(-ENOENT);
- return _rbd_dev_v1_snap_name(rbd_dev, which);
+ snap_name = _rbd_dev_v1_snap_name(rbd_dev, which);
+ return snap_name ? snap_name : ERR_PTR(-ENOMEM);
}
static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
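
rbd_dev_v1_snap_name() now encodes the failure reason in the pointer itself via ERR_PTR(), so callers can distinguish a missing snapshot from an allocation failure. The usual consumption pattern (sketch):

	const char *name = rbd_dev_v1_snap_name(rbd_dev, snap_id);
	if (IS_ERR(name))
		return PTR_ERR(name);	/* -ENOENT or -ENOMEM */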
@@ -1561,11 +1563,12 @@ rbd_img_obj_request_read_callback(struct rbd_obj_request *obj_request)
obj_request, obj_request->img_request, obj_request->result,
xferred, length);
/*
- * ENOENT means a hole in the image. We zero-fill the
- * entire length of the request. A short read also implies
- * zero-fill to the end of the request. Either way we
- * update the xferred count to indicate the whole request
- * was satisfied.
+ * ENOENT means a hole in the image. We zero-fill the entire
+ * length of the request. A short read also implies zero-fill
+ * to the end of the request. An error requires the whole
+ * length of the request to be reported finished with an error
+ * to the block layer. In each case we update the xferred
+ * count to indicate the whole request was satisfied.
*/
rbd_assert(obj_request->type != OBJ_REQUEST_NODATA);
if (obj_request->result == -ENOENT) {
@@ -1574,14 +1577,13 @@ rbd_img_obj_request_read_callback(struct rbd_obj_request *obj_request)
else
zero_pages(obj_request->pages, 0, length);
obj_request->result = 0;
- obj_request->xferred = length;
} else if (xferred < length && !obj_request->result) {
if (obj_request->type == OBJ_REQUEST_BIO)
zero_bio_chain(obj_request->bio_list, xferred);
else
zero_pages(obj_request->pages, xferred, length);
- obj_request->xferred = length;
}
+ obj_request->xferred = length;
obj_request_done_set(obj_request);
}
@@ -2167,9 +2169,9 @@ static int rbd_img_request_fill(struct rbd_img_request *img_request,
struct rbd_obj_request *obj_request = NULL;
struct rbd_obj_request *next_obj_request;
bool write_request = img_request_write_test(img_request);
- struct bio *bio_list = 0;
+ struct bio *bio_list = NULL;
unsigned int bio_offset = 0;
- struct page **pages = 0;
+ struct page **pages = NULL;
u64 img_offset;
u64 resid;
u16 opcode;
@@ -2207,6 +2209,11 @@ static int rbd_img_request_fill(struct rbd_img_request *img_request,
rbd_segment_name_free(object_name);
if (!obj_request)
goto out_unwind;
+ /*
+ * set obj_request->img_request before creating the
+ * osd_request so that it gets the right snapc
+ */
+ rbd_img_obj_request_add(img_request, obj_request);
if (type == OBJ_REQUEST_BIO) {
unsigned int clone_size;
@@ -2248,11 +2255,6 @@ static int rbd_img_request_fill(struct rbd_img_request *img_request,
obj_request->pages, length,
offset & ~PAGE_MASK, false, false);
- /*
- * set obj_request->img_request before formatting
- * the osd_request so that it gets the right snapc
- */
- rbd_img_obj_request_add(img_request, obj_request);
if (write_request)
rbd_osd_req_format_write(obj_request);
else
@@ -2812,7 +2814,7 @@ out_err:
obj_request_done_set(obj_request);
}
-static int rbd_obj_notify_ack(struct rbd_device *rbd_dev, u64 notify_id)
+static int rbd_obj_notify_ack_sync(struct rbd_device *rbd_dev, u64 notify_id)
{
struct rbd_obj_request *obj_request;
struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
@@ -2827,16 +2829,17 @@ static int rbd_obj_notify_ack(struct rbd_device *rbd_dev, u64 notify_id)
obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, obj_request);
if (!obj_request->osd_req)
goto out;
- obj_request->callback = rbd_obj_request_put;
osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_NOTIFY_ACK,
notify_id, 0, 0);
rbd_osd_req_format_read(obj_request);
ret = rbd_obj_request_submit(osdc, obj_request);
-out:
if (ret)
- rbd_obj_request_put(obj_request);
+ goto out;
+ ret = rbd_obj_request_wait(obj_request);
+out:
+ rbd_obj_request_put(obj_request);
return ret;
}
@@ -2856,7 +2859,7 @@ static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
if (ret)
rbd_warn(rbd_dev, "header refresh error (%d)\n", ret);
- rbd_obj_notify_ack(rbd_dev, notify_id);
+ rbd_obj_notify_ack_sync(rbd_dev, notify_id);
}
/*
@@ -3328,6 +3331,31 @@ static void rbd_exists_validate(struct rbd_device *rbd_dev)
clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
}
+static void rbd_dev_update_size(struct rbd_device *rbd_dev)
+{
+ sector_t size;
+ bool removing;
+
+ /*
+ * Don't hold the lock while doing disk operations,
+ * or lock ordering will conflict with the bdev mutex via:
+ * rbd_add() -> blkdev_get() -> rbd_open()
+ */
+ spin_lock_irq(&rbd_dev->lock);
+ removing = test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags);
+ spin_unlock_irq(&rbd_dev->lock);
+ /*
+ * If the device is being removed, rbd_dev->disk has
+ * been destroyed, so don't try to update its size
+ */
+ if (!removing) {
+ size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
+ dout("setting size to %llu sectors", (unsigned long long)size);
+ set_capacity(rbd_dev->disk, size);
+ revalidate_disk(rbd_dev->disk);
+ }
+}
+
static int rbd_dev_refresh(struct rbd_device *rbd_dev)
{
u64 mapping_size;
@@ -3347,12 +3375,7 @@ static int rbd_dev_refresh(struct rbd_device *rbd_dev)
up_write(&rbd_dev->header_rwsem);
if (mapping_size != rbd_dev->mapping.size) {
- sector_t size;
-
- size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
- dout("setting size to %llu sectors", (unsigned long long)size);
- set_capacity(rbd_dev->disk, size);
- revalidate_disk(rbd_dev->disk);
+ rbd_dev_update_size(rbd_dev);
}
return ret;
@@ -3706,12 +3729,14 @@ static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
if (ret < sizeof (size_buf))
return -ERANGE;
- if (order)
+ if (order) {
*order = size_buf.order;
+ dout(" order %u", (unsigned int)*order);
+ }
*snap_size = le64_to_cpu(size_buf.size);
- dout(" snap_id 0x%016llx order = %u, snap_size = %llu\n",
- (unsigned long long)snap_id, (unsigned int)*order,
+ dout(" snap_id 0x%016llx snap_size = %llu\n",
+ (unsigned long long)snap_id,
(unsigned long long)*snap_size);
return 0;
@@ -4059,8 +4084,13 @@ static u64 rbd_v2_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
snap_id = snapc->snaps[which];
snap_name = rbd_dev_v2_snap_name(rbd_dev, snap_id);
- if (IS_ERR(snap_name))
- break;
+ if (IS_ERR(snap_name)) {
+ /* ignore no-longer existing snapshots */
+ if (PTR_ERR(snap_name) == -ENOENT)
+ continue;
+ else
+ break;
+ }
found = !strcmp(name, snap_name);
kfree(snap_name);
}
@@ -4139,8 +4169,8 @@ static int rbd_dev_spec_update(struct rbd_device *rbd_dev)
/* Look up the snapshot name, and make a copy */
snap_name = rbd_snap_name(rbd_dev, spec->snap_id);
- if (!snap_name) {
- ret = -ENOMEM;
+ if (IS_ERR(snap_name)) {
+ ret = PTR_ERR(snap_name);
goto out_err;
}
@@ -5130,7 +5160,7 @@ static ssize_t rbd_remove(struct bus_type *bus,
bool already = false;
int ret;
- ret = strict_strtoul(buf, 10, &ul);
+ ret = kstrtoul(buf, 10, &ul);
if (ret)
return ret;
@@ -5161,10 +5191,23 @@ static ssize_t rbd_remove(struct bus_type *bus,
if (ret < 0 || already)
return ret;
- rbd_bus_del_dev(rbd_dev);
ret = rbd_dev_header_watch_sync(rbd_dev, false);
if (ret)
rbd_warn(rbd_dev, "failed to cancel watch event (%d)\n", ret);
+
+ /*
+ * flush remaining watch callbacks - these must be complete
+ * before the osd_client is shut down
+ */
+ dout("%s: flushing notifies", __func__);
+ ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);
+ /*
+ * Don't free anything from rbd_dev->disk until after all
+ * notifies are completely processed. Otherwise
+ * rbd_bus_del_dev() will race with rbd_watch_cb(), resulting
+ * in a potential use after free of rbd_dev->disk or rbd_dev.
+ */
+ rbd_bus_del_dev(rbd_dev);
rbd_dev_image_release(rbd_dev);
module_put(THIS_MODULE);
diff --git a/drivers/block/swim.c b/drivers/block/swim.c
index 8ed6ccb..b02d53a 100644
--- a/drivers/block/swim.c
+++ b/drivers/block/swim.c
@@ -924,7 +924,6 @@ static int swim_probe(struct platform_device *dev)
return 0;
out_kfree:
- platform_set_drvdata(dev, NULL);
kfree(swd);
out_iounmap:
iounmap(swim_base);
@@ -962,7 +961,6 @@ static int swim_remove(struct platform_device *dev)
if (res)
release_mem_region(res->start, resource_size(res));
- platform_set_drvdata(dev, NULL);
kfree(swd);
return 0;
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index fe5c3cd..c2014a0 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -620,7 +620,7 @@ static void backend_changed(struct xenbus_watch *watch,
}
/* Front end dir is a number, which is used as the handle. */
- err = strict_strtoul(strrchr(dev->otherend, '/') + 1, 0, &handle);
+ err = kstrtoul(strrchr(dev->otherend, '/') + 1, 0, &handle);
if (err)
return;
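
strict_strtoul() is deprecated in favour of kstrtoul(), which keeps the same contract: 0 on success, a negative errno on malformed or out-of-range input. Minimal usage sketch:

	unsigned long handle;
	int err = kstrtoul("42", 10, &handle);	/* base 10; base 0 auto-detects */
	if (err)
		return err;	/* -EINVAL or -ERANGE */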
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 0d91fe5..7737b5b 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -255,10 +255,7 @@
#include <linux/fips.h>
#include <linux/ptrace.h>
#include <linux/kmemcheck.h>
-
-#ifdef CONFIG_GENERIC_HARDIRQS
-# include <linux/irq.h>
-#endif
+#include <linux/irq.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index 4519cb3..5796d01 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -766,6 +766,25 @@ static void tpm_tis_reenable_interrupts(struct tpm_chip *chip)
}
#endif
+#ifdef CONFIG_PM_SLEEP
+static int tpm_tis_resume(struct device *dev)
+{
+ struct tpm_chip *chip = dev_get_drvdata(dev);
+ int ret;
+
+ if (chip->vendor.irq)
+ tpm_tis_reenable_interrupts(chip);
+
+ ret = tpm_pm_resume(dev);
+ if (!ret)
+ tpm_do_selftest(chip);
+
+ return ret;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(tpm_tis_pm, tpm_pm_suspend, tpm_tis_resume);
+
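This folds the PNP-specific suspend/resume callbacks into one dev_pm_ops shared by both the PNP and platform drivers. In the CONFIG_PM_SLEEP case, SIMPLE_DEV_PM_OPS() has roughly the effect of (a sketch of the expansion, not its literal text):

static const struct dev_pm_ops tpm_tis_pm = {
	.suspend = tpm_pm_suspend,
	.resume  = tpm_tis_resume,
	/* freeze/thaw/poweroff/restore map onto the same pair */
};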
#ifdef CONFIG_PNP
static int tpm_tis_pnp_init(struct pnp_dev *pnp_dev,
const struct pnp_device_id *pnp_id)
@@ -787,26 +806,6 @@ static int tpm_tis_pnp_init(struct pnp_dev *pnp_dev,
return tpm_tis_init(&pnp_dev->dev, start, len, irq);
}
-static int tpm_tis_pnp_suspend(struct pnp_dev *dev, pm_message_t msg)
-{
- return tpm_pm_suspend(&dev->dev);
-}
-
-static int tpm_tis_pnp_resume(struct pnp_dev *dev)
-{
- struct tpm_chip *chip = pnp_get_drvdata(dev);
- int ret;
-
- if (chip->vendor.irq)
- tpm_tis_reenable_interrupts(chip);
-
- ret = tpm_pm_resume(&dev->dev);
- if (!ret)
- tpm_do_selftest(chip);
-
- return ret;
-}
-
static struct pnp_device_id tpm_pnp_tbl[] = {
{"PNP0C31", 0}, /* TPM */
{"ATM1200", 0}, /* Atmel */
@@ -835,9 +834,12 @@ static struct pnp_driver tis_pnp_driver = {
.name = "tpm_tis",
.id_table = tpm_pnp_tbl,
.probe = tpm_tis_pnp_init,
- .suspend = tpm_tis_pnp_suspend,
- .resume = tpm_tis_pnp_resume,
.remove = tpm_tis_pnp_remove,
+#ifdef CONFIG_PM_SLEEP
+ .driver = {
+ .pm = &tpm_tis_pm,
+ },
+#endif
};
#define TIS_HID_USR_IDX sizeof(tpm_pnp_tbl)/sizeof(struct pnp_device_id) -2
@@ -846,20 +848,6 @@ module_param_string(hid, tpm_pnp_tbl[TIS_HID_USR_IDX].id,
MODULE_PARM_DESC(hid, "Set additional specific HID for this driver to probe");
#endif
-#ifdef CONFIG_PM_SLEEP
-static int tpm_tis_resume(struct device *dev)
-{
- struct tpm_chip *chip = dev_get_drvdata(dev);
-
- if (chip->vendor.irq)
- tpm_tis_reenable_interrupts(chip);
-
- return tpm_pm_resume(dev);
-}
-#endif
-
-static SIMPLE_DEV_PM_OPS(tpm_tis_pm, tpm_pm_suspend, tpm_tis_resume);
-
static struct platform_driver tis_drv = {
.driver = {
.name = "tpm_tis",
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index fc45567..b79cf3e 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -1529,18 +1529,22 @@ static void remove_port_data(struct port *port)
{
struct port_buffer *buf;
+ spin_lock_irq(&port->inbuf_lock);
/* Remove unused data this port might have received. */
discard_port_data(port);
- reclaim_consumed_buffers(port);
-
/* Remove buffers we queued up for the Host to send us data in. */
while ((buf = virtqueue_detach_unused_buf(port->in_vq)))
free_buf(buf, true);
+ spin_unlock_irq(&port->inbuf_lock);
+
+ spin_lock_irq(&port->outvq_lock);
+ reclaim_consumed_buffers(port);
/* Free pending buffers from the out-queue. */
while ((buf = virtqueue_detach_unused_buf(port->out_vq)))
free_buf(buf, true);
+ spin_unlock_irq(&port->outvq_lock);
}
/*
@@ -1554,6 +1558,7 @@ static void unplug_port(struct port *port)
list_del(&port->list);
spin_unlock_irq(&port->portdev->ports_lock);
+ spin_lock_irq(&port->inbuf_lock);
if (port->guest_connected) {
/* Let the app know the port is going down. */
send_sigio_to_port(port);
@@ -1564,6 +1569,7 @@ static void unplug_port(struct port *port)
wake_up_interruptible(&port->waitqueue);
}
+ spin_unlock_irq(&port->inbuf_lock);
if (is_console_port(port)) {
spin_lock_irq(&pdrvdata_lock);
@@ -1585,9 +1591,8 @@ static void unplug_port(struct port *port)
device_destroy(pdrvdata.class, port->dev->devt);
cdev_del(port->cdev);
- kfree(port->name);
-
debugfs_remove(port->debugfs_file);
+ kfree(port->name);
/*
* Locks around here are not necessary - a port can't be
@@ -1681,7 +1686,9 @@ static void handle_control_message(struct ports_device *portdev,
* If the guest is connected, it'll be interested in
* knowing the host connection state changed.
*/
+ spin_lock_irq(&port->inbuf_lock);
send_sigio_to_port(port);
+ spin_unlock_irq(&port->inbuf_lock);
break;
case VIRTIO_CONSOLE_PORT_NAME:
/*
@@ -1801,13 +1808,13 @@ static void in_intr(struct virtqueue *vq)
if (!port->guest_connected && !is_rproc_serial(port->portdev->vdev))
discard_port_data(port);
+ /* Send a SIGIO indicating new data in case the process asked for it */
+ send_sigio_to_port(port);
+
spin_unlock_irqrestore(&port->inbuf_lock, flags);
wake_up_interruptible(&port->waitqueue);
- /* Send a SIGIO indicating new data in case the process asked for it */
- send_sigio_to_port(port);
-
if (is_console_port(port) && hvc_poll(port->cons.hvc))
hvc_kick();
}
@@ -2241,10 +2248,8 @@ static int __init init(void)
}
pdrvdata.debugfs_dir = debugfs_create_dir("virtio-ports", NULL);
- if (!pdrvdata.debugfs_dir) {
- pr_warning("Error %ld creating debugfs dir for virtio-ports\n",
- PTR_ERR(pdrvdata.debugfs_dir));
- }
+ if (!pdrvdata.debugfs_dir)
+ pr_warning("Error creating debugfs dir for virtio-ports\n");
INIT_LIST_HEAD(&pdrvdata.consoles);
INIT_LIST_HEAD(&pdrvdata.portdevs);
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index 51380d6..279407a 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -27,7 +27,7 @@ config COMMON_CLK_DEBUG
bool "DebugFS representation of clock tree"
select DEBUG_FS
---help---
- Creates a directory hierchy in debugfs for visualizing the clk
+ Creates a directory hierarchy in debugfs for visualizing the clk
tree structure. Each directory contains read-only members
that export information specific to that clk node: clk_rate,
clk_flags, clk_prepare_count, clk_enable_count &
@@ -64,6 +64,12 @@ config COMMON_CLK_SI5351
This driver supports Silicon Labs 5351A/B/C programmable clock
generators.
+config COMMON_CLK_S2MPS11
+ tristate "Clock driver for S2MPS11 MFD"
+ depends on MFD_SEC_CORE
+ ---help---
+ This driver supports S2MPS11 crystal oscillator clock.
+
config CLK_TWL6040
tristate "External McPDM functional clock from twl6040"
depends on TWL6040_CORE
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index 4038c2b..7b11106 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -40,5 +40,6 @@ obj-$(CONFIG_COMMON_CLK_AXI_CLKGEN) += clk-axi-clkgen.o
obj-$(CONFIG_COMMON_CLK_WM831X) += clk-wm831x.o
obj-$(CONFIG_COMMON_CLK_MAX77686) += clk-max77686.o
obj-$(CONFIG_COMMON_CLK_SI5351) += clk-si5351.o
+obj-$(CONFIG_COMMON_CLK_S2MPS11) += clk-s2mps11.o
obj-$(CONFIG_CLK_TWL6040) += clk-twl6040.o
obj-$(CONFIG_CLK_PPC_CORENET) += clk-ppc-corenet.o
diff --git a/drivers/clk/clk-bcm2835.c b/drivers/clk/clk-bcm2835.c
index 792bc57..5fb4ff5 100644
--- a/drivers/clk/clk-bcm2835.c
+++ b/drivers/clk/clk-bcm2835.c
@@ -23,7 +23,7 @@
#include <linux/clk-provider.h>
#include <linux/of.h>
-static const __initconst struct of_device_id clk_match[] = {
+static const struct of_device_id clk_match[] __initconst = {
{ .compatible = "fixed-clock", .data = of_fixed_clk_setup, },
{ }
};
diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c
index 6d55eb2..8d3009e 100644
--- a/drivers/clk/clk-divider.c
+++ b/drivers/clk/clk-divider.c
@@ -104,7 +104,7 @@ static unsigned long clk_divider_recalc_rate(struct clk_hw *hw,
struct clk_divider *divider = to_clk_divider(hw);
unsigned int div, val;
- val = readl(divider->reg) >> divider->shift;
+ val = clk_readl(divider->reg) >> divider->shift;
val &= div_mask(divider);
div = _get_div(divider, val);
@@ -230,11 +230,11 @@ static int clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
if (divider->flags & CLK_DIVIDER_HIWORD_MASK) {
val = div_mask(divider) << (divider->shift + 16);
} else {
- val = readl(divider->reg);
+ val = clk_readl(divider->reg);
val &= ~(div_mask(divider) << divider->shift);
}
val |= value << divider->shift;
- writel(val, divider->reg);
+ clk_writel(val, divider->reg);
if (divider->lock)
spin_unlock_irqrestore(divider->lock, flags);
@@ -317,6 +317,7 @@ struct clk *clk_register_divider(struct device *dev, const char *name,
return _register_divider(dev, name, parent_name, flags, reg, shift,
width, clk_divider_flags, NULL, lock);
}
+EXPORT_SYMBOL_GPL(clk_register_divider);
/**
* clk_register_divider_table - register a table based divider clock with
@@ -341,3 +342,4 @@ struct clk *clk_register_divider_table(struct device *dev, const char *name,
return _register_divider(dev, name, parent_name, flags, reg, shift,
width, clk_divider_flags, table, lock);
}
+EXPORT_SYMBOL_GPL(clk_register_divider_table);
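
The basic clock types switch from raw readl()/writel() to clk_readl()/clk_writel() wrappers so that an architecture can override the register accessors (e.g. for big-endian clock blocks). The generic fallback amounts to (sketch):

static inline u32 clk_readl(u32 __iomem *reg)
{
	return readl(reg);
}

static inline void clk_writel(u32 val, u32 __iomem *reg)
{
	writel(val, reg);
}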
diff --git a/drivers/clk/clk-fixed-factor.c b/drivers/clk/clk-fixed-factor.c
index 9ff7d51..0e1d89b 100644
--- a/drivers/clk/clk-fixed-factor.c
+++ b/drivers/clk/clk-fixed-factor.c
@@ -97,6 +97,8 @@ struct clk *clk_register_fixed_factor(struct device *dev, const char *name,
return clk;
}
+EXPORT_SYMBOL_GPL(clk_register_fixed_factor);
+
#ifdef CONFIG_OF
/**
* of_fixed_factor_clk_setup() - Setup function for simple fixed factor clock
diff --git a/drivers/clk/clk-fixed-rate.c b/drivers/clk/clk-fixed-rate.c
index dc58fbd..1ed591a 100644
--- a/drivers/clk/clk-fixed-rate.c
+++ b/drivers/clk/clk-fixed-rate.c
@@ -80,6 +80,7 @@ struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
return clk;
}
+EXPORT_SYMBOL_GPL(clk_register_fixed_rate);
#ifdef CONFIG_OF
/**
diff --git a/drivers/clk/clk-gate.c b/drivers/clk/clk-gate.c
index 790306e..4a58c55 100644
--- a/drivers/clk/clk-gate.c
+++ b/drivers/clk/clk-gate.c
@@ -58,7 +58,7 @@ static void clk_gate_endisable(struct clk_hw *hw, int enable)
if (set)
reg |= BIT(gate->bit_idx);
} else {
- reg = readl(gate->reg);
+ reg = clk_readl(gate->reg);
if (set)
reg |= BIT(gate->bit_idx);
@@ -66,7 +66,7 @@ static void clk_gate_endisable(struct clk_hw *hw, int enable)
reg &= ~BIT(gate->bit_idx);
}
- writel(reg, gate->reg);
+ clk_writel(reg, gate->reg);
if (gate->lock)
spin_unlock_irqrestore(gate->lock, flags);
@@ -89,7 +89,7 @@ static int clk_gate_is_enabled(struct clk_hw *hw)
u32 reg;
struct clk_gate *gate = to_clk_gate(hw);
- reg = readl(gate->reg);
+ reg = clk_readl(gate->reg);
/* if a set bit disables this clk, flip it before masking */
if (gate->flags & CLK_GATE_SET_TO_DISABLE)
@@ -161,3 +161,4 @@ struct clk *clk_register_gate(struct device *dev, const char *name,
return clk;
}
+EXPORT_SYMBOL_GPL(clk_register_gate);
diff --git a/drivers/clk/clk-mux.c b/drivers/clk/clk-mux.c
index 614444c..4f96ff3 100644
--- a/drivers/clk/clk-mux.c
+++ b/drivers/clk/clk-mux.c
@@ -42,7 +42,7 @@ static u8 clk_mux_get_parent(struct clk_hw *hw)
* OTOH, pmd_trace_clk_mux_ck uses a separate bit for each clock, so
* val = 0x4 really means "bit 2, index starts at bit 0"
*/
- val = readl(mux->reg) >> mux->shift;
+ val = clk_readl(mux->reg) >> mux->shift;
val &= mux->mask;
if (mux->table) {
@@ -89,11 +89,11 @@ static int clk_mux_set_parent(struct clk_hw *hw, u8 index)
if (mux->flags & CLK_MUX_HIWORD_MASK) {
val = mux->mask << (mux->shift + 16);
} else {
- val = readl(mux->reg);
+ val = clk_readl(mux->reg);
val &= ~(mux->mask << mux->shift);
}
val |= index << mux->shift;
- writel(val, mux->reg);
+ clk_writel(val, mux->reg);
if (mux->lock)
spin_unlock_irqrestore(mux->lock, flags);
@@ -104,9 +104,15 @@ static int clk_mux_set_parent(struct clk_hw *hw, u8 index)
const struct clk_ops clk_mux_ops = {
.get_parent = clk_mux_get_parent,
.set_parent = clk_mux_set_parent,
+ .determine_rate = __clk_mux_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_mux_ops);
+const struct clk_ops clk_mux_ro_ops = {
+ .get_parent = clk_mux_get_parent,
+};
+EXPORT_SYMBOL_GPL(clk_mux_ro_ops);
+
struct clk *clk_register_mux_table(struct device *dev, const char *name,
const char **parent_names, u8 num_parents, unsigned long flags,
void __iomem *reg, u8 shift, u32 mask,
@@ -133,7 +139,10 @@ struct clk *clk_register_mux_table(struct device *dev, const char *name,
}
init.name = name;
- init.ops = &clk_mux_ops;
+ if (clk_mux_flags & CLK_MUX_READ_ONLY)
+ init.ops = &clk_mux_ro_ops;
+ else
+ init.ops = &clk_mux_ops;
init.flags = flags | CLK_IS_BASIC;
init.parent_names = parent_names;
init.num_parents = num_parents;
@@ -154,6 +163,7 @@ struct clk *clk_register_mux_table(struct device *dev, const char *name,
return clk;
}
+EXPORT_SYMBOL_GPL(clk_register_mux_table);
struct clk *clk_register_mux(struct device *dev, const char *name,
const char **parent_names, u8 num_parents, unsigned long flags,
@@ -166,3 +176,4 @@ struct clk *clk_register_mux(struct device *dev, const char *name,
flags, reg, shift, mask, clk_mux_flags,
NULL, lock);
}
+EXPORT_SYMBOL_GPL(clk_register_mux);
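
With CLK_MUX_READ_ONLY, a mux whose parent selection is fixed (strapped in hardware or locked down by firmware) is registered with ops that omit .set_parent. Registration sketch using the signature above (the reg/shift/width values are placeholders):

	clk = clk_register_mux(NULL, "safe_mux", parent_names, num_parents,
			       0, reg, shift, width,
			       CLK_MUX_READ_ONLY, &lock);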
diff --git a/drivers/clk/clk-nomadik.c b/drivers/clk/clk-nomadik.c
index 6d819a3..51410c2 100644
--- a/drivers/clk/clk-nomadik.c
+++ b/drivers/clk/clk-nomadik.c
@@ -479,12 +479,12 @@ static void __init of_nomadik_src_clk_setup(struct device_node *np)
of_clk_add_provider(np, of_clk_src_simple_get, clk);
}
-static const __initconst struct of_device_id nomadik_src_match[] = {
+static const struct of_device_id nomadik_src_match[] __initconst = {
{ .compatible = "stericsson,nomadik-src" },
{ /* sentinel */ }
};
-static const __initconst struct of_device_id nomadik_src_clk_match[] = {
+static const struct of_device_id nomadik_src_clk_match[] __initconst = {
{
.compatible = "fixed-clock",
.data = of_fixed_clk_setup,
diff --git a/drivers/clk/clk-prima2.c b/drivers/clk/clk-prima2.c
index 643ca65..5ab95f1 100644
--- a/drivers/clk/clk-prima2.c
+++ b/drivers/clk/clk-prima2.c
@@ -1034,7 +1034,7 @@ enum prima2_clk_index {
usb0, usb1, maxclk,
};
-static __initdata struct clk_hw* prima2_clk_hw_array[maxclk] = {
+static struct clk_hw *prima2_clk_hw_array[maxclk] __initdata = {
NULL, /* dummy */
NULL,
&clk_pll1.hw,
diff --git a/drivers/clk/clk-s2mps11.c b/drivers/clk/clk-s2mps11.c
new file mode 100644
index 0000000..7be41e6
--- /dev/null
+++ b/drivers/clk/clk-s2mps11.c
@@ -0,0 +1,273 @@
+/*
+ * clk-s2mps11.c - Clock driver for S2MPS11.
+ *
+ * Copyright (C) 2013 Samsung Electronics
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/clkdev.h>
+#include <linux/regmap.h>
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/samsung/s2mps11.h>
+#include <linux/mfd/samsung/core.h>
+
+#define s2mps11_name(a) ((a)->hw.init->name)
+
+static struct clk **clk_table;
+static struct clk_onecell_data clk_data;
+
+enum {
+ S2MPS11_CLK_AP = 0,
+ S2MPS11_CLK_CP,
+ S2MPS11_CLK_BT,
+ S2MPS11_CLKS_NUM,
+};
+
+struct s2mps11_clk {
+ struct sec_pmic_dev *iodev;
+ struct clk_hw hw;
+ struct clk *clk;
+ struct clk_lookup *lookup;
+ u32 mask;
+ bool enabled;
+};
+
+static struct s2mps11_clk *to_s2mps11_clk(struct clk_hw *hw)
+{
+ return container_of(hw, struct s2mps11_clk, hw);
+}
+
+static int s2mps11_clk_prepare(struct clk_hw *hw)
+{
+ struct s2mps11_clk *s2mps11 = to_s2mps11_clk(hw);
+ int ret;
+
+ ret = regmap_update_bits(s2mps11->iodev->regmap,
+ S2MPS11_REG_RTC_CTRL,
+ s2mps11->mask, s2mps11->mask);
+ if (!ret)
+ s2mps11->enabled = true;
+
+ return ret;
+}
+
+static void s2mps11_clk_unprepare(struct clk_hw *hw)
+{
+ struct s2mps11_clk *s2mps11 = to_s2mps11_clk(hw);
+ int ret;
+
+ ret = regmap_update_bits(s2mps11->iodev->regmap, S2MPS11_REG_RTC_CTRL,
+ s2mps11->mask, ~s2mps11->mask);
+
+ if (!ret)
+ s2mps11->enabled = false;
+}
+
+static int s2mps11_clk_is_enabled(struct clk_hw *hw)
+{
+ struct s2mps11_clk *s2mps11 = to_s2mps11_clk(hw);
+
+ return s2mps11->enabled;
+}
+
+static unsigned long s2mps11_clk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct s2mps11_clk *s2mps11 = to_s2mps11_clk(hw);
+ if (s2mps11->enabled)
+ return 32768;
+ else
+ return 0;
+}
+
+static struct clk_ops s2mps11_clk_ops = {
+ .prepare = s2mps11_clk_prepare,
+ .unprepare = s2mps11_clk_unprepare,
+ .is_enabled = s2mps11_clk_is_enabled,
+ .recalc_rate = s2mps11_clk_recalc_rate,
+};
+
+static struct clk_init_data s2mps11_clks_init[S2MPS11_CLKS_NUM] = {
+ [S2MPS11_CLK_AP] = {
+ .name = "s2mps11_ap",
+ .ops = &s2mps11_clk_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ [S2MPS11_CLK_CP] = {
+ .name = "s2mps11_cp",
+ .ops = &s2mps11_clk_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ [S2MPS11_CLK_BT] = {
+ .name = "s2mps11_bt",
+ .ops = &s2mps11_clk_ops,
+ .flags = CLK_IS_ROOT,
+ },
+};
+
+static struct device_node *s2mps11_clk_parse_dt(struct platform_device *pdev)
+{
+ struct sec_pmic_dev *iodev = dev_get_drvdata(pdev->dev.parent);
+ struct device_node *clk_np;
+ int i;
+
+ if (!iodev->dev->of_node)
+ return NULL;
+
+ clk_np = of_find_node_by_name(iodev->dev->of_node, "clocks");
+ if (!clk_np) {
+ dev_err(&pdev->dev, "could not find clock sub-node\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ clk_table = devm_kzalloc(&pdev->dev, sizeof(struct clk *) *
+ S2MPS11_CLKS_NUM, GFP_KERNEL);
+ if (!clk_table)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; i < S2MPS11_CLKS_NUM; i++)
+ of_property_read_string_index(clk_np, "clock-output-names", i,
+ &s2mps11_clks_init[i].name);
+
+ return clk_np;
+}
+
+static int s2mps11_clk_probe(struct platform_device *pdev)
+{
+ struct sec_pmic_dev *iodev = dev_get_drvdata(pdev->dev.parent);
+ struct s2mps11_clk *s2mps11_clks, *s2mps11_clk;
+ struct device_node *clk_np = NULL;
+ int i, ret = 0;
+ u32 val;
+
+ s2mps11_clks = devm_kzalloc(&pdev->dev, sizeof(*s2mps11_clk) *
+ S2MPS11_CLKS_NUM, GFP_KERNEL);
+ if (!s2mps11_clks)
+ return -ENOMEM;
+
+ s2mps11_clk = s2mps11_clks;
+
+ clk_np = s2mps11_clk_parse_dt(pdev);
+ if (IS_ERR(clk_np))
+ return PTR_ERR(clk_np);
+
+ for (i = 0; i < S2MPS11_CLKS_NUM; i++, s2mps11_clk++) {
+ s2mps11_clk->iodev = iodev;
+ s2mps11_clk->hw.init = &s2mps11_clks_init[i];
+ s2mps11_clk->mask = 1 << i;
+
+ ret = regmap_read(s2mps11_clk->iodev->regmap,
+ S2MPS11_REG_RTC_CTRL, &val);
+ if (ret < 0)
+ goto err_reg;
+
+ s2mps11_clk->enabled = val & s2mps11_clk->mask;
+
+ s2mps11_clk->clk = devm_clk_register(&pdev->dev,
+ &s2mps11_clk->hw);
+ if (IS_ERR(s2mps11_clk->clk)) {
+ dev_err(&pdev->dev, "Fail to register : %s\n",
+ s2mps11_name(s2mps11_clk));
+ ret = PTR_ERR(s2mps11_clk->clk);
+ goto err_reg;
+ }
+
+ s2mps11_clk->lookup = devm_kzalloc(&pdev->dev,
+ sizeof(struct clk_lookup), GFP_KERNEL);
+ if (!s2mps11_clk->lookup) {
+ ret = -ENOMEM;
+ goto err_lup;
+ }
+
+ s2mps11_clk->lookup->con_id = s2mps11_name(s2mps11_clk);
+ s2mps11_clk->lookup->clk = s2mps11_clk->clk;
+
+ clkdev_add(s2mps11_clk->lookup);
+ }
+
+ if (clk_table) {
+ for (i = 0; i < S2MPS11_CLKS_NUM; i++)
+ clk_table[i] = s2mps11_clks[i].clk;
+
+ clk_data.clks = clk_table;
+ clk_data.clk_num = S2MPS11_CLKS_NUM;
+ of_clk_add_provider(clk_np, of_clk_src_onecell_get, &clk_data);
+ }
+
+ platform_set_drvdata(pdev, s2mps11_clks);
+
+ return ret;
+err_lup:
+ devm_clk_unregister(&pdev->dev, s2mps11_clk->clk);
+err_reg:
+ while (s2mps11_clk > s2mps11_clks) {
+ if (s2mps11_clk->lookup) {
+ clkdev_drop(s2mps11_clk->lookup);
+ devm_clk_unregister(&pdev->dev, s2mps11_clk->clk);
+ }
+ s2mps11_clk--;
+ }
+
+ return ret;
+}
+
+static int s2mps11_clk_remove(struct platform_device *pdev)
+{
+ struct s2mps11_clk *s2mps11_clks = platform_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < S2MPS11_CLKS_NUM; i++)
+ clkdev_drop(s2mps11_clks[i].lookup);
+
+ return 0;
+}
+
+static const struct platform_device_id s2mps11_clk_id[] = {
+ { "s2mps11-clk", 0},
+ { },
+};
+MODULE_DEVICE_TABLE(platform, s2mps11_clk_id);
+
+static struct platform_driver s2mps11_clk_driver = {
+ .driver = {
+ .name = "s2mps11-clk",
+ .owner = THIS_MODULE,
+ },
+ .probe = s2mps11_clk_probe,
+ .remove = s2mps11_clk_remove,
+ .id_table = s2mps11_clk_id,
+};
+
+static int __init s2mps11_clk_init(void)
+{
+ return platform_driver_register(&s2mps11_clk_driver);
+}
+subsys_initcall(s2mps11_clk_init);
+
+static void __init s2mps11_clk_cleanup(void)
+{
+ platform_driver_unregister(&s2mps11_clk_driver);
+}
+module_exit(s2mps11_clk_cleanup);
+
+MODULE_DESCRIPTION("S2MPS11 Clock Driver");
+MODULE_AUTHOR("Yadwinder Singh Brar <yadi.brar@samsung.com>");
+MODULE_LICENSE("GPL");
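
Because the probe registers both clkdev lookups and an of_clk provider, a consumer can request the 32.768 kHz outputs by con_id; a sketch, where "s2mps11_ap" is the default name from s2mps11_clks_init unless clock-output-names overrides it:

	struct clk *clk = clk_get(dev, "s2mps11_ap");
	if (!IS_ERR(clk))
		clk_prepare_enable(clk);	/* ends up in s2mps11_clk_prepare() */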
diff --git a/drivers/clk/clk-u300.c b/drivers/clk/clk-u300.c
index 8774e05..3efbdd0 100644
--- a/drivers/clk/clk-u300.c
+++ b/drivers/clk/clk-u300.c
@@ -746,7 +746,7 @@ struct u300_clock {
u16 clk_val;
};
-struct u300_clock const __initconst u300_clk_lookup[] = {
+static struct u300_clock const u300_clk_lookup[] __initconst = {
{
.type = U300_CLK_TYPE_REST,
.id = 3,
@@ -1151,7 +1151,7 @@ static void __init of_u300_syscon_mclk_init(struct device_node *np)
of_clk_add_provider(np, of_clk_src_simple_get, clk);
}
-static const __initconst struct of_device_id u300_clk_match[] = {
+static const struct of_device_id u300_clk_match[] __initconst = {
{
.compatible = "fixed-clock",
.data = of_fixed_clk_setup,
diff --git a/drivers/clk/clk-wm831x.c b/drivers/clk/clk-wm831x.c
index 1b3f8c9..805b4c3 100644
--- a/drivers/clk/clk-wm831x.c
+++ b/drivers/clk/clk-wm831x.c
@@ -31,7 +31,7 @@ struct wm831x_clk {
bool xtal_ena;
};
-static int wm831x_xtal_is_enabled(struct clk_hw *hw)
+static int wm831x_xtal_is_prepared(struct clk_hw *hw)
{
struct wm831x_clk *clkdata = container_of(hw, struct wm831x_clk,
xtal_hw);
@@ -52,7 +52,7 @@ static unsigned long wm831x_xtal_recalc_rate(struct clk_hw *hw,
}
static const struct clk_ops wm831x_xtal_ops = {
- .is_enabled = wm831x_xtal_is_enabled,
+ .is_prepared = wm831x_xtal_is_prepared,
.recalc_rate = wm831x_xtal_recalc_rate,
};
@@ -73,7 +73,7 @@ static const unsigned long wm831x_fll_auto_rates[] = {
24576000,
};
-static int wm831x_fll_is_enabled(struct clk_hw *hw)
+static int wm831x_fll_is_prepared(struct clk_hw *hw)
{
struct wm831x_clk *clkdata = container_of(hw, struct wm831x_clk,
fll_hw);
@@ -170,7 +170,7 @@ static int wm831x_fll_set_rate(struct clk_hw *hw, unsigned long rate,
if (i == ARRAY_SIZE(wm831x_fll_auto_rates))
return -EINVAL;
- if (wm831x_fll_is_enabled(hw))
+ if (wm831x_fll_is_prepared(hw))
return -EPERM;
return wm831x_set_bits(wm831x, WM831X_CLOCK_CONTROL_2,
@@ -220,7 +220,7 @@ static u8 wm831x_fll_get_parent(struct clk_hw *hw)
}
static const struct clk_ops wm831x_fll_ops = {
- .is_enabled = wm831x_fll_is_enabled,
+ .is_prepared = wm831x_fll_is_prepared,
.prepare = wm831x_fll_prepare,
.unprepare = wm831x_fll_unprepare,
.round_rate = wm831x_fll_round_rate,
@@ -237,7 +237,7 @@ static struct clk_init_data wm831x_fll_init = {
.flags = CLK_SET_RATE_GATE,
};
-static int wm831x_clkout_is_enabled(struct clk_hw *hw)
+static int wm831x_clkout_is_prepared(struct clk_hw *hw)
{
struct wm831x_clk *clkdata = container_of(hw, struct wm831x_clk,
clkout_hw);
@@ -335,7 +335,7 @@ static int wm831x_clkout_set_parent(struct clk_hw *hw, u8 parent)
}
static const struct clk_ops wm831x_clkout_ops = {
- .is_enabled = wm831x_clkout_is_enabled,
+ .is_prepared = wm831x_clkout_is_prepared,
.prepare = wm831x_clkout_prepare,
.unprepare = wm831x_clkout_unprepare,
.get_parent = wm831x_clkout_get_parent,
@@ -360,6 +360,8 @@ static int wm831x_clk_probe(struct platform_device *pdev)
if (!clkdata)
return -ENOMEM;
+ clkdata->wm831x = wm831x;
+
/* XTAL_ENA can only be set via OTP/InstantConfig so just read once */
ret = wm831x_reg_read(wm831x, WM831X_CLOCK_CONTROL_2);
if (ret < 0) {
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 54a191c..a004769 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -458,7 +458,6 @@ static void clk_unprepare_unused_subtree(struct clk *clk)
clk->ops->unprepare(clk->hw);
}
}
-EXPORT_SYMBOL_GPL(__clk_get_flags);
/* caller must hold prepare_lock */
static void clk_disable_unused_subtree(struct clk *clk)
@@ -559,6 +558,19 @@ struct clk *__clk_get_parent(struct clk *clk)
return !clk ? NULL : clk->parent;
}
+struct clk *clk_get_parent_by_index(struct clk *clk, u8 index)
+{
+ if (!clk || index >= clk->num_parents)
+ return NULL;
+ else if (!clk->parents)
+ return __clk_lookup(clk->parent_names[index]);
+ else if (!clk->parents[index])
+ return clk->parents[index] =
+ __clk_lookup(clk->parent_names[index]);
+ else
+ return clk->parents[index];
+}
+
unsigned int __clk_get_enable_count(struct clk *clk)
{
return !clk ? 0 : clk->enable_count;
@@ -594,6 +606,7 @@ unsigned long __clk_get_flags(struct clk *clk)
{
return !clk ? 0 : clk->flags;
}
+EXPORT_SYMBOL_GPL(__clk_get_flags);
bool __clk_is_prepared(struct clk *clk)
{
@@ -679,6 +692,55 @@ struct clk *__clk_lookup(const char *name)
return NULL;
}
+/*
+ * Helper for finding best parent to provide a given frequency. This can be used
+ * directly as a determine_rate callback (e.g. for a mux), or from a more
+ * complex clock that may combine a mux with other operations.
+ */
+long __clk_mux_determine_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *best_parent_rate,
+ struct clk **best_parent_p)
+{
+ struct clk *clk = hw->clk, *parent, *best_parent = NULL;
+ int i, num_parents;
+ unsigned long parent_rate, best = 0;
+
+ /* if NO_REPARENT flag set, pass through to current parent */
+ if (clk->flags & CLK_SET_RATE_NO_REPARENT) {
+ parent = clk->parent;
+ if (clk->flags & CLK_SET_RATE_PARENT)
+ best = __clk_round_rate(parent, rate);
+ else if (parent)
+ best = __clk_get_rate(parent);
+ else
+ best = __clk_get_rate(clk);
+ goto out;
+ }
+
+ /* find the parent that can provide the fastest rate <= rate */
+ num_parents = clk->num_parents;
+ for (i = 0; i < num_parents; i++) {
+ parent = clk_get_parent_by_index(clk, i);
+ if (!parent)
+ continue;
+ if (clk->flags & CLK_SET_RATE_PARENT)
+ parent_rate = __clk_round_rate(parent, rate);
+ else
+ parent_rate = __clk_get_rate(parent);
+ if (parent_rate <= rate && parent_rate > best) {
+ best_parent = parent;
+ best = parent_rate;
+ }
+ }
+
+out:
+ if (best_parent)
+ *best_parent_p = best_parent;
+ *best_parent_rate = best;
+
+ return best;
+}
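
Simple muxes can point .determine_rate at this helper instead of open-coding parent selection, exactly as clk_mux_ops now does; a custom provider would wire it up the same way (the callback names here are hypothetical):

static const struct clk_ops my_mux_ops = {
	.get_parent	= my_get_parent,
	.set_parent	= my_set_parent,
	.determine_rate	= __clk_mux_determine_rate,
};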
+
/*** clk api ***/
void __clk_unprepare(struct clk *clk)
@@ -702,7 +764,7 @@ void __clk_unprepare(struct clk *clk)
/**
* clk_unprepare - undo preparation of a clock source
- * @clk: the clk being unprepare
+ * @clk: the clk being unprepared
*
* clk_unprepare may sleep, which differentiates it from clk_disable. In a
* simple case, clk_unprepare can be used instead of clk_disable to gate a clk
@@ -869,27 +931,31 @@ EXPORT_SYMBOL_GPL(clk_enable);
/**
* __clk_round_rate - round the given rate for a clk
* @clk: round the rate of this clock
+ * @rate: the rate which is to be rounded
*
* Caller must hold prepare_lock. Useful for clk_ops such as .set_rate
*/
unsigned long __clk_round_rate(struct clk *clk, unsigned long rate)
{
unsigned long parent_rate = 0;
+ struct clk *parent;
if (!clk)
return 0;
- if (!clk->ops->round_rate) {
- if (clk->flags & CLK_SET_RATE_PARENT)
- return __clk_round_rate(clk->parent, rate);
- else
- return clk->rate;
- }
-
- if (clk->parent)
- parent_rate = clk->parent->rate;
-
- return clk->ops->round_rate(clk->hw, rate, &parent_rate);
+ parent = clk->parent;
+ if (parent)
+ parent_rate = parent->rate;
+
+ if (clk->ops->determine_rate)
+ return clk->ops->determine_rate(clk->hw, rate, &parent_rate,
+ &parent);
+ else if (clk->ops->round_rate)
+ return clk->ops->round_rate(clk->hw, rate, &parent_rate);
+ else if (clk->flags & CLK_SET_RATE_PARENT)
+ return __clk_round_rate(clk->parent, rate);
+ else
+ return clk->rate;
}
/**
@@ -956,7 +1022,7 @@ static int __clk_notify(struct clk *clk, unsigned long msg,
*
* Walks the subtree of clks starting with clk and recalculates rates as it
* goes. Note that if a clk does not implement the .recalc_rate callback then
- * it is assumed that the clock will take on the rate of it's parent.
+ * it is assumed that the clock will take on the rate of its parent.
*
* clk_recalc_rates also propagates the POST_RATE_CHANGE notification,
* if necessary.
@@ -1014,6 +1080,115 @@ unsigned long clk_get_rate(struct clk *clk)
}
EXPORT_SYMBOL_GPL(clk_get_rate);
+static u8 clk_fetch_parent_index(struct clk *clk, struct clk *parent)
+{
+ u8 i;
+
+ if (!clk->parents)
+ clk->parents = kzalloc((sizeof(struct clk*) * clk->num_parents),
+ GFP_KERNEL);
+
+ /*
+ * find index of new parent clock using cached parent ptrs,
+ * or if not yet cached, use string name comparison and cache
+ * them now to avoid future calls to __clk_lookup.
+ */
+ for (i = 0; i < clk->num_parents; i++) {
+ if (clk->parents && clk->parents[i] == parent)
+ break;
+ else if (!strcmp(clk->parent_names[i], parent->name)) {
+ if (clk->parents)
+ clk->parents[i] = __clk_lookup(parent->name);
+ break;
+ }
+ }
+
+ return i;
+}
+
+static void clk_reparent(struct clk *clk, struct clk *new_parent)
+{
+ hlist_del(&clk->child_node);
+
+ if (new_parent) {
+ /* avoid duplicate POST_RATE_CHANGE notifications */
+ if (new_parent->new_child == clk)
+ new_parent->new_child = NULL;
+
+ hlist_add_head(&clk->child_node, &new_parent->children);
+ } else {
+ hlist_add_head(&clk->child_node, &clk_orphan_list);
+ }
+
+ clk->parent = new_parent;
+}
+
+static int __clk_set_parent(struct clk *clk, struct clk *parent, u8 p_index)
+{
+ unsigned long flags;
+ int ret = 0;
+ struct clk *old_parent = clk->parent;
+
+ /*
+ * Migrate prepare state between parents and prevent race with
+ * clk_enable().
+ *
+ * If the clock is not prepared, then a race with
+ * clk_enable/disable() is impossible since we already have the
+ * prepare lock (future calls to clk_enable() need to be preceded by
+ * a clk_prepare()).
+ *
+ * If the clock is prepared, migrate the prepared state to the new
+ * parent and also protect against a race with clk_enable() by
+ * forcing the clock and the new parent on. This ensures that all
+ * future calls to clk_enable() are practically NOPs with respect to
+ * hardware and software states.
+ *
+ * See also: Comment for clk_set_parent() below.
+ */
+ if (clk->prepare_count) {
+ __clk_prepare(parent);
+ clk_enable(parent);
+ clk_enable(clk);
+ }
+
+ /* update the clk tree topology */
+ flags = clk_enable_lock();
+ clk_reparent(clk, parent);
+ clk_enable_unlock(flags);
+
+ /* change clock input source */
+ if (parent && clk->ops->set_parent)
+ ret = clk->ops->set_parent(clk->hw, p_index);
+
+ if (ret) {
+ flags = clk_enable_lock();
+ clk_reparent(clk, old_parent);
+ clk_enable_unlock(flags);
+
+ if (clk->prepare_count) {
+ clk_disable(clk);
+ clk_disable(parent);
+ __clk_unprepare(parent);
+ }
+ return ret;
+ }
+
+ /*
+ * Finish the migration of prepare state and undo the changes done
+ * for preventing a race with clk_enable().
+ */
+ if (clk->prepare_count) {
+ clk_disable(clk);
+ clk_disable(old_parent);
+ __clk_unprepare(old_parent);
+ }
+
+ /* update debugfs with new clk tree topology */
+ clk_debug_reparent(clk, parent);
+ return 0;
+}
+
/**
* __clk_speculate_rates
* @clk: first clk in the subtree
@@ -1026,7 +1201,7 @@ EXPORT_SYMBOL_GPL(clk_get_rate);
* pre-rate change notifications and returns early if no clks in the
* subtree have subscribed to the notifications. Note that if a clk does not
* implement the .recalc_rate callback then it is assumed that the clock will
- * take on the rate of it's parent.
+ * take on the rate of its parent.
*
* Caller must hold prepare_lock.
*/
@@ -1058,18 +1233,25 @@ out:
return ret;
}
-static void clk_calc_subtree(struct clk *clk, unsigned long new_rate)
+static void clk_calc_subtree(struct clk *clk, unsigned long new_rate,
+ struct clk *new_parent, u8 p_index)
{
struct clk *child;
clk->new_rate = new_rate;
+ clk->new_parent = new_parent;
+ clk->new_parent_index = p_index;
+ /* include clk in new parent's PRE_RATE_CHANGE notifications */
+ clk->new_child = NULL;
+ if (new_parent && new_parent != clk->parent)
+ new_parent->new_child = clk;
hlist_for_each_entry(child, &clk->children, child_node) {
if (child->ops->recalc_rate)
child->new_rate = child->ops->recalc_rate(child->hw, new_rate);
else
child->new_rate = new_rate;
- clk_calc_subtree(child, child->new_rate);
+ clk_calc_subtree(child, child->new_rate, NULL, 0);
}
}
@@ -1080,50 +1262,63 @@ static void clk_calc_subtree(struct clk *clk, unsigned long new_rate)
static struct clk *clk_calc_new_rates(struct clk *clk, unsigned long rate)
{
struct clk *top = clk;
+ struct clk *old_parent, *parent;
unsigned long best_parent_rate = 0;
unsigned long new_rate;
+ u8 p_index = 0;
/* sanity */
if (IS_ERR_OR_NULL(clk))
return NULL;
/* save parent rate, if it exists */
- if (clk->parent)
- best_parent_rate = clk->parent->rate;
-
- /* never propagate up to the parent */
- if (!(clk->flags & CLK_SET_RATE_PARENT)) {
- if (!clk->ops->round_rate) {
- clk->new_rate = clk->rate;
- return NULL;
- }
- new_rate = clk->ops->round_rate(clk->hw, rate, &best_parent_rate);
+ parent = old_parent = clk->parent;
+ if (parent)
+ best_parent_rate = parent->rate;
+
+ /* find the closest rate and parent clk/rate */
+ if (clk->ops->determine_rate) {
+ new_rate = clk->ops->determine_rate(clk->hw, rate,
+ &best_parent_rate,
+ &parent);
+ } else if (clk->ops->round_rate) {
+ new_rate = clk->ops->round_rate(clk->hw, rate,
+ &best_parent_rate);
+ } else if (!parent || !(clk->flags & CLK_SET_RATE_PARENT)) {
+ /* pass-through clock without adjustable parent */
+ clk->new_rate = clk->rate;
+ return NULL;
+ } else {
+ /* pass-through clock with adjustable parent */
+ top = clk_calc_new_rates(parent, rate);
+ new_rate = parent->new_rate;
goto out;
}
- /* need clk->parent from here on out */
- if (!clk->parent) {
- pr_debug("%s: %s has NULL parent\n", __func__, clk->name);
+ /* some clocks must be gated to change parent */
+ if (parent != old_parent &&
+ (clk->flags & CLK_SET_PARENT_GATE) && clk->prepare_count) {
+ pr_debug("%s: %s not gated but wants to reparent\n",
+ __func__, clk->name);
return NULL;
}
- if (!clk->ops->round_rate) {
- top = clk_calc_new_rates(clk->parent, rate);
- new_rate = clk->parent->new_rate;
-
- goto out;
+ /* try finding the new parent index */
+ if (parent) {
+ p_index = clk_fetch_parent_index(clk, parent);
+ if (p_index == clk->num_parents) {
+			pr_debug("%s: clk %s cannot be parent of clk %s\n",
+ __func__, parent->name, clk->name);
+ return NULL;
+ }
}
- new_rate = clk->ops->round_rate(clk->hw, rate, &best_parent_rate);
-
- if (best_parent_rate != clk->parent->rate) {
- top = clk_calc_new_rates(clk->parent, best_parent_rate);
-
- goto out;
- }
+ if ((clk->flags & CLK_SET_RATE_PARENT) && parent &&
+ best_parent_rate != parent->rate)
+ top = clk_calc_new_rates(parent, best_parent_rate);
out:
- clk_calc_subtree(clk, new_rate);
+ clk_calc_subtree(clk, new_rate, parent, p_index);
return top;
}
@@ -1135,7 +1330,7 @@ out:
*/
static struct clk *clk_propagate_rate_change(struct clk *clk, unsigned long event)
{
- struct clk *child, *fail_clk = NULL;
+ struct clk *child, *tmp_clk, *fail_clk = NULL;
int ret = NOTIFY_DONE;
if (clk->rate == clk->new_rate)
@@ -1148,9 +1343,19 @@ static struct clk *clk_propagate_rate_change(struct clk *clk, unsigned long even
}
hlist_for_each_entry(child, &clk->children, child_node) {
- clk = clk_propagate_rate_change(child, event);
- if (clk)
- fail_clk = clk;
+ /* Skip children who will be reparented to another clock */
+ if (child->new_parent && child->new_parent != clk)
+ continue;
+ tmp_clk = clk_propagate_rate_change(child, event);
+ if (tmp_clk)
+ fail_clk = tmp_clk;
+ }
+
+ /* handle the new child who might not be in clk->children yet */
+ if (clk->new_child) {
+ tmp_clk = clk_propagate_rate_change(clk->new_child, event);
+ if (tmp_clk)
+ fail_clk = tmp_clk;
}
return fail_clk;
@@ -1168,6 +1373,10 @@ static void clk_change_rate(struct clk *clk)
old_rate = clk->rate;
+ /* set parent */
+ if (clk->new_parent && clk->new_parent != clk->parent)
+ __clk_set_parent(clk, clk->new_parent, clk->new_parent_index);
+
if (clk->parent)
best_parent_rate = clk->parent->rate;
@@ -1182,8 +1391,16 @@ static void clk_change_rate(struct clk *clk)
if (clk->notifier_count && old_rate != clk->rate)
__clk_notify(clk, POST_RATE_CHANGE, old_rate, clk->rate);
- hlist_for_each_entry(child, &clk->children, child_node)
+ hlist_for_each_entry(child, &clk->children, child_node) {
+ /* Skip children who will be reparented to another clock */
+ if (child->new_parent && child->new_parent != clk)
+ continue;
clk_change_rate(child);
+ }
+
+ /* handle the new child who might not be in clk->children yet */
+ if (clk->new_child)
+ clk_change_rate(clk->new_child);
}
/**
@@ -1198,7 +1415,7 @@ static void clk_change_rate(struct clk *clk)
* outcome of clk's .round_rate implementation. If *parent_rate is unchanged
* after calling .round_rate then upstream parent propagation is ignored. If
* *parent_rate comes back with a new rate for clk's parent then we propagate
- * up to clk's parent and set it's rate. Upward propagation will continue
+ * up to clk's parent and set its rate. Upward propagation will continue
* until either a clk does not support the CLK_SET_RATE_PARENT flag or
* .round_rate stops requesting changes to clk's parent_rate.
*
@@ -1212,6 +1429,9 @@ int clk_set_rate(struct clk *clk, unsigned long rate)
struct clk *top, *fail_clk;
int ret = 0;
+ if (!clk)
+ return 0;
+
/* prevent racing with updates to the clock topology */
clk_prepare_lock();
@@ -1315,30 +1535,12 @@ static struct clk *__clk_init_parent(struct clk *clk)
kzalloc((sizeof(struct clk*) * clk->num_parents),
GFP_KERNEL);
- if (!clk->parents)
- ret = __clk_lookup(clk->parent_names[index]);
- else if (!clk->parents[index])
- ret = clk->parents[index] =
- __clk_lookup(clk->parent_names[index]);
- else
- ret = clk->parents[index];
+ ret = clk_get_parent_by_index(clk, index);
out:
return ret;
}
-static void clk_reparent(struct clk *clk, struct clk *new_parent)
-{
- hlist_del(&clk->child_node);
-
- if (new_parent)
- hlist_add_head(&clk->child_node, &new_parent->children);
- else
- hlist_add_head(&clk->child_node, &clk_orphan_list);
-
- clk->parent = new_parent;
-}
-
void __clk_reparent(struct clk *clk, struct clk *new_parent)
{
clk_reparent(clk, new_parent);
@@ -1346,98 +1548,6 @@ void __clk_reparent(struct clk *clk, struct clk *new_parent)
__clk_recalc_rates(clk, POST_RATE_CHANGE);
}
-static u8 clk_fetch_parent_index(struct clk *clk, struct clk *parent)
-{
- u8 i;
-
- if (!clk->parents)
- clk->parents = kzalloc((sizeof(struct clk*) * clk->num_parents),
- GFP_KERNEL);
-
- /*
- * find index of new parent clock using cached parent ptrs,
- * or if not yet cached, use string name comparison and cache
- * them now to avoid future calls to __clk_lookup.
- */
- for (i = 0; i < clk->num_parents; i++) {
- if (clk->parents && clk->parents[i] == parent)
- break;
- else if (!strcmp(clk->parent_names[i], parent->name)) {
- if (clk->parents)
- clk->parents[i] = __clk_lookup(parent->name);
- break;
- }
- }
-
- return i;
-}
-
-static int __clk_set_parent(struct clk *clk, struct clk *parent, u8 p_index)
-{
- unsigned long flags;
- int ret = 0;
- struct clk *old_parent = clk->parent;
-
- /*
- * Migrate prepare state between parents and prevent race with
- * clk_enable().
- *
- * If the clock is not prepared, then a race with
- * clk_enable/disable() is impossible since we already have the
- * prepare lock (future calls to clk_enable() need to be preceded by
- * a clk_prepare()).
- *
- * If the clock is prepared, migrate the prepared state to the new
- * parent and also protect against a race with clk_enable() by
- * forcing the clock and the new parent on. This ensures that all
- * future calls to clk_enable() are practically NOPs with respect to
- * hardware and software states.
- *
- * See also: Comment for clk_set_parent() below.
- */
- if (clk->prepare_count) {
- __clk_prepare(parent);
- clk_enable(parent);
- clk_enable(clk);
- }
-
- /* update the clk tree topology */
- flags = clk_enable_lock();
- clk_reparent(clk, parent);
- clk_enable_unlock(flags);
-
- /* change clock input source */
- if (parent && clk->ops->set_parent)
- ret = clk->ops->set_parent(clk->hw, p_index);
-
- if (ret) {
- flags = clk_enable_lock();
- clk_reparent(clk, old_parent);
- clk_enable_unlock(flags);
-
- if (clk->prepare_count) {
- clk_disable(clk);
- clk_disable(parent);
- __clk_unprepare(parent);
- }
- return ret;
- }
-
- /*
- * Finish the migration of prepare state and undo the changes done
- * for preventing a race with clk_enable().
- */
- if (clk->prepare_count) {
- clk_disable(clk);
- clk_disable(old_parent);
- __clk_unprepare(old_parent);
- }
-
- /* update debugfs with new clk tree topology */
- clk_debug_reparent(clk, parent);
- return 0;
-}
-
/**
* clk_set_parent - switch the parent of a mux clk
* @clk: the mux clk whose input we are switching
@@ -1461,7 +1571,10 @@ int clk_set_parent(struct clk *clk, struct clk *parent)
u8 p_index = 0;
unsigned long p_rate = 0;
- if (!clk || !clk->ops)
+ if (!clk)
+ return 0;
+
+ if (!clk->ops)
return -EINVAL;
/* verify ops for multi-parent clks */
@@ -1544,8 +1657,9 @@ int __clk_init(struct device *dev, struct clk *clk)
/* check that clk_ops are sane. See Documentation/clk.txt */
if (clk->ops->set_rate &&
- !(clk->ops->round_rate && clk->ops->recalc_rate)) {
- pr_warning("%s: %s must implement .round_rate & .recalc_rate\n",
+ !((clk->ops->round_rate || clk->ops->determine_rate) &&
+ clk->ops->recalc_rate)) {
+ pr_warning("%s: %s must implement .round_rate or .determine_rate in addition to .recalc_rate\n",
__func__, clk->name);
ret = -EINVAL;
goto out;
@@ -1628,7 +1742,7 @@ int __clk_init(struct device *dev, struct clk *clk)
* this clock
*/
hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) {
- if (orphan->ops->get_parent) {
+ if (orphan->num_parents && orphan->ops->get_parent) {
i = orphan->ops->get_parent(orphan->hw);
if (!strcmp(clk->name, orphan->parent_names[i]))
__clk_reparent(orphan, clk);
@@ -1648,7 +1762,7 @@ int __clk_init(struct device *dev, struct clk *clk)
* The .init callback is not used by any of the basic clock types, but
* exists for weird hardware that must perform initialization magic.
* Please consider other ways of solving initialization problems before
- * using this callback, as it's use is discouraged.
+ * using this callback, as its use is discouraged.
*/
if (clk->ops->init)
clk->ops->init(clk->hw);
@@ -1675,7 +1789,7 @@ out:
* very large numbers of clocks that need to be statically initialized. It is
* a layering violation to include clk-private.h from any code which implements
* a clock's .ops; as such any statically initialized clock data MUST be in a
- * separate C file from the logic that implements it's operations. Returns 0
+ * separate C file from the logic that implements its operations. Returns 0
* on success, otherwise an error code.
*/
struct clk *__clk_register(struct device *dev, struct clk_hw *hw)
@@ -2115,13 +2229,13 @@ EXPORT_SYMBOL_GPL(of_clk_get_parent_name);
*/
void __init of_clk_init(const struct of_device_id *matches)
{
+ const struct of_device_id *match;
struct device_node *np;
if (!matches)
matches = __clk_of_table;
- for_each_matching_node(np, matches) {
- const struct of_device_id *match = of_match_node(matches, np);
+ for_each_matching_node_and_match(np, matches, &match) {
of_clk_init_cb_t clk_init_cb = match->data;
clk_init_cb(np);
}
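
For orientation, a hedged sketch of how a provider could hook the .determine_rate callback wired up in the clk.c hunks above; the foo_* identifiers are hypothetical, while the callback signature and the __clk_mux_determine_rate() helper are exactly what this patch adds.

	static long foo_mux_determine_rate(struct clk_hw *hw, unsigned long rate,
					   unsigned long *best_parent_rate,
					   struct clk **best_parent_p)
	{
		/* pick the parent whose achievable rate comes closest to 'rate' */
		return __clk_mux_determine_rate(hw, rate, best_parent_rate,
						best_parent_p);
	}

	static const struct clk_ops foo_mux_ops = {
		.get_parent	= foo_mux_get_parent,	/* hypothetical */
		.set_parent	= foo_mux_set_parent,	/* hypothetical */
		.determine_rate	= foo_mux_determine_rate,
	};

With this in place, clk_set_rate() on the mux may switch parents as part of the rate change, subject to the CLK_SET_PARENT_GATE and CLK_SET_RATE_NO_REPARENT checks in clk_calc_new_rates() above.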
diff --git a/drivers/clk/mmp/clk-mmp2.c b/drivers/clk/mmp/clk-mmp2.c
index d1f1a19..b2721ca 100644
--- a/drivers/clk/mmp/clk-mmp2.c
+++ b/drivers/clk/mmp/clk-mmp2.c
@@ -248,7 +248,8 @@ void __init mmp2_clk_init(void)
clk_register_clkdev(clk, NULL, "mmp2-pwm.3");
clk = clk_register_mux(NULL, "uart0_mux", uart_parent,
- ARRAY_SIZE(uart_parent), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(uart_parent),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
apbc_base + APBC_UART0, 4, 3, 0, &clk_lock);
clk_set_parent(clk, vctcxo);
clk_register_clkdev(clk, "uart_mux.0", NULL);
@@ -258,7 +259,8 @@ void __init mmp2_clk_init(void)
clk_register_clkdev(clk, NULL, "pxa2xx-uart.0");
clk = clk_register_mux(NULL, "uart1_mux", uart_parent,
- ARRAY_SIZE(uart_parent), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(uart_parent),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
apbc_base + APBC_UART1, 4, 3, 0, &clk_lock);
clk_set_parent(clk, vctcxo);
clk_register_clkdev(clk, "uart_mux.1", NULL);
@@ -268,7 +270,8 @@ void __init mmp2_clk_init(void)
clk_register_clkdev(clk, NULL, "pxa2xx-uart.1");
clk = clk_register_mux(NULL, "uart2_mux", uart_parent,
- ARRAY_SIZE(uart_parent), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(uart_parent),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
apbc_base + APBC_UART2, 4, 3, 0, &clk_lock);
clk_set_parent(clk, vctcxo);
clk_register_clkdev(clk, "uart_mux.2", NULL);
@@ -278,7 +281,8 @@ void __init mmp2_clk_init(void)
clk_register_clkdev(clk, NULL, "pxa2xx-uart.2");
clk = clk_register_mux(NULL, "uart3_mux", uart_parent,
- ARRAY_SIZE(uart_parent), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(uart_parent),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
apbc_base + APBC_UART3, 4, 3, 0, &clk_lock);
clk_set_parent(clk, vctcxo);
clk_register_clkdev(clk, "uart_mux.3", NULL);
@@ -288,7 +292,8 @@ void __init mmp2_clk_init(void)
clk_register_clkdev(clk, NULL, "pxa2xx-uart.3");
clk = clk_register_mux(NULL, "ssp0_mux", ssp_parent,
- ARRAY_SIZE(ssp_parent), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(ssp_parent),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
apbc_base + APBC_SSP0, 4, 3, 0, &clk_lock);
clk_register_clkdev(clk, "uart_mux.0", NULL);
@@ -297,7 +302,8 @@ void __init mmp2_clk_init(void)
clk_register_clkdev(clk, NULL, "mmp-ssp.0");
clk = clk_register_mux(NULL, "ssp1_mux", ssp_parent,
- ARRAY_SIZE(ssp_parent), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(ssp_parent),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
apbc_base + APBC_SSP1, 4, 3, 0, &clk_lock);
clk_register_clkdev(clk, "ssp_mux.1", NULL);
@@ -306,7 +312,8 @@ void __init mmp2_clk_init(void)
clk_register_clkdev(clk, NULL, "mmp-ssp.1");
clk = clk_register_mux(NULL, "ssp2_mux", ssp_parent,
- ARRAY_SIZE(ssp_parent), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(ssp_parent),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
apbc_base + APBC_SSP2, 4, 3, 0, &clk_lock);
clk_register_clkdev(clk, "ssp_mux.2", NULL);
@@ -315,7 +322,8 @@ void __init mmp2_clk_init(void)
clk_register_clkdev(clk, NULL, "mmp-ssp.2");
clk = clk_register_mux(NULL, "ssp3_mux", ssp_parent,
- ARRAY_SIZE(ssp_parent), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(ssp_parent),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
apbc_base + APBC_SSP3, 4, 3, 0, &clk_lock);
clk_register_clkdev(clk, "ssp_mux.3", NULL);
@@ -324,7 +332,8 @@ void __init mmp2_clk_init(void)
clk_register_clkdev(clk, NULL, "mmp-ssp.3");
clk = clk_register_mux(NULL, "sdh_mux", sdh_parent,
- ARRAY_SIZE(sdh_parent), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(sdh_parent),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
apmu_base + APMU_SDH0, 8, 2, 0, &clk_lock);
clk_register_clkdev(clk, "sdh_mux", NULL);
@@ -354,7 +363,8 @@ void __init mmp2_clk_init(void)
clk_register_clkdev(clk, "usb_clk", NULL);
clk = clk_register_mux(NULL, "disp0_mux", disp_parent,
- ARRAY_SIZE(disp_parent), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(disp_parent),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
apmu_base + APMU_DISP0, 6, 2, 0, &clk_lock);
clk_register_clkdev(clk, "disp_mux.0", NULL);
@@ -376,7 +386,8 @@ void __init mmp2_clk_init(void)
clk_register_clkdev(clk, "disp_sphy.0", NULL);
clk = clk_register_mux(NULL, "disp1_mux", disp_parent,
- ARRAY_SIZE(disp_parent), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(disp_parent),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
apmu_base + APMU_DISP1, 6, 2, 0, &clk_lock);
clk_register_clkdev(clk, "disp_mux.1", NULL);
@@ -394,7 +405,8 @@ void __init mmp2_clk_init(void)
clk_register_clkdev(clk, "ccic_arbiter", NULL);
clk = clk_register_mux(NULL, "ccic0_mux", ccic_parent,
- ARRAY_SIZE(ccic_parent), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(ccic_parent),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
apmu_base + APMU_CCIC0, 6, 2, 0, &clk_lock);
clk_register_clkdev(clk, "ccic_mux.0", NULL);
@@ -421,7 +433,8 @@ void __init mmp2_clk_init(void)
clk_register_clkdev(clk, "sphyclk", "mmp-ccic.0");
clk = clk_register_mux(NULL, "ccic1_mux", ccic_parent,
- ARRAY_SIZE(ccic_parent), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(ccic_parent),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
apmu_base + APMU_CCIC1, 6, 2, 0, &clk_lock);
clk_register_clkdev(clk, "ccic_mux.1", NULL);
diff --git a/drivers/clk/mmp/clk-pxa168.c b/drivers/clk/mmp/clk-pxa168.c
index 28b3b51..014396b 100644
--- a/drivers/clk/mmp/clk-pxa168.c
+++ b/drivers/clk/mmp/clk-pxa168.c
@@ -199,7 +199,8 @@ void __init pxa168_clk_init(void)
clk_register_clkdev(clk, NULL, "pxa168-pwm.3");
clk = clk_register_mux(NULL, "uart0_mux", uart_parent,
- ARRAY_SIZE(uart_parent), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(uart_parent),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
apbc_base + APBC_UART0, 4, 3, 0, &clk_lock);
clk_set_parent(clk, uart_pll);
clk_register_clkdev(clk, "uart_mux.0", NULL);
@@ -209,7 +210,8 @@ void __init pxa168_clk_init(void)
clk_register_clkdev(clk, NULL, "pxa2xx-uart.0");
clk = clk_register_mux(NULL, "uart1_mux", uart_parent,
- ARRAY_SIZE(uart_parent), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(uart_parent),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
apbc_base + APBC_UART1, 4, 3, 0, &clk_lock);
clk_set_parent(clk, uart_pll);
clk_register_clkdev(clk, "uart_mux.1", NULL);
@@ -219,7 +221,8 @@ void __init pxa168_clk_init(void)
clk_register_clkdev(clk, NULL, "pxa2xx-uart.1");
clk = clk_register_mux(NULL, "uart2_mux", uart_parent,
- ARRAY_SIZE(uart_parent), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(uart_parent),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
apbc_base + APBC_UART2, 4, 3, 0, &clk_lock);
clk_set_parent(clk, uart_pll);
clk_register_clkdev(clk, "uart_mux.2", NULL);
@@ -229,7 +232,8 @@ void __init pxa168_clk_init(void)
clk_register_clkdev(clk, NULL, "pxa2xx-uart.2");
clk = clk_register_mux(NULL, "ssp0_mux", ssp_parent,
- ARRAY_SIZE(ssp_parent), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(ssp_parent),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
apbc_base + APBC_SSP0, 4, 3, 0, &clk_lock);
clk_register_clkdev(clk, "uart_mux.0", NULL);
@@ -238,7 +242,8 @@ void __init pxa168_clk_init(void)
clk_register_clkdev(clk, NULL, "mmp-ssp.0");
clk = clk_register_mux(NULL, "ssp1_mux", ssp_parent,
- ARRAY_SIZE(ssp_parent), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(ssp_parent),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
apbc_base + APBC_SSP1, 4, 3, 0, &clk_lock);
clk_register_clkdev(clk, "ssp_mux.1", NULL);
@@ -247,7 +252,8 @@ void __init pxa168_clk_init(void)
clk_register_clkdev(clk, NULL, "mmp-ssp.1");
clk = clk_register_mux(NULL, "ssp2_mux", ssp_parent,
- ARRAY_SIZE(ssp_parent), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(ssp_parent),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
apbc_base + APBC_SSP2, 4, 3, 0, &clk_lock);
clk_register_clkdev(clk, "ssp_mux.2", NULL);
@@ -256,7 +262,8 @@ void __init pxa168_clk_init(void)
clk_register_clkdev(clk, NULL, "mmp-ssp.2");
clk = clk_register_mux(NULL, "ssp3_mux", ssp_parent,
- ARRAY_SIZE(ssp_parent), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(ssp_parent),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
apbc_base + APBC_SSP3, 4, 3, 0, &clk_lock);
clk_register_clkdev(clk, "ssp_mux.3", NULL);
@@ -265,7 +272,8 @@ void __init pxa168_clk_init(void)
clk_register_clkdev(clk, NULL, "mmp-ssp.3");
clk = clk_register_mux(NULL, "ssp4_mux", ssp_parent,
- ARRAY_SIZE(ssp_parent), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(ssp_parent),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
apbc_base + APBC_SSP4, 4, 3, 0, &clk_lock);
clk_register_clkdev(clk, "ssp_mux.4", NULL);
@@ -278,7 +286,8 @@ void __init pxa168_clk_init(void)
clk_register_clkdev(clk, NULL, "pxa3xx-nand.0");
clk = clk_register_mux(NULL, "sdh0_mux", sdh_parent,
- ARRAY_SIZE(sdh_parent), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(sdh_parent),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
apmu_base + APMU_SDH0, 6, 1, 0, &clk_lock);
clk_register_clkdev(clk, "sdh0_mux", NULL);
@@ -287,7 +296,8 @@ void __init pxa168_clk_init(void)
clk_register_clkdev(clk, NULL, "sdhci-pxa.0");
clk = clk_register_mux(NULL, "sdh1_mux", sdh_parent,
- ARRAY_SIZE(sdh_parent), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(sdh_parent),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
apmu_base + APMU_SDH1, 6, 1, 0, &clk_lock);
clk_register_clkdev(clk, "sdh1_mux", NULL);
@@ -304,7 +314,8 @@ void __init pxa168_clk_init(void)
clk_register_clkdev(clk, "sph_clk", NULL);
clk = clk_register_mux(NULL, "disp0_mux", disp_parent,
- ARRAY_SIZE(disp_parent), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(disp_parent),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
apmu_base + APMU_DISP0, 6, 1, 0, &clk_lock);
clk_register_clkdev(clk, "disp_mux.0", NULL);
@@ -317,7 +328,8 @@ void __init pxa168_clk_init(void)
clk_register_clkdev(clk, "hclk", "mmp-disp.0");
clk = clk_register_mux(NULL, "ccic0_mux", ccic_parent,
- ARRAY_SIZE(ccic_parent), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(ccic_parent),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
apmu_base + APMU_CCIC0, 6, 1, 0, &clk_lock);
clk_register_clkdev(clk, "ccic_mux.0", NULL);
@@ -327,8 +339,8 @@ void __init pxa168_clk_init(void)
clk = clk_register_mux(NULL, "ccic0_phy_mux", ccic_phy_parent,
ARRAY_SIZE(ccic_phy_parent),
- CLK_SET_RATE_PARENT, apmu_base + APMU_CCIC0,
- 7, 1, 0, &clk_lock);
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+ apmu_base + APMU_CCIC0, 7, 1, 0, &clk_lock);
clk_register_clkdev(clk, "ccic_phy_mux.0", NULL);
clk = mmp_clk_register_apmu("ccic0_phy", "ccic0_phy_mux",
diff --git a/drivers/clk/mmp/clk-pxa910.c b/drivers/clk/mmp/clk-pxa910.c
index 6ec0569..9efc6a4 100644
--- a/drivers/clk/mmp/clk-pxa910.c
+++ b/drivers/clk/mmp/clk-pxa910.c
@@ -204,7 +204,8 @@ void __init pxa910_clk_init(void)
clk_register_clkdev(clk, NULL, "pxa910-pwm.3");
clk = clk_register_mux(NULL, "uart0_mux", uart_parent,
- ARRAY_SIZE(uart_parent), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(uart_parent),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
apbc_base + APBC_UART0, 4, 3, 0, &clk_lock);
clk_set_parent(clk, uart_pll);
clk_register_clkdev(clk, "uart_mux.0", NULL);
@@ -214,7 +215,8 @@ void __init pxa910_clk_init(void)
clk_register_clkdev(clk, NULL, "pxa2xx-uart.0");
clk = clk_register_mux(NULL, "uart1_mux", uart_parent,
- ARRAY_SIZE(uart_parent), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(uart_parent),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
apbc_base + APBC_UART1, 4, 3, 0, &clk_lock);
clk_set_parent(clk, uart_pll);
clk_register_clkdev(clk, "uart_mux.1", NULL);
@@ -224,7 +226,8 @@ void __init pxa910_clk_init(void)
clk_register_clkdev(clk, NULL, "pxa2xx-uart.1");
clk = clk_register_mux(NULL, "uart2_mux", uart_parent,
- ARRAY_SIZE(uart_parent), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(uart_parent),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
apbcp_base + APBCP_UART2, 4, 3, 0, &clk_lock);
clk_set_parent(clk, uart_pll);
clk_register_clkdev(clk, "uart_mux.2", NULL);
@@ -234,7 +237,8 @@ void __init pxa910_clk_init(void)
clk_register_clkdev(clk, NULL, "pxa2xx-uart.2");
clk = clk_register_mux(NULL, "ssp0_mux", ssp_parent,
- ARRAY_SIZE(ssp_parent), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(ssp_parent),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
apbc_base + APBC_SSP0, 4, 3, 0, &clk_lock);
clk_register_clkdev(clk, "uart_mux.0", NULL);
@@ -243,7 +247,8 @@ void __init pxa910_clk_init(void)
clk_register_clkdev(clk, NULL, "mmp-ssp.0");
clk = clk_register_mux(NULL, "ssp1_mux", ssp_parent,
- ARRAY_SIZE(ssp_parent), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(ssp_parent),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
apbc_base + APBC_SSP1, 4, 3, 0, &clk_lock);
clk_register_clkdev(clk, "ssp_mux.1", NULL);
@@ -256,7 +261,8 @@ void __init pxa910_clk_init(void)
clk_register_clkdev(clk, NULL, "pxa3xx-nand.0");
clk = clk_register_mux(NULL, "sdh0_mux", sdh_parent,
- ARRAY_SIZE(sdh_parent), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(sdh_parent),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
apmu_base + APMU_SDH0, 6, 1, 0, &clk_lock);
clk_register_clkdev(clk, "sdh0_mux", NULL);
@@ -265,7 +271,8 @@ void __init pxa910_clk_init(void)
clk_register_clkdev(clk, NULL, "sdhci-pxa.0");
clk = clk_register_mux(NULL, "sdh1_mux", sdh_parent,
- ARRAY_SIZE(sdh_parent), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(sdh_parent),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
apmu_base + APMU_SDH1, 6, 1, 0, &clk_lock);
clk_register_clkdev(clk, "sdh1_mux", NULL);
@@ -282,7 +289,8 @@ void __init pxa910_clk_init(void)
clk_register_clkdev(clk, "sph_clk", NULL);
clk = clk_register_mux(NULL, "disp0_mux", disp_parent,
- ARRAY_SIZE(disp_parent), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(disp_parent),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
apmu_base + APMU_DISP0, 6, 1, 0, &clk_lock);
clk_register_clkdev(clk, "disp_mux.0", NULL);
@@ -291,7 +299,8 @@ void __init pxa910_clk_init(void)
clk_register_clkdev(clk, NULL, "mmp-disp.0");
clk = clk_register_mux(NULL, "ccic0_mux", ccic_parent,
- ARRAY_SIZE(ccic_parent), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(ccic_parent),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
apmu_base + APMU_CCIC0, 6, 1, 0, &clk_lock);
clk_register_clkdev(clk, "ccic_mux.0", NULL);
@@ -301,8 +310,8 @@ void __init pxa910_clk_init(void)
clk = clk_register_mux(NULL, "ccic0_phy_mux", ccic_phy_parent,
ARRAY_SIZE(ccic_phy_parent),
- CLK_SET_RATE_PARENT, apmu_base + APMU_CCIC0,
- 7, 1, 0, &clk_lock);
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+ apmu_base + APMU_CCIC0, 7, 1, 0, &clk_lock);
clk_register_clkdev(clk, "ccic_phy_mux.0", NULL);
clk = mmp_clk_register_apmu("ccic0_phy", "ccic0_phy_mux",
diff --git a/drivers/clk/mvebu/armada-370.c b/drivers/clk/mvebu/armada-370.c
index 079960e..fc777bd 100644
--- a/drivers/clk/mvebu/armada-370.c
+++ b/drivers/clk/mvebu/armada-370.c
@@ -32,13 +32,13 @@
enum { A370_CPU_TO_NBCLK, A370_CPU_TO_HCLK, A370_CPU_TO_DRAMCLK };
-static const struct coreclk_ratio __initconst a370_coreclk_ratios[] = {
+static const struct coreclk_ratio a370_coreclk_ratios[] __initconst = {
{ .id = A370_CPU_TO_NBCLK, .name = "nbclk" },
{ .id = A370_CPU_TO_HCLK, .name = "hclk" },
{ .id = A370_CPU_TO_DRAMCLK, .name = "dramclk" },
};
-static const u32 __initconst a370_tclk_freqs[] = {
+static const u32 a370_tclk_freqs[] __initconst = {
16600000,
20000000,
};
@@ -52,7 +52,7 @@ static u32 __init a370_get_tclk_freq(void __iomem *sar)
return a370_tclk_freqs[tclk_freq_select];
}
-static const u32 __initconst a370_cpu_freqs[] = {
+static const u32 a370_cpu_freqs[] __initconst = {
400000000,
533000000,
667000000,
@@ -78,7 +78,7 @@ static u32 __init a370_get_cpu_freq(void __iomem *sar)
return cpu_freq;
}
-static const int __initconst a370_nbclk_ratios[32][2] = {
+static const int a370_nbclk_ratios[32][2] __initconst = {
{0, 1}, {1, 2}, {2, 2}, {2, 2},
{1, 2}, {1, 2}, {1, 1}, {2, 3},
{0, 1}, {1, 2}, {2, 4}, {0, 1},
@@ -89,7 +89,7 @@ static const int __initconst a370_nbclk_ratios[32][2] = {
{0, 1}, {0, 1}, {0, 1}, {0, 1},
};
-static const int __initconst a370_hclk_ratios[32][2] = {
+static const int a370_hclk_ratios[32][2] __initconst = {
{0, 1}, {1, 2}, {2, 6}, {2, 3},
{1, 3}, {1, 4}, {1, 2}, {2, 6},
{0, 1}, {1, 6}, {2, 10}, {0, 1},
@@ -100,7 +100,7 @@ static const int __initconst a370_hclk_ratios[32][2] = {
{0, 1}, {0, 1}, {0, 1}, {0, 1},
};
-static const int __initconst a370_dramclk_ratios[32][2] = {
+static const int a370_dramclk_ratios[32][2] __initconst = {
{0, 1}, {1, 2}, {2, 3}, {2, 3},
{1, 3}, {1, 2}, {1, 2}, {2, 6},
{0, 1}, {1, 3}, {2, 5}, {0, 1},
@@ -152,7 +152,7 @@ CLK_OF_DECLARE(a370_core_clk, "marvell,armada-370-core-clock",
* Clock Gating Control
*/
-static const struct clk_gating_soc_desc __initconst a370_gating_desc[] = {
+static const struct clk_gating_soc_desc a370_gating_desc[] __initconst = {
{ "audio", NULL, 0, 0 },
{ "pex0_en", NULL, 1, 0 },
{ "pex1_en", NULL, 2, 0 },
diff --git a/drivers/clk/mvebu/armada-xp.c b/drivers/clk/mvebu/armada-xp.c
index 13b62ce..9922c44 100644
--- a/drivers/clk/mvebu/armada-xp.c
+++ b/drivers/clk/mvebu/armada-xp.c
@@ -40,7 +40,7 @@
enum { AXP_CPU_TO_NBCLK, AXP_CPU_TO_HCLK, AXP_CPU_TO_DRAMCLK };
-static const struct coreclk_ratio __initconst axp_coreclk_ratios[] = {
+static const struct coreclk_ratio axp_coreclk_ratios[] __initconst = {
{ .id = AXP_CPU_TO_NBCLK, .name = "nbclk" },
{ .id = AXP_CPU_TO_HCLK, .name = "hclk" },
{ .id = AXP_CPU_TO_DRAMCLK, .name = "dramclk" },
@@ -52,7 +52,7 @@ static u32 __init axp_get_tclk_freq(void __iomem *sar)
return 250000000;
}
-static const u32 __initconst axp_cpu_freqs[] = {
+static const u32 axp_cpu_freqs[] __initconst = {
1000000000,
1066000000,
1200000000,
@@ -89,7 +89,7 @@ static u32 __init axp_get_cpu_freq(void __iomem *sar)
return cpu_freq;
}
-static const int __initconst axp_nbclk_ratios[32][2] = {
+static const int axp_nbclk_ratios[32][2] __initconst = {
{0, 1}, {1, 2}, {2, 2}, {2, 2},
{1, 2}, {1, 2}, {1, 1}, {2, 3},
{0, 1}, {1, 2}, {2, 4}, {0, 1},
@@ -100,7 +100,7 @@ static const int __initconst axp_nbclk_ratios[32][2] = {
{0, 1}, {0, 1}, {0, 1}, {0, 1},
};
-static const int __initconst axp_hclk_ratios[32][2] = {
+static const int axp_hclk_ratios[32][2] __initconst = {
{0, 1}, {1, 2}, {2, 6}, {2, 3},
{1, 3}, {1, 4}, {1, 2}, {2, 6},
{0, 1}, {1, 6}, {2, 10}, {0, 1},
@@ -111,7 +111,7 @@ static const int __initconst axp_hclk_ratios[32][2] = {
{0, 1}, {0, 1}, {0, 1}, {0, 1},
};
-static const int __initconst axp_dramclk_ratios[32][2] = {
+static const int axp_dramclk_ratios[32][2] __initconst = {
{0, 1}, {1, 2}, {2, 3}, {2, 3},
{1, 3}, {1, 2}, {1, 2}, {2, 6},
{0, 1}, {1, 3}, {2, 5}, {0, 1},
@@ -169,7 +169,7 @@ CLK_OF_DECLARE(axp_core_clk, "marvell,armada-xp-core-clock",
* Clock Gating Control
*/
-static const struct clk_gating_soc_desc __initconst axp_gating_desc[] = {
+static const struct clk_gating_soc_desc axp_gating_desc[] __initconst = {
{ "audio", NULL, 0, 0 },
{ "ge3", NULL, 1, 0 },
{ "ge2", NULL, 2, 0 },
diff --git a/drivers/clk/mvebu/clk-cpu.c b/drivers/clk/mvebu/clk-cpu.c
index b0fbc07..1466865 100644
--- a/drivers/clk/mvebu/clk-cpu.c
+++ b/drivers/clk/mvebu/clk-cpu.c
@@ -119,7 +119,7 @@ void __init of_cpu_clk_setup(struct device_node *node)
cpuclk = kzalloc(ncpus * sizeof(*cpuclk), GFP_KERNEL);
if (WARN_ON(!cpuclk))
- return;
+ goto cpuclk_out;
clks = kzalloc(ncpus * sizeof(*clks), GFP_KERNEL);
if (WARN_ON(!clks))
@@ -170,6 +170,8 @@ bail_out:
kfree(cpuclk[ncpus].clk_name);
clks_out:
kfree(cpuclk);
+cpuclk_out:
+ iounmap(clock_complex_base);
}
CLK_OF_DECLARE(armada_xp_cpu_clock, "marvell,armada-xp-cpu-clock",
diff --git a/drivers/clk/mvebu/common.c b/drivers/clk/mvebu/common.c
index adaa4a1..25ceccf 100644
--- a/drivers/clk/mvebu/common.c
+++ b/drivers/clk/mvebu/common.c
@@ -45,8 +45,10 @@ void __init mvebu_coreclk_setup(struct device_node *np,
clk_data.clk_num = 2 + desc->num_ratios;
clk_data.clks = kzalloc(clk_data.clk_num * sizeof(struct clk *),
GFP_KERNEL);
- if (WARN_ON(!clk_data.clks))
+ if (WARN_ON(!clk_data.clks)) {
+ iounmap(base);
return;
+ }
/* Register TCLK */
of_property_read_string_index(np, "clock-output-names", 0,
@@ -134,7 +136,7 @@ void __init mvebu_clk_gating_setup(struct device_node *np,
ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
if (WARN_ON(!ctrl))
- return;
+ goto ctrl_out;
spin_lock_init(&ctrl->lock);
@@ -145,10 +147,8 @@ void __init mvebu_clk_gating_setup(struct device_node *np,
ctrl->num_gates = n;
ctrl->gates = kzalloc(ctrl->num_gates * sizeof(struct clk *),
GFP_KERNEL);
- if (WARN_ON(!ctrl->gates)) {
- kfree(ctrl);
- return;
- }
+ if (WARN_ON(!ctrl->gates))
+ goto gates_out;
for (n = 0; n < ctrl->num_gates; n++) {
const char *parent =
@@ -160,4 +160,10 @@ void __init mvebu_clk_gating_setup(struct device_node *np,
}
of_clk_add_provider(np, clk_gating_get_src, ctrl);
+
+ return;
+gates_out:
+ kfree(ctrl);
+ctrl_out:
+ iounmap(base);
}
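
These clk-cpu.c and common.c fixes share one shape: every failure path unwinds through labels so the of_iomap()ed register base is released instead of leaked. A hedged, self-contained sketch of the pattern (example_* names are hypothetical):

	static void __init example_setup(struct device_node *np)
	{
		void __iomem *base;
		struct clk **clks;

		base = of_iomap(np, 0);
		if (WARN_ON(!base))
			return;

		clks = kzalloc(4 * sizeof(*clks), GFP_KERNEL);
		if (WARN_ON(!clks))
			goto base_out;

		/* register clocks and call of_clk_add_provider() here */
		return;

	base_out:
		iounmap(base);	/* released on the -ENOMEM path, not leaked */
	}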
diff --git a/drivers/clk/mvebu/dove.c b/drivers/clk/mvebu/dove.c
index 79d7aed..38aee1e 100644
--- a/drivers/clk/mvebu/dove.c
+++ b/drivers/clk/mvebu/dove.c
@@ -74,12 +74,12 @@
enum { DOVE_CPU_TO_L2, DOVE_CPU_TO_DDR };
-static const struct coreclk_ratio __initconst dove_coreclk_ratios[] = {
+static const struct coreclk_ratio dove_coreclk_ratios[] __initconst = {
{ .id = DOVE_CPU_TO_L2, .name = "l2clk", },
{ .id = DOVE_CPU_TO_DDR, .name = "ddrclk", }
};
-static const u32 __initconst dove_tclk_freqs[] = {
+static const u32 dove_tclk_freqs[] __initconst = {
166666667,
125000000,
0, 0
@@ -92,7 +92,7 @@ static u32 __init dove_get_tclk_freq(void __iomem *sar)
return dove_tclk_freqs[opt];
}
-static const u32 __initconst dove_cpu_freqs[] = {
+static const u32 dove_cpu_freqs[] __initconst = {
0, 0, 0, 0, 0,
1000000000,
933333333, 933333333,
@@ -111,12 +111,12 @@ static u32 __init dove_get_cpu_freq(void __iomem *sar)
return dove_cpu_freqs[opt];
}
-static const int __initconst dove_cpu_l2_ratios[8][2] = {
+static const int dove_cpu_l2_ratios[8][2] __initconst = {
{ 1, 1 }, { 0, 1 }, { 1, 2 }, { 0, 1 },
{ 1, 3 }, { 0, 1 }, { 1, 4 }, { 0, 1 }
};
-static const int __initconst dove_cpu_ddr_ratios[16][2] = {
+static const int dove_cpu_ddr_ratios[16][2] __initconst = {
{ 1, 1 }, { 0, 1 }, { 1, 2 }, { 2, 5 },
{ 1, 3 }, { 0, 1 }, { 1, 4 }, { 0, 1 },
{ 1, 5 }, { 0, 1 }, { 1, 6 }, { 0, 1 },
@@ -164,7 +164,7 @@ CLK_OF_DECLARE(dove_core_clk, "marvell,dove-core-clock", dove_coreclk_init);
* Clock Gating Control
*/
-static const struct clk_gating_soc_desc __initconst dove_gating_desc[] = {
+static const struct clk_gating_soc_desc dove_gating_desc[] __initconst = {
{ "usb0", NULL, 0, 0 },
{ "usb1", NULL, 1, 0 },
{ "ge", "gephy", 2, 0 },
diff --git a/drivers/clk/mvebu/kirkwood.c b/drivers/clk/mvebu/kirkwood.c
index 71d2461..2636a55 100644
--- a/drivers/clk/mvebu/kirkwood.c
+++ b/drivers/clk/mvebu/kirkwood.c
@@ -78,7 +78,7 @@
enum { KIRKWOOD_CPU_TO_L2, KIRKWOOD_CPU_TO_DDR };
-static const struct coreclk_ratio __initconst kirkwood_coreclk_ratios[] = {
+static const struct coreclk_ratio kirkwood_coreclk_ratios[] __initconst = {
{ .id = KIRKWOOD_CPU_TO_L2, .name = "l2clk", },
{ .id = KIRKWOOD_CPU_TO_DDR, .name = "ddrclk", }
};
@@ -90,7 +90,7 @@ static u32 __init kirkwood_get_tclk_freq(void __iomem *sar)
return (opt) ? 166666667 : 200000000;
}
-static const u32 __initconst kirkwood_cpu_freqs[] = {
+static const u32 kirkwood_cpu_freqs[] __initconst = {
0, 0, 0, 0,
600000000,
0,
@@ -111,12 +111,12 @@ static u32 __init kirkwood_get_cpu_freq(void __iomem *sar)
return kirkwood_cpu_freqs[opt];
}
-static const int __initconst kirkwood_cpu_l2_ratios[8][2] = {
+static const int kirkwood_cpu_l2_ratios[8][2] __initconst = {
{ 0, 1 }, { 1, 2 }, { 0, 1 }, { 1, 3 },
{ 0, 1 }, { 1, 4 }, { 0, 1 }, { 0, 1 }
};
-static const int __initconst kirkwood_cpu_ddr_ratios[16][2] = {
+static const int kirkwood_cpu_ddr_ratios[16][2] __initconst = {
{ 0, 1 }, { 0, 1 }, { 1, 2 }, { 0, 1 },
{ 1, 3 }, { 0, 1 }, { 1, 4 }, { 2, 9 },
{ 1, 5 }, { 1, 6 }, { 0, 1 }, { 0, 1 },
@@ -145,7 +145,7 @@ static void __init kirkwood_get_clk_ratio(
}
}
-static const u32 __initconst mv88f6180_cpu_freqs[] = {
+static const u32 mv88f6180_cpu_freqs[] __initconst = {
0, 0, 0, 0, 0,
600000000,
800000000,
@@ -158,7 +158,7 @@ static u32 __init mv88f6180_get_cpu_freq(void __iomem *sar)
return mv88f6180_cpu_freqs[opt];
}
-static const int __initconst mv88f6180_cpu_ddr_ratios[8][2] = {
+static const int mv88f6180_cpu_ddr_ratios[8][2] __initconst = {
{ 0, 1 }, { 0, 1 }, { 0, 1 }, { 0, 1 },
{ 0, 1 }, { 1, 3 }, { 1, 4 }, { 1, 5 }
};
@@ -219,7 +219,7 @@ CLK_OF_DECLARE(mv88f6180_core_clk, "marvell,mv88f6180-core-clock",
* Clock Gating Control
*/
-static const struct clk_gating_soc_desc __initconst kirkwood_gating_desc[] = {
+static const struct clk_gating_soc_desc kirkwood_gating_desc[] __initconst = {
{ "ge0", NULL, 0, 0 },
{ "pex0", NULL, 2, 0 },
{ "usb0", NULL, 3, 0 },
diff --git a/drivers/clk/mxs/clk-imx23.c b/drivers/clk/mxs/clk-imx23.c
index f6a7487..c396fe3 100644
--- a/drivers/clk/mxs/clk-imx23.c
+++ b/drivers/clk/mxs/clk-imx23.c
@@ -10,6 +10,7 @@
*/
#include <linux/clk.h>
+#include <linux/clk/mxs.h>
#include <linux/clkdev.h>
#include <linux/err.h>
#include <linux/init.h>
diff --git a/drivers/clk/mxs/clk.h b/drivers/clk/mxs/clk.h
index 81421e2..ef10ad9 100644
--- a/drivers/clk/mxs/clk.h
+++ b/drivers/clk/mxs/clk.h
@@ -52,8 +52,8 @@ static inline struct clk *mxs_clk_mux(const char *name, void __iomem *reg,
u8 shift, u8 width, const char **parent_names, int num_parents)
{
return clk_register_mux(NULL, name, parent_names, num_parents,
- CLK_SET_RATE_PARENT, reg, shift, width,
- 0, &mxs_lock);
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+ reg, shift, width, 0, &mxs_lock);
}
static inline struct clk *mxs_clk_fixed_factor(const char *name,
diff --git a/drivers/clk/samsung/Makefile b/drivers/clk/samsung/Makefile
index 5d4d432..3413380 100644
--- a/drivers/clk/samsung/Makefile
+++ b/drivers/clk/samsung/Makefile
@@ -8,3 +8,6 @@ obj-$(CONFIG_SOC_EXYNOS5250) += clk-exynos5250.o
obj-$(CONFIG_SOC_EXYNOS5420) += clk-exynos5420.o
obj-$(CONFIG_SOC_EXYNOS5440) += clk-exynos5440.o
obj-$(CONFIG_ARCH_EXYNOS) += clk-exynos-audss.o
+ifdef CONFIG_COMMON_CLK
+obj-$(CONFIG_ARCH_S3C64XX) += clk-s3c64xx.o
+endif
diff --git a/drivers/clk/samsung/clk-exynos-audss.c b/drivers/clk/samsung/clk-exynos-audss.c
index 9b1bbd5..39b40aa 100644
--- a/drivers/clk/samsung/clk-exynos-audss.c
+++ b/drivers/clk/samsung/clk-exynos-audss.c
@@ -62,7 +62,7 @@ static struct syscore_ops exynos_audss_clk_syscore_ops = {
#endif /* CONFIG_PM_SLEEP */
/* register exynos_audss clocks */
-void __init exynos_audss_clk_init(struct device_node *np)
+static void __init exynos_audss_clk_init(struct device_node *np)
{
reg_base = of_iomap(np, 0);
if (!reg_base) {
@@ -82,11 +82,13 @@ void __init exynos_audss_clk_init(struct device_node *np)
of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
clk_table[EXYNOS_MOUT_AUDSS] = clk_register_mux(NULL, "mout_audss",
- mout_audss_p, ARRAY_SIZE(mout_audss_p), 0,
+ mout_audss_p, ARRAY_SIZE(mout_audss_p),
+ CLK_SET_RATE_NO_REPARENT,
reg_base + ASS_CLK_SRC, 0, 1, 0, &lock);
clk_table[EXYNOS_MOUT_I2S] = clk_register_mux(NULL, "mout_i2s",
- mout_i2s_p, ARRAY_SIZE(mout_i2s_p), 0,
+ mout_i2s_p, ARRAY_SIZE(mout_i2s_p),
+ CLK_SET_RATE_NO_REPARENT,
reg_base + ASS_CLK_SRC, 2, 2, 0, &lock);
clk_table[EXYNOS_DOUT_SRP] = clk_register_divider(NULL, "dout_srp",
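
Marking exynos_audss_clk_init static works because the function is only reachable through its CLK_OF_DECLARE() registration, which lies outside this hunk; a hedged sketch of that pattern with hypothetical names:

	static void __init example_clk_init(struct device_node *np)
	{
		/* map registers, register clocks, add the OF provider */
	}
	CLK_OF_DECLARE(example_clk, "vendor,example-clock", example_clk_init);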
diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c
index 4e57397..ad5ff50 100644
--- a/drivers/clk/samsung/clk-exynos4.c
+++ b/drivers/clk/samsung/clk-exynos4.c
@@ -17,7 +17,6 @@
#include <linux/of_address.h>
#include "clk.h"
-#include "clk-pll.h"
/* Exynos4 clock controller register offsets */
#define SRC_LEFTBUS 0x4200
@@ -97,12 +96,15 @@
#define GATE_IP_PERIL 0xc950
#define E4210_GATE_IP_PERIR 0xc960
#define GATE_BLOCK 0xc970
+#define E4X12_MPLL_LOCK 0x10008
#define E4X12_MPLL_CON0 0x10108
#define SRC_DMC 0x10200
#define SRC_MASK_DMC 0x10300
#define DIV_DMC0 0x10500
#define DIV_DMC1 0x10504
#define GATE_IP_DMC 0x10900
+#define APLL_LOCK 0x14000
+#define E4210_MPLL_LOCK 0x14008
#define APLL_CON0 0x14100
#define E4210_MPLL_CON0 0x14108
#define SRC_CPU 0x14200
@@ -121,6 +123,12 @@ enum exynos4_soc {
EXYNOS4X12,
};
+/* list of PLLs to be registered */
+enum exynos4_plls {
+ apll, mpll, epll, vpll,
+ nr_plls /* number of PLLs */
+};
+
/*
* Let each supported clock get a unique id. This id is used to lookup the clock
* for device tree based platforms. The clocks are categorized into three
@@ -169,7 +177,7 @@ enum exynos4_clks {
gicisp, smmu_isp, smmu_drc, smmu_fd, smmu_lite0, smmu_lite1, mcuctl_isp,
mpwm_isp, i2c0_isp, i2c1_isp, mtcadc_isp, pwm_isp, wdt_isp, uart_isp,
asyncaxim, smmu_ispcx, spi0_isp, spi1_isp, pwm_isp_sclk, spi0_isp_sclk,
- spi1_isp_sclk, uart_isp_sclk,
+ spi1_isp_sclk, uart_isp_sclk, tmu_apbif,
/* mux clocks */
mout_fimc0 = 384, mout_fimc1, mout_fimc2, mout_fimc3, mout_cam0,
@@ -187,7 +195,7 @@ enum exynos4_clks {
* list of controller registers to be saved and restored during a
* suspend/resume cycle.
*/
-static __initdata unsigned long exynos4210_clk_save[] = {
+static unsigned long exynos4210_clk_save[] __initdata = {
E4210_SRC_IMAGE,
E4210_SRC_LCD1,
E4210_SRC_MASK_LCD1,
@@ -198,7 +206,7 @@ static __initdata unsigned long exynos4210_clk_save[] = {
E4210_MPLL_CON0,
};
-static __initdata unsigned long exynos4x12_clk_save[] = {
+static unsigned long exynos4x12_clk_save[] __initdata = {
E4X12_GATE_IP_IMAGE,
E4X12_GATE_IP_PERIR,
E4X12_SRC_CAM1,
@@ -207,7 +215,7 @@ static __initdata unsigned long exynos4x12_clk_save[] = {
E4X12_MPLL_CON0,
};
-static __initdata unsigned long exynos4_clk_regs[] = {
+static unsigned long exynos4_clk_regs[] __initdata = {
SRC_LEFTBUS,
DIV_LEFTBUS,
GATE_IP_LEFTBUS,
@@ -338,24 +346,24 @@ PNAME(mout_user_aclk200_p4x12) = {"fin_pll", "div_aclk200", };
PNAME(mout_user_aclk266_gps_p4x12) = {"fin_pll", "div_aclk266_gps", };
/* fixed rate clocks generated outside the soc */
-struct samsung_fixed_rate_clock exynos4_fixed_rate_ext_clks[] __initdata = {
+static struct samsung_fixed_rate_clock exynos4_fixed_rate_ext_clks[] __initdata = {
FRATE(xxti, "xxti", NULL, CLK_IS_ROOT, 0),
FRATE(xusbxti, "xusbxti", NULL, CLK_IS_ROOT, 0),
};
/* fixed rate clocks generated inside the soc */
-struct samsung_fixed_rate_clock exynos4_fixed_rate_clks[] __initdata = {
+static struct samsung_fixed_rate_clock exynos4_fixed_rate_clks[] __initdata = {
FRATE(none, "sclk_hdmi24m", NULL, CLK_IS_ROOT, 24000000),
FRATE(none, "sclk_hdmiphy", NULL, CLK_IS_ROOT, 27000000),
FRATE(none, "sclk_usbphy0", NULL, CLK_IS_ROOT, 48000000),
};
-struct samsung_fixed_rate_clock exynos4210_fixed_rate_clks[] __initdata = {
+static struct samsung_fixed_rate_clock exynos4210_fixed_rate_clks[] __initdata = {
FRATE(none, "sclk_usbphy1", NULL, CLK_IS_ROOT, 48000000),
};
/* list of mux clocks supported in all exynos4 soc's */
-struct samsung_mux_clock exynos4_mux_clks[] __initdata = {
+static struct samsung_mux_clock exynos4_mux_clks[] __initdata = {
MUX_FA(mout_apll, "mout_apll", mout_apll_p, SRC_CPU, 0, 1,
CLK_SET_RATE_PARENT, 0, "mout_apll"),
MUX(none, "mout_hdmi", mout_hdmi_p, SRC_TV, 0, 1),
@@ -367,17 +375,20 @@ struct samsung_mux_clock exynos4_mux_clks[] __initdata = {
CLK_SET_RATE_PARENT, 0),
MUX(none, "mout_spdif", mout_spdif_p, SRC_PERIL1, 8, 2),
MUX(none, "mout_onenand1", mout_onenand1_p, SRC_TOP0, 0, 1),
- MUX_A(sclk_epll, "sclk_epll", mout_epll_p, SRC_TOP0, 4, 1, "sclk_epll"),
+ MUX(sclk_epll, "sclk_epll", mout_epll_p, SRC_TOP0, 4, 1),
MUX(none, "mout_onenand", mout_onenand_p, SRC_TOP0, 28, 1),
};
/* list of mux clocks supported in exynos4210 soc */
-struct samsung_mux_clock exynos4210_mux_clks[] __initdata = {
+static struct samsung_mux_clock exynos4210_mux_early[] __initdata = {
+ MUX(none, "mout_vpllsrc", mout_vpllsrc_p, SRC_TOP1, 0, 1),
+};
+
+static struct samsung_mux_clock exynos4210_mux_clks[] __initdata = {
MUX(none, "mout_aclk200", sclk_ampll_p4210, SRC_TOP0, 12, 1),
MUX(none, "mout_aclk100", sclk_ampll_p4210, SRC_TOP0, 16, 1),
MUX(none, "mout_aclk160", sclk_ampll_p4210, SRC_TOP0, 20, 1),
MUX(none, "mout_aclk133", sclk_ampll_p4210, SRC_TOP0, 24, 1),
- MUX(none, "mout_vpllsrc", mout_vpllsrc_p, SRC_TOP1, 0, 1),
MUX(none, "mout_mixer", mout_mixer_p4210, SRC_TV, 4, 1),
MUX(none, "mout_dac", mout_dac_p4210, SRC_TV, 8, 1),
MUX(none, "mout_g2d0", sclk_ampll_p4210, E4210_SRC_IMAGE, 0, 1),
@@ -385,11 +396,9 @@ struct samsung_mux_clock exynos4210_mux_clks[] __initdata = {
MUX(none, "mout_g2d", mout_g2d_p, E4210_SRC_IMAGE, 8, 1),
MUX(none, "mout_fimd1", group1_p4210, E4210_SRC_LCD1, 0, 4),
MUX(none, "mout_mipi1", group1_p4210, E4210_SRC_LCD1, 12, 4),
- MUX_A(sclk_mpll, "sclk_mpll", mout_mpll_p, SRC_CPU, 8, 1, "mout_mpll"),
- MUX_A(mout_core, "mout_core", mout_core_p4210,
- SRC_CPU, 16, 1, "moutcore"),
- MUX_A(sclk_vpll, "sclk_vpll", sclk_vpll_p4210,
- SRC_TOP0, 8, 1, "sclk_vpll"),
+ MUX(sclk_mpll, "sclk_mpll", mout_mpll_p, SRC_CPU, 8, 1),
+ MUX(mout_core, "mout_core", mout_core_p4210, SRC_CPU, 16, 1),
+ MUX(sclk_vpll, "sclk_vpll", sclk_vpll_p4210, SRC_TOP0, 8, 1),
MUX(mout_fimc0, "mout_fimc0", group1_p4210, SRC_CAM, 0, 4),
MUX(mout_fimc1, "mout_fimc1", group1_p4210, SRC_CAM, 4, 4),
MUX(mout_fimc2, "mout_fimc2", group1_p4210, SRC_CAM, 8, 4),
@@ -423,9 +432,9 @@ struct samsung_mux_clock exynos4210_mux_clks[] __initdata = {
};
/* list of mux clocks supported in exynos4x12 soc */
-struct samsung_mux_clock exynos4x12_mux_clks[] __initdata = {
- MUX_A(mout_mpll_user_c, "mout_mpll_user_c", mout_mpll_user_p4x12,
- SRC_CPU, 24, 1, "mout_mpll"),
+static struct samsung_mux_clock exynos4x12_mux_clks[] __initdata = {
+ MUX(mout_mpll_user_c, "mout_mpll_user_c", mout_mpll_user_p4x12,
+ SRC_CPU, 24, 1),
MUX(none, "mout_aclk266_gps", aclk_p4412, SRC_TOP1, 4, 1),
MUX(none, "mout_aclk400_mcuisp", aclk_p4412, SRC_TOP1, 8, 1),
MUX(mout_mpll_user_t, "mout_mpll_user_t", mout_mpll_user_p4x12,
@@ -445,12 +454,9 @@ struct samsung_mux_clock exynos4x12_mux_clks[] __initdata = {
MUX(none, "mout_jpeg0", sclk_ampll_p4x12, E4X12_SRC_CAM1, 0, 1),
MUX(none, "mout_jpeg1", sclk_evpll_p, E4X12_SRC_CAM1, 4, 1),
MUX(none, "mout_jpeg", mout_jpeg_p, E4X12_SRC_CAM1, 8, 1),
- MUX_A(sclk_mpll, "sclk_mpll", mout_mpll_p,
- SRC_DMC, 12, 1, "sclk_mpll"),
- MUX_A(sclk_vpll, "sclk_vpll", mout_vpll_p,
- SRC_TOP0, 8, 1, "sclk_vpll"),
- MUX_A(mout_core, "mout_core", mout_core_p4x12,
- SRC_CPU, 16, 1, "moutcore"),
+ MUX(sclk_mpll, "sclk_mpll", mout_mpll_p, SRC_DMC, 12, 1),
+ MUX(sclk_vpll, "sclk_vpll", mout_vpll_p, SRC_TOP0, 8, 1),
+ MUX(mout_core, "mout_core", mout_core_p4x12, SRC_CPU, 16, 1),
MUX(mout_fimc0, "mout_fimc0", group1_p4x12, SRC_CAM, 0, 4),
MUX(mout_fimc1, "mout_fimc1", group1_p4x12, SRC_CAM, 4, 4),
MUX(mout_fimc2, "mout_fimc2", group1_p4x12, SRC_CAM, 8, 4),
@@ -491,7 +497,7 @@ struct samsung_mux_clock exynos4x12_mux_clks[] __initdata = {
};
/* list of divider clocks supported in all exynos4 soc's */
-struct samsung_div_clock exynos4_div_clks[] __initdata = {
+static struct samsung_div_clock exynos4_div_clks[] __initdata = {
DIV(none, "div_core", "mout_core", DIV_CPU0, 0, 3),
DIV(none, "div_core2", "div_core", DIV_CPU0, 28, 3),
DIV(none, "div_fimc0", "mout_fimc0", DIV_CAM, 0, 4),
@@ -538,9 +544,8 @@ struct samsung_div_clock exynos4_div_clks[] __initdata = {
DIV(none, "div_spi_pre2", "div_spi2", DIV_PERIL2, 8, 8),
DIV(none, "div_audio1", "mout_audio1", DIV_PERIL4, 0, 4),
DIV(none, "div_audio2", "mout_audio2", DIV_PERIL4, 16, 4),
- DIV_A(arm_clk, "arm_clk", "div_core2", DIV_CPU0, 28, 3, "armclk"),
- DIV_A(sclk_apll, "sclk_apll", "mout_apll",
- DIV_CPU0, 24, 3, "sclk_apll"),
+ DIV(arm_clk, "arm_clk", "div_core2", DIV_CPU0, 28, 3),
+ DIV(sclk_apll, "sclk_apll", "mout_apll", DIV_CPU0, 24, 3),
DIV_F(none, "div_mipi_pre0", "div_mipi0", DIV_LCD0, 20, 4,
CLK_SET_RATE_PARENT, 0),
DIV_F(none, "div_mmc_pre0", "div_mmc0", DIV_FSYS1, 8, 8,
@@ -554,7 +559,7 @@ struct samsung_div_clock exynos4_div_clks[] __initdata = {
};
/* list of divider clocks supported in exynos4210 soc */
-struct samsung_div_clock exynos4210_div_clks[] __initdata = {
+static struct samsung_div_clock exynos4210_div_clks[] __initdata = {
DIV(aclk200, "aclk200", "mout_aclk200", DIV_TOP, 0, 3),
DIV(sclk_fimg2d, "sclk_fimg2d", "mout_g2d", DIV_IMAGE, 0, 4),
DIV(none, "div_fimd1", "mout_fimd1", E4210_DIV_LCD1, 0, 4),
@@ -565,7 +570,7 @@ struct samsung_div_clock exynos4210_div_clks[] __initdata = {
};
/* list of divider clocks supported in exynos4x12 soc */
-struct samsung_div_clock exynos4x12_div_clks[] __initdata = {
+static struct samsung_div_clock exynos4x12_div_clks[] __initdata = {
DIV(none, "div_mdnie0", "mout_mdnie0", DIV_LCD0, 4, 4),
DIV(none, "div_mdnie_pwm0", "mout_mdnie_pwm0", DIV_LCD0, 8, 4),
DIV(none, "div_mdnie_pwm_pre0", "div_mdnie_pwm0", DIV_LCD0, 12, 4),
@@ -594,7 +599,7 @@ struct samsung_div_clock exynos4x12_div_clks[] __initdata = {
};
/* list of gate clocks supported in all exynos4 soc's */
-struct samsung_gate_clock exynos4_gate_clks[] __initdata = {
+static struct samsung_gate_clock exynos4_gate_clks[] __initdata = {
/*
* After all Exynos4 based platforms are migrated to use device tree,
* the device name and clock alias names specified below for some
@@ -629,164 +634,151 @@ struct samsung_gate_clock exynos4_gate_clks[] __initdata = {
CLK_SET_RATE_PARENT, 0),
GATE(sclk_audio1, "sclk_audio1", "div_audio1", SRC_MASK_PERIL1, 0,
CLK_SET_RATE_PARENT, 0),
- GATE_D(vp, "s5p-mixer", "vp", "aclk160", GATE_IP_TV, 0, 0, 0),
- GATE_D(mixer, "s5p-mixer", "mixer", "aclk160", GATE_IP_TV, 1, 0, 0),
- GATE_D(hdmi, "exynos4-hdmi", "hdmi", "aclk160", GATE_IP_TV, 3, 0, 0),
- GATE_A(pwm, "pwm", "aclk100", GATE_IP_PERIL, 24, 0, 0, "timers"),
- GATE_A(sdmmc4, "sdmmc4", "aclk133", GATE_IP_FSYS, 9, 0, 0, "biu"),
- GATE_A(usb_host, "usb_host", "aclk133",
- GATE_IP_FSYS, 12, 0, 0, "usbhost"),
- GATE_DA(sclk_fimc0, "exynos4-fimc.0", "sclk_fimc0", "div_fimc0",
- SRC_MASK_CAM, 0, CLK_SET_RATE_PARENT, 0, "sclk_fimc"),
- GATE_DA(sclk_fimc1, "exynos4-fimc.1", "sclk_fimc1", "div_fimc1",
- SRC_MASK_CAM, 4, CLK_SET_RATE_PARENT, 0, "sclk_fimc"),
- GATE_DA(sclk_fimc2, "exynos4-fimc.2", "sclk_fimc2", "div_fimc2",
- SRC_MASK_CAM, 8, CLK_SET_RATE_PARENT, 0, "sclk_fimc"),
- GATE_DA(sclk_fimc3, "exynos4-fimc.3", "sclk_fimc3", "div_fimc3",
- SRC_MASK_CAM, 12, CLK_SET_RATE_PARENT, 0, "sclk_fimc"),
- GATE_DA(sclk_csis0, "s5p-mipi-csis.0", "sclk_csis0", "div_csis0",
- SRC_MASK_CAM, 24, CLK_SET_RATE_PARENT, 0, "sclk_csis"),
- GATE_DA(sclk_csis1, "s5p-mipi-csis.1", "sclk_csis1", "div_csis1",
- SRC_MASK_CAM, 28, CLK_SET_RATE_PARENT, 0, "sclk_csis"),
- GATE_DA(sclk_fimd0, "exynos4-fb.0", "sclk_fimd0", "div_fimd0",
- SRC_MASK_LCD0, 0, CLK_SET_RATE_PARENT, 0, "sclk_fimd"),
- GATE_DA(sclk_mmc0, "exynos4-sdhci.0", "sclk_mmc0", "div_mmc_pre0",
- SRC_MASK_FSYS, 0, CLK_SET_RATE_PARENT, 0,
- "mmc_busclk.2"),
- GATE_DA(sclk_mmc1, "exynos4-sdhci.1", "sclk_mmc1", "div_mmc_pre1",
- SRC_MASK_FSYS, 4, CLK_SET_RATE_PARENT, 0,
- "mmc_busclk.2"),
- GATE_DA(sclk_mmc2, "exynos4-sdhci.2", "sclk_mmc2", "div_mmc_pre2",
- SRC_MASK_FSYS, 8, CLK_SET_RATE_PARENT, 0,
- "mmc_busclk.2"),
- GATE_DA(sclk_mmc3, "exynos4-sdhci.3", "sclk_mmc3", "div_mmc_pre3",
- SRC_MASK_FSYS, 12, CLK_SET_RATE_PARENT, 0,
- "mmc_busclk.2"),
- GATE_DA(sclk_mmc4, NULL, "sclk_mmc4", "div_mmc_pre4",
- SRC_MASK_FSYS, 16, CLK_SET_RATE_PARENT, 0, "ciu"),
- GATE_DA(sclk_uart0, "exynos4210-uart.0", "uclk0", "div_uart0",
- SRC_MASK_PERIL0, 0, CLK_SET_RATE_PARENT,
- 0, "clk_uart_baud0"),
- GATE_DA(sclk_uart1, "exynos4210-uart.1", "uclk1", "div_uart1",
- SRC_MASK_PERIL0, 4, CLK_SET_RATE_PARENT,
- 0, "clk_uart_baud0"),
- GATE_DA(sclk_uart2, "exynos4210-uart.2", "uclk2", "div_uart2",
- SRC_MASK_PERIL0, 8, CLK_SET_RATE_PARENT,
- 0, "clk_uart_baud0"),
- GATE_DA(sclk_uart3, "exynos4210-uart.3", "uclk3", "div_uart3",
- SRC_MASK_PERIL0, 12, CLK_SET_RATE_PARENT,
- 0, "clk_uart_baud0"),
- GATE_DA(sclk_uart4, "exynos4210-uart.4", "uclk4", "div_uart4",
- SRC_MASK_PERIL0, 16, CLK_SET_RATE_PARENT,
- 0, "clk_uart_baud0"),
+ GATE(vp, "vp", "aclk160", GATE_IP_TV, 0, 0, 0),
+ GATE(mixer, "mixer", "aclk160", GATE_IP_TV, 1, 0, 0),
+ GATE(hdmi, "hdmi", "aclk160", GATE_IP_TV, 3, 0, 0),
+ GATE(pwm, "pwm", "aclk100", GATE_IP_PERIL, 24, 0, 0),
+ GATE(sdmmc4, "sdmmc4", "aclk133", GATE_IP_FSYS, 9, 0, 0),
+ GATE(usb_host, "usb_host", "aclk133", GATE_IP_FSYS, 12, 0, 0),
+ GATE(sclk_fimc0, "sclk_fimc0", "div_fimc0", SRC_MASK_CAM, 0,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(sclk_fimc1, "sclk_fimc1", "div_fimc1", SRC_MASK_CAM, 4,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(sclk_fimc2, "sclk_fimc2", "div_fimc2", SRC_MASK_CAM, 8,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(sclk_fimc3, "sclk_fimc3", "div_fimc3", SRC_MASK_CAM, 12,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(sclk_csis0, "sclk_csis0", "div_csis0", SRC_MASK_CAM, 24,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(sclk_csis1, "sclk_csis1", "div_csis1", SRC_MASK_CAM, 28,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(sclk_fimd0, "sclk_fimd0", "div_fimd0", SRC_MASK_LCD0, 0,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(sclk_mmc0, "sclk_mmc0", "div_mmc_pre0", SRC_MASK_FSYS, 0,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(sclk_mmc1, "sclk_mmc1", "div_mmc_pre1", SRC_MASK_FSYS, 4,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(sclk_mmc2, "sclk_mmc2", "div_mmc_pre2", SRC_MASK_FSYS, 8,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(sclk_mmc3, "sclk_mmc3", "div_mmc_pre3", SRC_MASK_FSYS, 12,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(sclk_mmc4, "sclk_mmc4", "div_mmc_pre4", SRC_MASK_FSYS, 16,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(sclk_uart0, "uclk0", "div_uart0", SRC_MASK_PERIL0, 0,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(sclk_uart1, "uclk1", "div_uart1", SRC_MASK_PERIL0, 4,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(sclk_uart2, "uclk2", "div_uart2", SRC_MASK_PERIL0, 8,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(sclk_uart3, "uclk3", "div_uart3", SRC_MASK_PERIL0, 12,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(sclk_uart4, "uclk4", "div_uart4", SRC_MASK_PERIL0, 16,
+ CLK_SET_RATE_PARENT, 0),
GATE(sclk_audio2, "sclk_audio2", "div_audio2", SRC_MASK_PERIL1, 4,
CLK_SET_RATE_PARENT, 0),
- GATE_DA(sclk_spi0, "exynos4210-spi.0", "sclk_spi0", "div_spi_pre0",
- SRC_MASK_PERIL1, 16, CLK_SET_RATE_PARENT,
- 0, "spi_busclk0"),
- GATE_DA(sclk_spi1, "exynos4210-spi.1", "sclk_spi1", "div_spi_pre1",
- SRC_MASK_PERIL1, 20, CLK_SET_RATE_PARENT,
- 0, "spi_busclk0"),
- GATE_DA(sclk_spi2, "exynos4210-spi.2", "sclk_spi2", "div_spi_pre2",
- SRC_MASK_PERIL1, 24, CLK_SET_RATE_PARENT,
- 0, "spi_busclk0"),
- GATE_DA(fimc0, "exynos4-fimc.0", "fimc0", "aclk160",
- GATE_IP_CAM, 0, 0, 0, "fimc"),
- GATE_DA(fimc1, "exynos4-fimc.1", "fimc1", "aclk160",
- GATE_IP_CAM, 1, 0, 0, "fimc"),
- GATE_DA(fimc2, "exynos4-fimc.2", "fimc2", "aclk160",
- GATE_IP_CAM, 2, 0, 0, "fimc"),
- GATE_DA(fimc3, "exynos4-fimc.3", "fimc3", "aclk160",
- GATE_IP_CAM, 3, 0, 0, "fimc"),
- GATE_DA(csis0, "s5p-mipi-csis.0", "csis0", "aclk160",
- GATE_IP_CAM, 4, 0, 0, "fimc"),
- GATE_DA(csis1, "s5p-mipi-csis.1", "csis1", "aclk160",
- GATE_IP_CAM, 5, 0, 0, "fimc"),
- GATE_DA(smmu_fimc0, "exynos-sysmmu.5", "smmu_fimc0", "aclk160",
- GATE_IP_CAM, 7, 0, 0, "sysmmu"),
- GATE_DA(smmu_fimc1, "exynos-sysmmu.6", "smmu_fimc1", "aclk160",
- GATE_IP_CAM, 8, 0, 0, "sysmmu"),
- GATE_DA(smmu_fimc2, "exynos-sysmmu.7", "smmu_fimc2", "aclk160",
- GATE_IP_CAM, 9, 0, 0, "sysmmu"),
- GATE_DA(smmu_fimc3, "exynos-sysmmu.8", "smmu_fimc3", "aclk160",
- GATE_IP_CAM, 10, 0, 0, "sysmmu"),
- GATE_DA(smmu_jpeg, "exynos-sysmmu.3", "smmu_jpeg", "aclk160",
- GATE_IP_CAM, 11, 0, 0, "sysmmu"),
+ GATE(sclk_spi0, "sclk_spi0", "div_spi_pre0", SRC_MASK_PERIL1, 16,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(sclk_spi1, "sclk_spi1", "div_spi_pre1", SRC_MASK_PERIL1, 20,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(sclk_spi2, "sclk_spi2", "div_spi_pre2", SRC_MASK_PERIL1, 24,
+ CLK_SET_RATE_PARENT, 0),
+	GATE(fimc0, "fimc0", "aclk160", GATE_IP_CAM, 0, 0, 0),
+	GATE(fimc1, "fimc1", "aclk160", GATE_IP_CAM, 1, 0, 0),
+	GATE(fimc2, "fimc2", "aclk160", GATE_IP_CAM, 2, 0, 0),
+	GATE(fimc3, "fimc3", "aclk160", GATE_IP_CAM, 3, 0, 0),
+	GATE(csis0, "csis0", "aclk160", GATE_IP_CAM, 4, 0, 0),
+	GATE(csis1, "csis1", "aclk160", GATE_IP_CAM, 5, 0, 0),
+	GATE(smmu_fimc0, "smmu_fimc0", "aclk160", GATE_IP_CAM, 7, 0, 0),
+	GATE(smmu_fimc1, "smmu_fimc1", "aclk160", GATE_IP_CAM, 8, 0, 0),
+	GATE(smmu_fimc2, "smmu_fimc2", "aclk160", GATE_IP_CAM, 9, 0, 0),
+	GATE(smmu_fimc3, "smmu_fimc3", "aclk160", GATE_IP_CAM, 10, 0, 0),
+	GATE(smmu_jpeg, "smmu_jpeg", "aclk160", GATE_IP_CAM, 11, 0, 0),
GATE(pixelasyncm0, "pxl_async0", "aclk160", GATE_IP_CAM, 17, 0, 0),
GATE(pixelasyncm1, "pxl_async1", "aclk160", GATE_IP_CAM, 18, 0, 0),
- GATE_DA(smmu_tv, "exynos-sysmmu.2", "smmu_tv", "aclk160",
- GATE_IP_TV, 4, 0, 0, "sysmmu"),
- GATE_DA(mfc, "s5p-mfc", "mfc", "aclk100", GATE_IP_MFC, 0, 0, 0, "mfc"),
- GATE_DA(smmu_mfcl, "exynos-sysmmu.0", "smmu_mfcl", "aclk100",
- GATE_IP_MFC, 1, 0, 0, "sysmmu"),
- GATE_DA(smmu_mfcr, "exynos-sysmmu.1", "smmu_mfcr", "aclk100",
- GATE_IP_MFC, 2, 0, 0, "sysmmu"),
- GATE_DA(fimd0, "exynos4-fb.0", "fimd0", "aclk160",
- GATE_IP_LCD0, 0, 0, 0, "fimd"),
- GATE_DA(smmu_fimd0, "exynos-sysmmu.10", "smmu_fimd0", "aclk160",
- GATE_IP_LCD0, 4, 0, 0, "sysmmu"),
- GATE_DA(pdma0, "dma-pl330.0", "pdma0", "aclk133",
- GATE_IP_FSYS, 0, 0, 0, "dma"),
- GATE_DA(pdma1, "dma-pl330.1", "pdma1", "aclk133",
- GATE_IP_FSYS, 1, 0, 0, "dma"),
- GATE_DA(sdmmc0, "exynos4-sdhci.0", "sdmmc0", "aclk133",
- GATE_IP_FSYS, 5, 0, 0, "hsmmc"),
- GATE_DA(sdmmc1, "exynos4-sdhci.1", "sdmmc1", "aclk133",
- GATE_IP_FSYS, 6, 0, 0, "hsmmc"),
- GATE_DA(sdmmc2, "exynos4-sdhci.2", "sdmmc2", "aclk133",
- GATE_IP_FSYS, 7, 0, 0, "hsmmc"),
- GATE_DA(sdmmc3, "exynos4-sdhci.3", "sdmmc3", "aclk133",
- GATE_IP_FSYS, 8, 0, 0, "hsmmc"),
- GATE_DA(uart0, "exynos4210-uart.0", "uart0", "aclk100",
- GATE_IP_PERIL, 0, 0, 0, "uart"),
- GATE_DA(uart1, "exynos4210-uart.1", "uart1", "aclk100",
- GATE_IP_PERIL, 1, 0, 0, "uart"),
- GATE_DA(uart2, "exynos4210-uart.2", "uart2", "aclk100",
- GATE_IP_PERIL, 2, 0, 0, "uart"),
- GATE_DA(uart3, "exynos4210-uart.3", "uart3", "aclk100",
- GATE_IP_PERIL, 3, 0, 0, "uart"),
- GATE_DA(uart4, "exynos4210-uart.4", "uart4", "aclk100",
- GATE_IP_PERIL, 4, 0, 0, "uart"),
- GATE_DA(i2c0, "s3c2440-i2c.0", "i2c0", "aclk100",
- GATE_IP_PERIL, 6, 0, 0, "i2c"),
- GATE_DA(i2c1, "s3c2440-i2c.1", "i2c1", "aclk100",
- GATE_IP_PERIL, 7, 0, 0, "i2c"),
- GATE_DA(i2c2, "s3c2440-i2c.2", "i2c2", "aclk100",
- GATE_IP_PERIL, 8, 0, 0, "i2c"),
- GATE_DA(i2c3, "s3c2440-i2c.3", "i2c3", "aclk100",
- GATE_IP_PERIL, 9, 0, 0, "i2c"),
- GATE_DA(i2c4, "s3c2440-i2c.4", "i2c4", "aclk100",
- GATE_IP_PERIL, 10, 0, 0, "i2c"),
- GATE_DA(i2c5, "s3c2440-i2c.5", "i2c5", "aclk100",
- GATE_IP_PERIL, 11, 0, 0, "i2c"),
- GATE_DA(i2c6, "s3c2440-i2c.6", "i2c6", "aclk100",
- GATE_IP_PERIL, 12, 0, 0, "i2c"),
- GATE_DA(i2c7, "s3c2440-i2c.7", "i2c7", "aclk100",
- GATE_IP_PERIL, 13, 0, 0, "i2c"),
- GATE_DA(i2c_hdmi, "s3c2440-hdmiphy-i2c", "i2c-hdmi", "aclk100",
- GATE_IP_PERIL, 14, 0, 0, "i2c"),
- GATE_DA(spi0, "exynos4210-spi.0", "spi0", "aclk100",
- GATE_IP_PERIL, 16, 0, 0, "spi"),
- GATE_DA(spi1, "exynos4210-spi.1", "spi1", "aclk100",
- GATE_IP_PERIL, 17, 0, 0, "spi"),
- GATE_DA(spi2, "exynos4210-spi.2", "spi2", "aclk100",
- GATE_IP_PERIL, 18, 0, 0, "spi"),
- GATE_DA(i2s1, "samsung-i2s.1", "i2s1", "aclk100",
- GATE_IP_PERIL, 20, 0, 0, "iis"),
- GATE_DA(i2s2, "samsung-i2s.2", "i2s2", "aclk100",
- GATE_IP_PERIL, 21, 0, 0, "iis"),
- GATE_DA(pcm1, "samsung-pcm.1", "pcm1", "aclk100",
- GATE_IP_PERIL, 22, 0, 0, "pcm"),
- GATE_DA(pcm2, "samsung-pcm.2", "pcm2", "aclk100",
- GATE_IP_PERIL, 23, 0, 0, "pcm"),
- GATE_DA(spdif, "samsung-spdif", "spdif", "aclk100",
- GATE_IP_PERIL, 26, 0, 0, "spdif"),
- GATE_DA(ac97, "samsung-ac97", "ac97", "aclk100",
- GATE_IP_PERIL, 27, 0, 0, "ac97"),
+	GATE(smmu_tv, "smmu_tv", "aclk160", GATE_IP_TV, 4, 0, 0),
+	GATE(mfc, "mfc", "aclk100", GATE_IP_MFC, 0, 0, 0),
+	GATE(smmu_mfcl, "smmu_mfcl", "aclk100", GATE_IP_MFC, 1, 0, 0),
+	GATE(smmu_mfcr, "smmu_mfcr", "aclk100", GATE_IP_MFC, 2, 0, 0),
+	GATE(fimd0, "fimd0", "aclk160", GATE_IP_LCD0, 0, 0, 0),
+	GATE(smmu_fimd0, "smmu_fimd0", "aclk160", GATE_IP_LCD0, 4, 0, 0),
+	GATE(pdma0, "pdma0", "aclk133", GATE_IP_FSYS, 0, 0, 0),
+	GATE(pdma1, "pdma1", "aclk133", GATE_IP_FSYS, 1, 0, 0),
+	GATE(sdmmc0, "sdmmc0", "aclk133", GATE_IP_FSYS, 5, 0, 0),
+	GATE(sdmmc1, "sdmmc1", "aclk133", GATE_IP_FSYS, 6, 0, 0),
+	GATE(sdmmc2, "sdmmc2", "aclk133", GATE_IP_FSYS, 7, 0, 0),
+	GATE(sdmmc3, "sdmmc3", "aclk133", GATE_IP_FSYS, 8, 0, 0),
+	GATE(uart0, "uart0", "aclk100", GATE_IP_PERIL, 0, 0, 0),
+	GATE(uart1, "uart1", "aclk100", GATE_IP_PERIL, 1, 0, 0),
+	GATE(uart2, "uart2", "aclk100", GATE_IP_PERIL, 2, 0, 0),
+	GATE(uart3, "uart3", "aclk100", GATE_IP_PERIL, 3, 0, 0),
+	GATE(uart4, "uart4", "aclk100", GATE_IP_PERIL, 4, 0, 0),
+	GATE(i2c0, "i2c0", "aclk100", GATE_IP_PERIL, 6, 0, 0),
+	GATE(i2c1, "i2c1", "aclk100", GATE_IP_PERIL, 7, 0, 0),
+	GATE(i2c2, "i2c2", "aclk100", GATE_IP_PERIL, 8, 0, 0),
+	GATE(i2c3, "i2c3", "aclk100", GATE_IP_PERIL, 9, 0, 0),
+	GATE(i2c4, "i2c4", "aclk100", GATE_IP_PERIL, 10, 0, 0),
+	GATE(i2c5, "i2c5", "aclk100", GATE_IP_PERIL, 11, 0, 0),
+	GATE(i2c6, "i2c6", "aclk100", GATE_IP_PERIL, 12, 0, 0),
+	GATE(i2c7, "i2c7", "aclk100", GATE_IP_PERIL, 13, 0, 0),
+	GATE(i2c_hdmi, "i2c-hdmi", "aclk100", GATE_IP_PERIL, 14, 0, 0),
+	GATE(spi0, "spi0", "aclk100", GATE_IP_PERIL, 16, 0, 0),
+	GATE(spi1, "spi1", "aclk100", GATE_IP_PERIL, 17, 0, 0),
+	GATE(spi2, "spi2", "aclk100", GATE_IP_PERIL, 18, 0, 0),
+	GATE(i2s1, "i2s1", "aclk100", GATE_IP_PERIL, 20, 0, 0),
+	GATE(i2s2, "i2s2", "aclk100", GATE_IP_PERIL, 21, 0, 0),
+	GATE(pcm1, "pcm1", "aclk100", GATE_IP_PERIL, 22, 0, 0),
+	GATE(pcm2, "pcm2", "aclk100", GATE_IP_PERIL, 23, 0, 0),
+	GATE(spdif, "spdif", "aclk100", GATE_IP_PERIL, 26, 0, 0),
+	GATE(ac97, "ac97", "aclk100", GATE_IP_PERIL, 27, 0, 0),
};
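
Each GATE() entry above ties a clock id and name to a single enable bit in a
controller register (for example, GATE_IP_PERIL bit 0 for "uart0"), so the
clkdev alias strings that GATE_A/GATE_DA used to carry are no longer needed.
A minimal consumer sketch, assuming a driver that can look the clock up by
name (the device pointer and the "uart0" connection id here are illustrative
assumptions, not taken from this patch):

	#include <linux/clk.h>
	#include <linux/err.h>

	static int example_enable_uart_clock(struct device *dev)
	{
		struct clk *clk;
		int ret;

		clk = clk_get(dev, "uart0");	/* assumed consumer id */
		if (IS_ERR(clk))
			return PTR_ERR(clk);

		/* Ungates the clock, i.e. sets the GATE_IP_PERIL enable bit. */
		ret = clk_prepare_enable(clk);
		if (ret)
			clk_put(clk);
		return ret;
	}
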
/* list of gate clocks supported in exynos4210 soc */
-struct samsung_gate_clock exynos4210_gate_clks[] __initdata = {
+static struct samsung_gate_clock exynos4210_gate_clks[] __initdata = {
GATE(tvenc, "tvenc", "aclk160", GATE_IP_TV, 2, 0, 0),
GATE(g2d, "g2d", "aclk200", E4210_GATE_IP_IMAGE, 0, 0, 0),
GATE(rotator, "rotator", "aclk200", E4210_GATE_IP_IMAGE, 1, 0, 0),
@@ -811,17 +803,23 @@ struct samsung_gate_clock exynos4210_gate_clks[] __initdata = {
SRC_MASK_FSYS, 24, CLK_SET_RATE_PARENT, 0),
GATE(sclk_mixer, "sclk_mixer", "mout_mixer", SRC_MASK_TV, 4, 0, 0),
GATE(sclk_dac, "sclk_dac", "mout_dac", SRC_MASK_TV, 8, 0, 0),
- GATE_A(tsadc, "tsadc", "aclk100", GATE_IP_PERIL, 15, 0, 0, "adc"),
- GATE_A(mct, "mct", "aclk100", E4210_GATE_IP_PERIR, 13, 0, 0, "mct"),
- GATE_A(wdt, "watchdog", "aclk100", E4210_GATE_IP_PERIR, 14, 0, 0, "watchdog"),
- GATE_A(rtc, "rtc", "aclk100", E4210_GATE_IP_PERIR, 15, 0, 0, "rtc"),
- GATE_A(keyif, "keyif", "aclk100", E4210_GATE_IP_PERIR, 16, 0, 0, "keypad"),
- GATE_DA(sclk_fimd1, "exynos4-fb.1", "sclk_fimd1", "div_fimd1",
- E4210_SRC_MASK_LCD1, 0, CLK_SET_RATE_PARENT, 0, "sclk_fimd"),
+	GATE(tsadc, "tsadc", "aclk100", GATE_IP_PERIL, 15, 0, 0),
+	GATE(mct, "mct", "aclk100", E4210_GATE_IP_PERIR, 13, 0, 0),
+	GATE(wdt, "watchdog", "aclk100", E4210_GATE_IP_PERIR, 14, 0, 0),
+	GATE(rtc, "rtc", "aclk100", E4210_GATE_IP_PERIR, 15, 0, 0),
+	GATE(keyif, "keyif", "aclk100", E4210_GATE_IP_PERIR, 16, 0, 0),
+ GATE(sclk_fimd1, "sclk_fimd1", "div_fimd1", E4210_SRC_MASK_LCD1, 0,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(tmu_apbif, "tmu_apbif", "aclk100", E4210_GATE_IP_PERIR, 17, 0, 0),
};
/* list of gate clocks supported in exynos4x12 soc */
-struct samsung_gate_clock exynos4x12_gate_clks[] __initdata = {
+static struct samsung_gate_clock exynos4x12_gate_clks[] __initdata = {
GATE(audss, "audss", "sclk_epll", E4X12_GATE_IP_MAUDIO, 0, 0, 0),
GATE(mdnie0, "mdnie0", "aclk160", GATE_IP_LCD0, 2, 0, 0),
GATE(rotator, "rotator", "aclk200", E4X12_GATE_IP_IMAGE, 1, 0, 0),
@@ -840,10 +838,11 @@ struct samsung_gate_clock exynos4x12_gate_clks[] __initdata = {
SRC_MASK_FSYS, 24, CLK_SET_RATE_PARENT, 0),
GATE(smmu_rotator, "smmu_rotator", "aclk200",
E4X12_GATE_IP_IMAGE, 4, 0, 0),
- GATE_A(mct, "mct", "aclk100", E4X12_GATE_IP_PERIR, 13, 0, 0, "mct"),
- GATE_A(rtc, "rtc", "aclk100", E4X12_GATE_IP_PERIR, 15, 0, 0, "rtc"),
- GATE_A(keyif, "keyif", "aclk100",
- E4X12_GATE_IP_PERIR, 16, 0, 0, "keypad"),
+	GATE(mct, "mct", "aclk100", E4X12_GATE_IP_PERIR, 13, 0, 0),
+	GATE(rtc, "rtc", "aclk100", E4X12_GATE_IP_PERIR, 15, 0, 0),
+ GATE(keyif, "keyif", "aclk100", E4X12_GATE_IP_PERIR, 16, 0, 0),
GATE(sclk_pwm_isp, "sclk_pwm_isp", "div_pwm_isp",
E4X12_SRC_MASK_ISP, 0, CLK_SET_RATE_PARENT, 0),
GATE(sclk_spi0_isp, "sclk_spi0_isp", "div_spi0_isp_pre",
@@ -860,12 +859,11 @@ struct samsung_gate_clock exynos4x12_gate_clks[] __initdata = {
E4X12_GATE_IP_ISP, 2, 0, 0),
GATE(uart_isp_sclk, "uart_isp_sclk", "sclk_uart_isp",
E4X12_GATE_IP_ISP, 3, 0, 0),
- GATE_A(wdt, "watchdog", "aclk100",
- E4X12_GATE_IP_PERIR, 14, 0, 0, "watchdog"),
- GATE_DA(pcm0, "samsung-pcm.0", "pcm0", "aclk100",
- E4X12_GATE_IP_MAUDIO, 2, 0, 0, "pcm"),
- GATE_DA(i2s0, "samsung-i2s.0", "i2s0", "aclk100",
- E4X12_GATE_IP_MAUDIO, 3, 0, 0, "iis"),
+ GATE(wdt, "watchdog", "aclk100", E4X12_GATE_IP_PERIR, 14, 0, 0),
+	GATE(pcm0, "pcm0", "aclk100", E4X12_GATE_IP_MAUDIO, 2, 0, 0),
+	GATE(i2s0, "i2s0", "aclk100", E4X12_GATE_IP_MAUDIO, 3, 0, 0),
GATE(fimc_isp, "isp", "aclk200", E4X12_GATE_ISP0, 0,
CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(fimc_drc, "drc", "aclk200", E4X12_GATE_ISP0, 1,
@@ -919,6 +917,21 @@ struct samsung_gate_clock exynos4x12_gate_clks[] __initdata = {
GATE(spi1_isp, "spi1_isp", "aclk200", E4X12_GATE_ISP1, 13,
CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(g2d, "g2d", "aclk200", GATE_IP_DMC, 23, 0, 0),
+ GATE(tmu_apbif, "tmu_apbif", "aclk100", E4X12_GATE_IP_PERIR, 17, 0, 0),
+};
+
+static struct samsung_clock_alias exynos4_aliases[] __initdata = {
+ ALIAS(mout_core, NULL, "moutcore"),
+ ALIAS(arm_clk, NULL, "armclk"),
+ ALIAS(sclk_apll, NULL, "mout_apll"),
+};
+
+static struct samsung_clock_alias exynos4210_aliases[] __initdata = {
+ ALIAS(sclk_mpll, NULL, "mout_mpll"),
+};
+
+static struct samsung_clock_alias exynos4x12_aliases[] __initdata = {
+ ALIAS(mout_mpll_user_c, NULL, "mout_mpll"),
};
/*
@@ -973,36 +986,116 @@ static void __init exynos4_clk_register_finpll(unsigned long xom)
}
-/*
- * This function allows non-dt platforms to specify the clock speed of the
- * xxti and xusbxti clocks. These clocks are then registered with the specified
- * clock speed.
- */
-void __init exynos4_clk_register_fixed_ext(unsigned long xxti_f,
- unsigned long xusbxti_f)
-{
- exynos4_fixed_rate_ext_clks[0].fixed_rate = xxti_f;
- exynos4_fixed_rate_ext_clks[1].fixed_rate = xusbxti_f;
- samsung_clk_register_fixed_rate(exynos4_fixed_rate_ext_clks,
- ARRAY_SIZE(exynos4_fixed_rate_ext_clks));
-}
-
-static __initdata struct of_device_id ext_clk_match[] = {
+static struct of_device_id ext_clk_match[] __initdata = {
{ .compatible = "samsung,clock-xxti", .data = (void *)0, },
{ .compatible = "samsung,clock-xusbxti", .data = (void *)1, },
{},
};
+/* PLL PMS values */
+static struct samsung_pll_rate_table exynos4210_apll_rates[] __initdata = {
+ PLL_45XX_RATE(1200000000, 150, 3, 1, 28),
+ PLL_45XX_RATE(1000000000, 250, 6, 1, 28),
+ PLL_45XX_RATE( 800000000, 200, 6, 1, 28),
+ PLL_45XX_RATE( 666857142, 389, 14, 1, 13),
+ PLL_45XX_RATE( 600000000, 100, 4, 1, 13),
+ PLL_45XX_RATE( 533000000, 533, 24, 1, 5),
+ PLL_45XX_RATE( 500000000, 250, 6, 2, 28),
+ PLL_45XX_RATE( 400000000, 200, 6, 2, 28),
+ PLL_45XX_RATE( 200000000, 200, 6, 3, 28),
+ { /* sentinel */ }
+};
+
+static struct samsung_pll_rate_table exynos4210_epll_rates[] __initdata = {
+ PLL_4600_RATE(192000000, 48, 3, 1, 0, 0),
+ PLL_4600_RATE(180633605, 45, 3, 1, 10381, 0),
+ PLL_4600_RATE(180000000, 45, 3, 1, 0, 0),
+ PLL_4600_RATE( 73727996, 73, 3, 3, 47710, 1),
+ PLL_4600_RATE( 67737602, 90, 4, 3, 20762, 1),
+ PLL_4600_RATE( 49151992, 49, 3, 3, 9961, 0),
+ PLL_4600_RATE( 45158401, 45, 3, 3, 10381, 0),
+ { /* sentinel */ }
+};
+
+static struct samsung_pll_rate_table exynos4210_vpll_rates[] __initdata = {
+ PLL_4650_RATE(360000000, 44, 3, 0, 1024, 0, 14, 0),
+ PLL_4650_RATE(324000000, 53, 2, 1, 1024, 1, 1, 1),
+ PLL_4650_RATE(259617187, 63, 3, 1, 1950, 0, 20, 1),
+ PLL_4650_RATE(110000000, 53, 3, 2, 2048, 0, 17, 0),
+ PLL_4650_RATE( 55360351, 53, 3, 3, 2417, 0, 17, 0),
+ { /* sentinel */ }
+};
+
+static struct samsung_pll_rate_table exynos4x12_apll_rates[] __initdata = {
+ PLL_35XX_RATE(1500000000, 250, 4, 0),
+ PLL_35XX_RATE(1400000000, 175, 3, 0),
+ PLL_35XX_RATE(1300000000, 325, 6, 0),
+ PLL_35XX_RATE(1200000000, 200, 4, 0),
+ PLL_35XX_RATE(1100000000, 275, 6, 0),
+ PLL_35XX_RATE(1000000000, 125, 3, 0),
+ PLL_35XX_RATE( 900000000, 150, 4, 0),
+ PLL_35XX_RATE( 800000000, 100, 3, 0),
+ PLL_35XX_RATE( 700000000, 175, 3, 1),
+ PLL_35XX_RATE( 600000000, 200, 4, 1),
+ PLL_35XX_RATE( 500000000, 125, 3, 1),
+ PLL_35XX_RATE( 400000000, 100, 3, 1),
+ PLL_35XX_RATE( 300000000, 200, 4, 2),
+ PLL_35XX_RATE( 200000000, 100, 3, 2),
+ { /* sentinel */ }
+};
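
These rows follow the usual integer pll_35xx relation, the one
samsung_pll35xx_recalc_rate() computes later in this patch:
fout = fin * MDIV / (PDIV << SDIV). A standalone self-check of one table
row, assuming the usual 24 MHz fin_pll (illustrative program, not part of
the patch):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t fin = 24000000;
		/* PLL_35XX_RATE(800000000, 100, 3, 0) from the table above */
		uint64_t mdiv = 100, pdiv = 3, sdiv = 0;
		uint64_t fout = fin * mdiv / (pdiv << sdiv);

		printf("%llu\n", (unsigned long long)fout);	/* 800000000 */
		return 0;
	}
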
+
+static struct samsung_pll_rate_table exynos4x12_epll_rates[] __initdata = {
+ PLL_36XX_RATE(192000000, 48, 3, 1, 0),
+ PLL_36XX_RATE(180633605, 45, 3, 1, 10381),
+ PLL_36XX_RATE(180000000, 45, 3, 1, 0),
+ PLL_36XX_RATE( 73727996, 73, 3, 3, 47710),
+ PLL_36XX_RATE( 67737602, 90, 4, 3, 20762),
+ PLL_36XX_RATE( 49151992, 49, 3, 3, 9961),
+ PLL_36XX_RATE( 45158401, 45, 3, 3, 10381),
+ { /* sentinel */ }
+};
+
+static struct samsung_pll_rate_table exynos4x12_vpll_rates[] __initdata = {
+ PLL_36XX_RATE(533000000, 133, 3, 1, 16384),
+ PLL_36XX_RATE(440000000, 110, 3, 1, 0),
+ PLL_36XX_RATE(350000000, 175, 3, 2, 0),
+ PLL_36XX_RATE(266000000, 133, 3, 2, 0),
+ PLL_36XX_RATE(160000000, 160, 3, 3, 0),
+ PLL_36XX_RATE(106031250, 53, 3, 2, 1024),
+ PLL_36XX_RATE( 53015625, 53, 3, 3, 1024),
+ { /* sentinel */ }
+};
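
The pll_36xx entries add a 16-bit fractional term KDIV, matching
samsung_pll36xx_recalc_rate(): fout = fin * (MDIV + KDIV / 65536) /
(PDIV << SDIV). Checking the 192 MHz EPLL row the same way, in the
fixed-point form the driver uses (24 MHz fin assumed; illustrative only):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t fin = 24000000;
		/* PLL_36XX_RATE(192000000, 48, 3, 1, 0): m=48, p=3, s=1, k=0 */
		uint64_t fvco = fin * ((48ULL << 16) + 0);
		fvco /= (3 << 1);	/* PDIV << SDIV */
		fvco >>= 16;		/* back to Hz */

		printf("%llu\n", (unsigned long long)fvco);	/* 192000000 */
		return 0;
	}
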
+
+static struct samsung_pll_clock exynos4210_plls[nr_plls] __initdata = {
+ [apll] = PLL_A(pll_4508, fout_apll, "fout_apll", "fin_pll", APLL_LOCK,
+ APLL_CON0, "fout_apll", NULL),
+ [mpll] = PLL_A(pll_4508, fout_mpll, "fout_mpll", "fin_pll",
+ E4210_MPLL_LOCK, E4210_MPLL_CON0, "fout_mpll", NULL),
+ [epll] = PLL_A(pll_4600, fout_epll, "fout_epll", "fin_pll", EPLL_LOCK,
+ EPLL_CON0, "fout_epll", NULL),
+ [vpll] = PLL_A(pll_4650c, fout_vpll, "fout_vpll", "mout_vpllsrc",
+ VPLL_LOCK, VPLL_CON0, "fout_vpll", NULL),
+};
+
+static struct samsung_pll_clock exynos4x12_plls[nr_plls] __initdata = {
+ [apll] = PLL(pll_35xx, fout_apll, "fout_apll", "fin_pll",
+ APLL_LOCK, APLL_CON0, NULL),
+ [mpll] = PLL(pll_35xx, fout_mpll, "fout_mpll", "fin_pll",
+ E4X12_MPLL_LOCK, E4X12_MPLL_CON0, NULL),
+ [epll] = PLL(pll_36xx, fout_epll, "fout_epll", "fin_pll",
+ EPLL_LOCK, EPLL_CON0, NULL),
+ [vpll] = PLL(pll_36xx, fout_vpll, "fout_vpll", "fin_pll",
+ VPLL_LOCK, VPLL_CON0, NULL),
+};
+
/* register exynos4 clocks */
-void __init exynos4_clk_init(struct device_node *np, enum exynos4_soc exynos4_soc, void __iomem *reg_base, unsigned long xom)
+static void __init exynos4_clk_init(struct device_node *np,
+ enum exynos4_soc exynos4_soc,
+ void __iomem *reg_base, unsigned long xom)
{
- struct clk *apll, *mpll, *epll, *vpll;
-
- if (np) {
- reg_base = of_iomap(np, 0);
- if (!reg_base)
- panic("%s: failed to map registers\n", __func__);
- }
+ reg_base = of_iomap(np, 0);
+ if (!reg_base)
+ panic("%s: failed to map registers\n", __func__);
if (exynos4_soc == EXYNOS4210)
samsung_clk_init(np, reg_base, nr_clks,
@@ -1013,37 +1106,42 @@ void __init exynos4_clk_init(struct device_node *np, enum exynos4_soc exynos4_so
exynos4_clk_regs, ARRAY_SIZE(exynos4_clk_regs),
exynos4x12_clk_save, ARRAY_SIZE(exynos4x12_clk_save));
- if (np)
- samsung_clk_of_register_fixed_ext(exynos4_fixed_rate_ext_clks,
+ samsung_clk_of_register_fixed_ext(exynos4_fixed_rate_ext_clks,
ARRAY_SIZE(exynos4_fixed_rate_ext_clks),
ext_clk_match);
exynos4_clk_register_finpll(xom);
if (exynos4_soc == EXYNOS4210) {
- apll = samsung_clk_register_pll45xx("fout_apll", "fin_pll",
- reg_base + APLL_CON0, pll_4508);
- mpll = samsung_clk_register_pll45xx("fout_mpll", "fin_pll",
- reg_base + E4210_MPLL_CON0, pll_4508);
- epll = samsung_clk_register_pll46xx("fout_epll", "fin_pll",
- reg_base + EPLL_CON0, pll_4600);
- vpll = samsung_clk_register_pll46xx("fout_vpll", "mout_vpllsrc",
- reg_base + VPLL_CON0, pll_4650c);
+ samsung_clk_register_mux(exynos4210_mux_early,
+ ARRAY_SIZE(exynos4210_mux_early));
+
+ if (_get_rate("fin_pll") == 24000000) {
+ exynos4210_plls[apll].rate_table =
+ exynos4210_apll_rates;
+ exynos4210_plls[epll].rate_table =
+ exynos4210_epll_rates;
+ }
+
+ if (_get_rate("mout_vpllsrc") == 24000000)
+ exynos4210_plls[vpll].rate_table =
+ exynos4210_vpll_rates;
+
+ samsung_clk_register_pll(exynos4210_plls,
+ ARRAY_SIZE(exynos4210_plls), reg_base);
} else {
- apll = samsung_clk_register_pll35xx("fout_apll", "fin_pll",
- reg_base + APLL_CON0);
- mpll = samsung_clk_register_pll35xx("fout_mpll", "fin_pll",
- reg_base + E4X12_MPLL_CON0);
- epll = samsung_clk_register_pll36xx("fout_epll", "fin_pll",
- reg_base + EPLL_CON0);
- vpll = samsung_clk_register_pll36xx("fout_vpll", "fin_pll",
- reg_base + VPLL_CON0);
- }
+ if (_get_rate("fin_pll") == 24000000) {
+ exynos4x12_plls[apll].rate_table =
+ exynos4x12_apll_rates;
+ exynos4x12_plls[epll].rate_table =
+ exynos4x12_epll_rates;
+ exynos4x12_plls[vpll].rate_table =
+ exynos4x12_vpll_rates;
+ }
- samsung_clk_add_lookup(apll, fout_apll);
- samsung_clk_add_lookup(mpll, fout_mpll);
- samsung_clk_add_lookup(epll, fout_epll);
- samsung_clk_add_lookup(vpll, fout_vpll);
+ samsung_clk_register_pll(exynos4x12_plls,
+ ARRAY_SIZE(exynos4x12_plls), reg_base);
+ }
samsung_clk_register_fixed_rate(exynos4_fixed_rate_clks,
ARRAY_SIZE(exynos4_fixed_rate_clks));
@@ -1063,6 +1161,8 @@ void __init exynos4_clk_init(struct device_node *np, enum exynos4_soc exynos4_so
ARRAY_SIZE(exynos4210_div_clks));
samsung_clk_register_gate(exynos4210_gate_clks,
ARRAY_SIZE(exynos4210_gate_clks));
+ samsung_clk_register_alias(exynos4210_aliases,
+ ARRAY_SIZE(exynos4210_aliases));
} else {
samsung_clk_register_mux(exynos4x12_mux_clks,
ARRAY_SIZE(exynos4x12_mux_clks));
@@ -1070,14 +1170,19 @@ void __init exynos4_clk_init(struct device_node *np, enum exynos4_soc exynos4_so
ARRAY_SIZE(exynos4x12_div_clks));
samsung_clk_register_gate(exynos4x12_gate_clks,
ARRAY_SIZE(exynos4x12_gate_clks));
+ samsung_clk_register_alias(exynos4x12_aliases,
+ ARRAY_SIZE(exynos4x12_aliases));
}
+ samsung_clk_register_alias(exynos4_aliases,
+ ARRAY_SIZE(exynos4_aliases));
+
pr_info("%s clocks: sclk_apll = %ld, sclk_mpll = %ld\n"
"\tsclk_epll = %ld, sclk_vpll = %ld, arm_clk = %ld\n",
exynos4_soc == EXYNOS4210 ? "Exynos4210" : "Exynos4x12",
- _get_rate("sclk_apll"), _get_rate("mout_mpll"),
+ _get_rate("sclk_apll"), _get_rate("sclk_mpll"),
_get_rate("sclk_epll"), _get_rate("sclk_vpll"),
- _get_rate("armclk"));
+ _get_rate("arm_clk"));
}
diff --git a/drivers/clk/samsung/clk-exynos5250.c b/drivers/clk/samsung/clk-exynos5250.c
index 6f767c5..adf3234 100644
--- a/drivers/clk/samsung/clk-exynos5250.c
+++ b/drivers/clk/samsung/clk-exynos5250.c
@@ -17,11 +17,22 @@
#include <linux/of_address.h>
#include "clk.h"
-#include "clk-pll.h"
+#define APLL_LOCK 0x0
+#define APLL_CON0 0x100
#define SRC_CPU 0x200
#define DIV_CPU0 0x500
+#define MPLL_LOCK 0x4000
+#define MPLL_CON0 0x4100
#define SRC_CORE1 0x4204
+#define CPLL_LOCK 0x10020
+#define EPLL_LOCK 0x10030
+#define VPLL_LOCK 0x10040
+#define GPLL_LOCK 0x10050
+#define CPLL_CON0 0x10120
+#define EPLL_CON0 0x10130
+#define VPLL_CON0 0x10140
+#define GPLL_CON0 0x10150
#define SRC_TOP0 0x10210
#define SRC_TOP2 0x10218
#define SRC_GSCL 0x10220
@@ -59,9 +70,18 @@
#define GATE_IP_FSYS 0x10944
#define GATE_IP_PERIC 0x10950
#define GATE_IP_PERIS 0x10960
+#define BPLL_LOCK 0x20010
+#define BPLL_CON0 0x20110
#define SRC_CDREX 0x20200
#define PLL_DIV2_SEL 0x20a24
#define GATE_IP_DISP1 0x10928
+#define GATE_IP_ACP 0x10000
+
+/* list of PLLs to be registered */
+enum exynos5250_plls {
+ apll, mpll, cpll, epll, vpll, gpll, bpll,
+ nr_plls /* number of PLLs */
+};
/*
* Let each supported clock get a unique id. This id is used to lookup the clock
@@ -79,7 +99,8 @@ enum exynos5250_clks {
none,
/* core clocks */
- fin_pll,
+ fin_pll, fout_apll, fout_mpll, fout_bpll, fout_gpll, fout_cpll,
+ fout_epll, fout_vpll,
/* gate for special clocks (sclk) */
sclk_cam_bayer = 128, sclk_cam0, sclk_cam1, sclk_gscl_wa, sclk_gscl_wb,
@@ -87,7 +108,7 @@ enum exynos5250_clks {
sclk_mmc0, sclk_mmc1, sclk_mmc2, sclk_mmc3, sclk_sata, sclk_usb3,
sclk_jpeg, sclk_uart0, sclk_uart1, sclk_uart2, sclk_uart3, sclk_pwm,
sclk_audio1, sclk_audio2, sclk_spdif, sclk_spi0, sclk_spi1, sclk_spi2,
- div_i2s1, div_i2s2,
+ div_i2s1, div_i2s2, sclk_hdmiphy,
/* gate clocks */
gscl0 = 256, gscl1, gscl2, gscl3, gscl_wa, gscl_wb, smmu_gscl0,
@@ -99,7 +120,10 @@ enum exynos5250_clks {
spi2, i2s1, i2s2, pcm1, pcm2, pwm, spdif, ac97, hsi2c0, hsi2c1, hsi2c2,
hsi2c3, chipid, sysreg, pmu, cmu_top, cmu_core, cmu_mem, tzpc0, tzpc1,
tzpc2, tzpc3, tzpc4, tzpc5, tzpc6, tzpc7, tzpc8, tzpc9, hdmi_cec, mct,
- wdt, rtc, tmu, fimd1, mie1, dsim0, dp, mixer, hdmi,
+ wdt, rtc, tmu, fimd1, mie1, dsim0, dp, mixer, hdmi, g2d,
+
+ /* mux clocks */
+ mout_hdmi = 1024,
nr_clks,
};
@@ -108,7 +132,7 @@ enum exynos5250_clks {
* list of controller registers to be saved and restored during a
* suspend/resume cycle.
*/
-static __initdata unsigned long exynos5250_clk_regs[] = {
+static unsigned long exynos5250_clk_regs[] __initdata = {
SRC_CPU,
DIV_CPU0,
SRC_CORE1,
@@ -152,6 +176,7 @@ static __initdata unsigned long exynos5250_clk_regs[] = {
SRC_CDREX,
PLL_DIV2_SEL,
GATE_IP_DISP1,
+ GATE_IP_ACP,
};
/* list of all parent clock list */
@@ -191,31 +216,34 @@ PNAME(mout_spdif_p) = { "sclk_audio0", "sclk_audio1", "sclk_audio2",
"spdif_extclk" };
/* fixed rate clocks generated outside the soc */
-struct samsung_fixed_rate_clock exynos5250_fixed_rate_ext_clks[] __initdata = {
+static struct samsung_fixed_rate_clock exynos5250_fixed_rate_ext_clks[] __initdata = {
FRATE(fin_pll, "fin_pll", NULL, CLK_IS_ROOT, 0),
};
/* fixed rate clocks generated inside the soc */
-struct samsung_fixed_rate_clock exynos5250_fixed_rate_clks[] __initdata = {
- FRATE(none, "sclk_hdmiphy", NULL, CLK_IS_ROOT, 24000000),
+static struct samsung_fixed_rate_clock exynos5250_fixed_rate_clks[] __initdata = {
+ FRATE(sclk_hdmiphy, "sclk_hdmiphy", NULL, CLK_IS_ROOT, 24000000),
FRATE(none, "sclk_hdmi27m", NULL, CLK_IS_ROOT, 27000000),
FRATE(none, "sclk_dptxphy", NULL, CLK_IS_ROOT, 24000000),
FRATE(none, "sclk_uhostphy", NULL, CLK_IS_ROOT, 48000000),
};
-struct samsung_fixed_factor_clock exynos5250_fixed_factor_clks[] __initdata = {
+static struct samsung_fixed_factor_clock exynos5250_fixed_factor_clks[] __initdata = {
FFACTOR(none, "fout_mplldiv2", "fout_mpll", 1, 2, 0),
FFACTOR(none, "fout_bplldiv2", "fout_bpll", 1, 2, 0),
};
-struct samsung_mux_clock exynos5250_mux_clks[] __initdata = {
+static struct samsung_mux_clock exynos5250_pll_pmux_clks[] __initdata = {
+ MUX(none, "mout_vpllsrc", mout_vpllsrc_p, SRC_TOP2, 0, 1),
+};
+
+static struct samsung_mux_clock exynos5250_mux_clks[] __initdata = {
MUX_A(none, "mout_apll", mout_apll_p, SRC_CPU, 0, 1, "mout_apll"),
MUX_A(none, "mout_cpu", mout_cpu_p, SRC_CPU, 16, 1, "mout_cpu"),
MUX(none, "mout_mpll_fout", mout_mpll_fout_p, PLL_DIV2_SEL, 4, 1),
MUX_A(none, "sclk_mpll", mout_mpll_p, SRC_CORE1, 8, 1, "mout_mpll"),
MUX(none, "mout_bpll_fout", mout_bpll_fout_p, PLL_DIV2_SEL, 0, 1),
MUX(none, "sclk_bpll", mout_bpll_p, SRC_CDREX, 0, 1),
- MUX(none, "mout_vpllsrc", mout_vpllsrc_p, SRC_TOP2, 0, 1),
MUX(none, "sclk_vpll", mout_vpll_p, SRC_TOP2, 16, 1),
MUX(none, "sclk_epll", mout_epll_p, SRC_TOP2, 12, 1),
MUX(none, "sclk_cpll", mout_cpll_p, SRC_TOP2, 8, 1),
@@ -232,7 +260,7 @@ struct samsung_mux_clock exynos5250_mux_clks[] __initdata = {
MUX(none, "mout_fimd1", mout_group1_p, SRC_DISP1_0, 0, 4),
MUX(none, "mout_mipi1", mout_group1_p, SRC_DISP1_0, 12, 4),
MUX(none, "mout_dp", mout_group1_p, SRC_DISP1_0, 16, 4),
- MUX(none, "mout_hdmi", mout_hdmi_p, SRC_DISP1_0, 20, 1),
+ MUX(mout_hdmi, "mout_hdmi", mout_hdmi_p, SRC_DISP1_0, 20, 1),
MUX(none, "mout_audio0", mout_audio0_p, SRC_MAU, 0, 4),
MUX(none, "mout_mmc0", mout_group1_p, SRC_FSYS, 0, 4),
MUX(none, "mout_mmc1", mout_group1_p, SRC_FSYS, 4, 4),
@@ -254,7 +282,7 @@ struct samsung_mux_clock exynos5250_mux_clks[] __initdata = {
MUX(none, "mout_spi2", mout_group1_p, SRC_PERIC1, 24, 4),
};
-struct samsung_div_clock exynos5250_div_clks[] __initdata = {
+static struct samsung_div_clock exynos5250_div_clks[] __initdata = {
DIV(none, "div_arm", "mout_cpu", DIV_CPU0, 0, 3),
DIV(none, "sclk_apll", "mout_apll", DIV_CPU0, 24, 3),
DIV(none, "aclk66_pre", "sclk_mpll_user", DIV_TOP1, 24, 3),
@@ -314,7 +342,7 @@ struct samsung_div_clock exynos5250_div_clks[] __initdata = {
DIV_PERIC2, 8, 8, CLK_SET_RATE_PARENT, 0),
};
-struct samsung_gate_clock exynos5250_gate_clks[] __initdata = {
+static struct samsung_gate_clock exynos5250_gate_clks[] __initdata = {
GATE(gscl0, "gscl0", "none", GATE_IP_GSCL, 0, 0, 0),
GATE(gscl1, "gscl1", "none", GATE_IP_GSCL, 1, 0, 0),
GATE(gscl2, "gscl2", "aclk266", GATE_IP_GSCL, 2, 0, 0),
@@ -461,20 +489,60 @@ struct samsung_gate_clock exynos5250_gate_clks[] __initdata = {
GATE(mie1, "mie1", "aclk200", GATE_IP_DISP1, 1, 0, 0),
GATE(dsim0, "dsim0", "aclk200", GATE_IP_DISP1, 3, 0, 0),
GATE(dp, "dp", "aclk200", GATE_IP_DISP1, 4, 0, 0),
- GATE(mixer, "mixer", "aclk200", GATE_IP_DISP1, 5, 0, 0),
- GATE(hdmi, "hdmi", "aclk200", GATE_IP_DISP1, 6, 0, 0),
+ GATE(mixer, "mixer", "mout_aclk200_disp1", GATE_IP_DISP1, 5, 0, 0),
+ GATE(hdmi, "hdmi", "mout_aclk200_disp1", GATE_IP_DISP1, 6, 0, 0),
+ GATE(g2d, "g2d", "aclk200", GATE_IP_ACP, 3, 0, 0),
+};
+
+static struct samsung_pll_rate_table vpll_24mhz_tbl[] __initdata = {
+ /* sorted in descending order */
+ /* PLL_36XX_RATE(rate, m, p, s, k) */
+ PLL_36XX_RATE(266000000, 266, 3, 3, 0),
+	/* Not in UM, but needed for eDP on snow */
+ PLL_36XX_RATE(70500000, 94, 2, 4, 0),
+ { },
+};
+
+static struct samsung_pll_rate_table epll_24mhz_tbl[] __initdata = {
+ /* sorted in descending order */
+ /* PLL_36XX_RATE(rate, m, p, s, k) */
+ PLL_36XX_RATE(192000000, 64, 2, 2, 0),
+ PLL_36XX_RATE(180633600, 90, 3, 2, 20762),
+ PLL_36XX_RATE(180000000, 90, 3, 2, 0),
+ PLL_36XX_RATE(73728000, 98, 2, 4, 19923),
+ PLL_36XX_RATE(67737600, 90, 2, 4, 20762),
+ PLL_36XX_RATE(49152000, 98, 3, 4, 19923),
+ PLL_36XX_RATE(45158400, 90, 3, 4, 20762),
+ PLL_36XX_RATE(32768000, 131, 3, 5, 4719),
+ { },
+};
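
The otherwise odd-looking EPLL frequencies are exact multiples of the
standard audio sample rates, which fits the EPLL's role as an audio source
(the audss gate above is parented on "sclk_epll"):

	45158400 / 44100 == 1024
	49152000 / 48000 == 1024
	67737600 / 44100 == 1536
	73728000 / 48000 == 1536
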
+
+static struct samsung_pll_clock exynos5250_plls[nr_plls] __initdata = {
+ [apll] = PLL_A(pll_35xx, fout_apll, "fout_apll", "fin_pll", APLL_LOCK,
+ APLL_CON0, "fout_apll", NULL),
+ [mpll] = PLL_A(pll_35xx, fout_mpll, "fout_mpll", "fin_pll", MPLL_LOCK,
+ MPLL_CON0, "fout_mpll", NULL),
+ [bpll] = PLL(pll_35xx, fout_bpll, "fout_bpll", "fin_pll", BPLL_LOCK,
+ BPLL_CON0, NULL),
+ [gpll] = PLL(pll_35xx, fout_gpll, "fout_gpll", "fin_pll", GPLL_LOCK,
+ GPLL_CON0, NULL),
+ [cpll] = PLL(pll_35xx, fout_cpll, "fout_cpll", "fin_pll", CPLL_LOCK,
+ CPLL_CON0, NULL),
+ [epll] = PLL(pll_36xx, fout_epll, "fout_epll", "fin_pll", EPLL_LOCK,
+ EPLL_CON0, NULL),
+ [vpll] = PLL(pll_36xx, fout_vpll, "fout_vpll", "mout_vpllsrc",
+ VPLL_LOCK, VPLL_CON0, NULL),
};
-static __initdata struct of_device_id ext_clk_match[] = {
+static struct of_device_id ext_clk_match[] __initdata = {
{ .compatible = "samsung,clock-xxti", .data = (void *)0, },
{ },
};
 /* register exynos5250 clocks */
-void __init exynos5250_clk_init(struct device_node *np)
+static void __init exynos5250_clk_init(struct device_node *np)
{
void __iomem *reg_base;
- struct clk *apll, *mpll, *epll, *vpll, *bpll, *gpll, *cpll;
if (np) {
reg_base = of_iomap(np, 0);
@@ -490,22 +558,17 @@ void __init exynos5250_clk_init(struct device_node *np)
samsung_clk_of_register_fixed_ext(exynos5250_fixed_rate_ext_clks,
ARRAY_SIZE(exynos5250_fixed_rate_ext_clks),
ext_clk_match);
+ samsung_clk_register_mux(exynos5250_pll_pmux_clks,
+ ARRAY_SIZE(exynos5250_pll_pmux_clks));
+
+ if (_get_rate("fin_pll") == 24 * MHZ)
+ exynos5250_plls[epll].rate_table = epll_24mhz_tbl;
- apll = samsung_clk_register_pll35xx("fout_apll", "fin_pll",
- reg_base + 0x100);
- mpll = samsung_clk_register_pll35xx("fout_mpll", "fin_pll",
- reg_base + 0x4100);
- bpll = samsung_clk_register_pll35xx("fout_bpll", "fin_pll",
- reg_base + 0x20110);
- gpll = samsung_clk_register_pll35xx("fout_gpll", "fin_pll",
- reg_base + 0x10150);
- cpll = samsung_clk_register_pll35xx("fout_cpll", "fin_pll",
- reg_base + 0x10120);
- epll = samsung_clk_register_pll36xx("fout_epll", "fin_pll",
- reg_base + 0x10130);
- vpll = samsung_clk_register_pll36xx("fout_vpll", "mout_vpllsrc",
- reg_base + 0x10140);
+ if (_get_rate("mout_vpllsrc") == 24 * MHZ)
+ exynos5250_plls[vpll].rate_table = vpll_24mhz_tbl;
+ samsung_clk_register_pll(exynos5250_plls, ARRAY_SIZE(exynos5250_plls),
+ reg_base);
samsung_clk_register_fixed_rate(exynos5250_fixed_rate_clks,
ARRAY_SIZE(exynos5250_fixed_rate_clks));
samsung_clk_register_fixed_factor(exynos5250_fixed_factor_clks,
diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c
index 68a96cb..48c4a93 100644
--- a/drivers/clk/samsung/clk-exynos5420.c
+++ b/drivers/clk/samsung/clk-exynos5420.c
@@ -17,13 +17,30 @@
#include <linux/of_address.h>
#include "clk.h"
-#include "clk-pll.h"
+#define APLL_LOCK 0x0
+#define APLL_CON0 0x100
#define SRC_CPU 0x200
#define DIV_CPU0 0x500
#define DIV_CPU1 0x504
#define GATE_BUS_CPU 0x700
#define GATE_SCLK_CPU 0x800
+#define CPLL_LOCK 0x10020
+#define DPLL_LOCK 0x10030
+#define EPLL_LOCK 0x10040
+#define RPLL_LOCK 0x10050
+#define IPLL_LOCK 0x10060
+#define SPLL_LOCK 0x10070
+#define VPLL_LOCK		0x10080
+#define MPLL_LOCK 0x10090
+#define CPLL_CON0 0x10120
+#define DPLL_CON0 0x10128
+#define EPLL_CON0 0x10130
+#define RPLL_CON0 0x10140
+#define IPLL_CON0 0x10150
+#define SPLL_CON0 0x10160
+#define VPLL_CON0 0x10170
+#define MPLL_CON0 0x10180
#define SRC_TOP0 0x10200
#define SRC_TOP1 0x10204
#define SRC_TOP2 0x10208
@@ -75,15 +92,27 @@
#define GATE_TOP_SCLK_MAU 0x1083c
#define GATE_TOP_SCLK_FSYS 0x10840
#define GATE_TOP_SCLK_PERIC 0x10850
+#define BPLL_LOCK 0x20010
+#define BPLL_CON0 0x20110
#define SRC_CDREX 0x20200
+#define KPLL_LOCK 0x28000
+#define KPLL_CON0 0x28100
#define SRC_KFC 0x28200
#define DIV_KFC0 0x28500
+/* list of PLLs */
+enum exynos5420_plls {
+ apll, cpll, dpll, epll, rpll, ipll, spll, vpll, mpll,
+ bpll, kpll,
+ nr_plls /* number of PLLs */
+};
+
enum exynos5420_clks {
none,
/* core clocks */
- fin_pll,
+ fin_pll, fout_apll, fout_cpll, fout_dpll, fout_epll, fout_rpll,
+ fout_ipll, fout_spll, fout_vpll, fout_mpll, fout_bpll, fout_kpll,
/* gate for special clocks (sclk) */
sclk_uart0 = 128, sclk_uart1, sclk_uart2, sclk_uart3, sclk_mmc0,
@@ -91,7 +120,7 @@ enum exynos5420_clks {
sclk_i2s2, sclk_pcm1, sclk_pcm2, sclk_spdif, sclk_hdmi, sclk_pixel,
sclk_dp1, sclk_mipi1, sclk_fimd1, sclk_maudio0, sclk_maupcm0,
sclk_usbd300, sclk_usbd301, sclk_usbphy300, sclk_usbphy301, sclk_unipro,
- sclk_pwm, sclk_gscl_wa, sclk_gscl_wb,
+ sclk_pwm, sclk_gscl_wa, sclk_gscl_wb, sclk_hdmiphy,
/* gate clocks */
aclk66_peric = 256, uart0, uart1, uart2, uart3, i2c0, i2c1, i2c2, i2c3,
@@ -109,7 +138,13 @@ enum exynos5420_clks {
aclk300_gscl = 460, smmu_gscl0, smmu_gscl1, gscl_wa, gscl_wb, gscl0,
gscl1, clk_3aa, aclk266_g2d = 470, sss, slim_sss, mdma0,
aclk333_g2d = 480, g2d, aclk333_432_gscl = 490, smmu_3aa, smmu_fimcl0,
- smmu_fimcl1, smmu_fimcl3, fimc_lite3, aclk_g3d = 500, g3d,
+ smmu_fimcl1, smmu_fimcl3, fimc_lite3, aclk_g3d = 500, g3d, smmu_mixer,
+
+ /* mux clocks */
+ mout_hdmi = 640,
+
+ /* divider clocks */
+ dout_pixel = 768,
nr_clks,
};
@@ -118,7 +153,7 @@ enum exynos5420_clks {
* list of controller registers to be saved and restored during a
* suspend/resume cycle.
*/
-static __initdata unsigned long exynos5420_clk_regs[] = {
+static unsigned long exynos5420_clk_regs[] __initdata = {
SRC_CPU,
DIV_CPU0,
DIV_CPU1,
@@ -257,29 +292,29 @@ PNAME(audio2_p) = { "fin_pll", "cdclk2", "sclk_dpll", "sclk_mpll",
"sclk_spll", "sclk_ipll", "sclk_epll", "sclk_rpll" };
PNAME(spdif_p) = { "fin_pll", "dout_audio0", "dout_audio1", "dout_audio2",
"spdif_extclk", "sclk_ipll", "sclk_epll", "sclk_rpll" };
-PNAME(hdmi_p) = { "sclk_hdmiphy", "dout_hdmi_pixel" };
+PNAME(hdmi_p) = { "dout_hdmi_pixel", "sclk_hdmiphy" };
PNAME(maudio0_p) = { "fin_pll", "maudio_clk", "sclk_dpll", "sclk_mpll",
"sclk_spll", "sclk_ipll", "sclk_epll", "sclk_rpll" };
/* fixed rate clocks generated outside the soc */
-struct samsung_fixed_rate_clock exynos5420_fixed_rate_ext_clks[] __initdata = {
+static struct samsung_fixed_rate_clock exynos5420_fixed_rate_ext_clks[] __initdata = {
FRATE(fin_pll, "fin_pll", NULL, CLK_IS_ROOT, 0),
};
/* fixed rate clocks generated inside the soc */
-struct samsung_fixed_rate_clock exynos5420_fixed_rate_clks[] __initdata = {
- FRATE(none, "sclk_hdmiphy", NULL, CLK_IS_ROOT, 24000000),
+static struct samsung_fixed_rate_clock exynos5420_fixed_rate_clks[] __initdata = {
+ FRATE(sclk_hdmiphy, "sclk_hdmiphy", NULL, CLK_IS_ROOT, 24000000),
FRATE(none, "sclk_pwi", NULL, CLK_IS_ROOT, 24000000),
FRATE(none, "sclk_usbh20", NULL, CLK_IS_ROOT, 48000000),
FRATE(none, "mphy_refclk_ixtal24", NULL, CLK_IS_ROOT, 48000000),
FRATE(none, "sclk_usbh20_scan_clk", NULL, CLK_IS_ROOT, 480000000),
};
-struct samsung_fixed_factor_clock exynos5420_fixed_factor_clks[] __initdata = {
+static struct samsung_fixed_factor_clock exynos5420_fixed_factor_clks[] __initdata = {
FFACTOR(none, "sclk_hsic_12m", "fin_pll", 1, 2, 0),
};
-struct samsung_mux_clock exynos5420_mux_clks[] __initdata = {
+static struct samsung_mux_clock exynos5420_mux_clks[] __initdata = {
MUX(none, "mout_mspll_kfc", mspll_cpu_p, SRC_TOP7, 8, 2),
MUX(none, "mout_mspll_cpu", mspll_cpu_p, SRC_TOP7, 12, 2),
MUX(none, "mout_apll", apll_p, SRC_CPU, 0, 1),
@@ -371,7 +406,7 @@ struct samsung_mux_clock exynos5420_mux_clks[] __initdata = {
MUX(none, "mout_mipi1", group2_p, SRC_DISP10, 16, 3),
MUX(none, "mout_dp1", group2_p, SRC_DISP10, 20, 3),
MUX(none, "mout_pixel", group2_p, SRC_DISP10, 24, 3),
- MUX(none, "mout_hdmi", hdmi_p, SRC_DISP10, 28, 1),
+ MUX(mout_hdmi, "mout_hdmi", hdmi_p, SRC_DISP10, 28, 1),
/* MAU Block */
MUX(none, "mout_maudio0", maudio0_p, SRC_MAU, 28, 3),
@@ -399,7 +434,7 @@ struct samsung_mux_clock exynos5420_mux_clks[] __initdata = {
MUX(none, "mout_spi2", group2_p, SRC_PERIC1, 28, 3),
};
-struct samsung_div_clock exynos5420_div_clks[] __initdata = {
+static struct samsung_div_clock exynos5420_div_clks[] __initdata = {
DIV(none, "div_arm", "mout_cpu", DIV_CPU0, 0, 3),
DIV(none, "sclk_apll", "mout_apll", DIV_CPU0, 24, 3),
DIV(none, "armclk2", "div_arm", DIV_CPU0, 28, 3),
@@ -431,7 +466,7 @@ struct samsung_div_clock exynos5420_div_clks[] __initdata = {
DIV(none, "dout_fimd1", "mout_fimd1", DIV_DISP10, 0, 4),
DIV(none, "dout_mipi1", "mout_mipi1", DIV_DISP10, 16, 8),
DIV(none, "dout_dp1", "mout_dp1", DIV_DISP10, 24, 4),
- DIV(none, "dout_hdmi_pixel", "mout_pixel", DIV_DISP10, 28, 4),
+ DIV(dout_pixel, "dout_hdmi_pixel", "mout_pixel", DIV_DISP10, 28, 4),
/* Audio Block */
DIV(none, "dout_maudio0", "mout_maudio0", DIV_MAU, 20, 4),
@@ -479,7 +514,7 @@ struct samsung_div_clock exynos5420_div_clks[] __initdata = {
DIV(none, "dout_pre_spi2", "dout_spi2", DIV_PERIC4, 24, 8),
};
-struct samsung_gate_clock exynos5420_gate_clks[] __initdata = {
+static struct samsung_gate_clock exynos5420_gate_clks[] __initdata = {
/* TODO: Re-verify the CG bits for all the gate clocks */
GATE_A(mct, "pclk_st", "aclk66_psgen", GATE_BUS_PERIS1, 2, 0, 0, "mct"),
@@ -696,19 +731,43 @@ struct samsung_gate_clock exynos5420_gate_clks[] __initdata = {
GATE(smmu_mscl0, "smmu_mscl0", "aclk400_mscl", GATE_IP_MSCL, 8, 0, 0),
GATE(smmu_mscl1, "smmu_mscl1", "aclk400_mscl", GATE_IP_MSCL, 9, 0, 0),
GATE(smmu_mscl2, "smmu_mscl2", "aclk400_mscl", GATE_IP_MSCL, 10, 0, 0),
+ GATE(smmu_mixer, "smmu_mixer", "aclk200_disp1", GATE_IP_DISP1, 9, 0, 0),
};
-static __initdata struct of_device_id ext_clk_match[] = {
+static struct samsung_pll_clock exynos5420_plls[nr_plls] __initdata = {
+ [apll] = PLL(pll_2550, fout_apll, "fout_apll", "fin_pll", APLL_LOCK,
+ APLL_CON0, NULL),
+	[cpll] = PLL(pll_2550, fout_cpll, "fout_cpll", "fin_pll", CPLL_LOCK,
+		CPLL_CON0, NULL),
+ [dpll] = PLL(pll_2550, fout_dpll, "fout_dpll", "fin_pll", DPLL_LOCK,
+ DPLL_CON0, NULL),
+ [epll] = PLL(pll_2650, fout_epll, "fout_epll", "fin_pll", EPLL_LOCK,
+ EPLL_CON0, NULL),
+ [rpll] = PLL(pll_2650, fout_rpll, "fout_rpll", "fin_pll", RPLL_LOCK,
+ RPLL_CON0, NULL),
+ [ipll] = PLL(pll_2550, fout_ipll, "fout_ipll", "fin_pll", IPLL_LOCK,
+ IPLL_CON0, NULL),
+ [spll] = PLL(pll_2550, fout_spll, "fout_spll", "fin_pll", SPLL_LOCK,
+ SPLL_CON0, NULL),
+ [vpll] = PLL(pll_2550, fout_vpll, "fout_vpll", "fin_pll", VPLL_LOCK,
+ VPLL_CON0, NULL),
+ [mpll] = PLL(pll_2550, fout_mpll, "fout_mpll", "fin_pll", MPLL_LOCK,
+ MPLL_CON0, NULL),
+ [bpll] = PLL(pll_2550, fout_bpll, "fout_bpll", "fin_pll", BPLL_LOCK,
+ BPLL_CON0, NULL),
+ [kpll] = PLL(pll_2550, fout_kpll, "fout_kpll", "fin_pll", KPLL_LOCK,
+ KPLL_CON0, NULL),
+};
+
+static struct of_device_id ext_clk_match[] __initdata = {
{ .compatible = "samsung,exynos5420-oscclk", .data = (void *)0, },
{ },
};
/* register exynos5420 clocks */
-void __init exynos5420_clk_init(struct device_node *np)
+static void __init exynos5420_clk_init(struct device_node *np)
{
void __iomem *reg_base;
- struct clk *apll, *bpll, *cpll, *dpll, *epll, *ipll, *kpll, *mpll;
- struct clk *rpll, *spll, *vpll;
if (np) {
reg_base = of_iomap(np, 0);
@@ -724,30 +783,8 @@ void __init exynos5420_clk_init(struct device_node *np)
samsung_clk_of_register_fixed_ext(exynos5420_fixed_rate_ext_clks,
ARRAY_SIZE(exynos5420_fixed_rate_ext_clks),
ext_clk_match);
-
- apll = samsung_clk_register_pll35xx("fout_apll", "fin_pll",
- reg_base + 0x100);
- bpll = samsung_clk_register_pll35xx("fout_bpll", "fin_pll",
- reg_base + 0x20110);
- cpll = samsung_clk_register_pll35xx("fout_cpll", "fin_pll",
- reg_base + 0x10120);
- dpll = samsung_clk_register_pll35xx("fout_dpll", "fin_pll",
- reg_base + 0x10128);
- epll = samsung_clk_register_pll36xx("fout_epll", "fin_pll",
- reg_base + 0x10130);
- ipll = samsung_clk_register_pll35xx("fout_ipll", "fin_pll",
- reg_base + 0x10150);
- kpll = samsung_clk_register_pll35xx("fout_kpll", "fin_pll",
- reg_base + 0x28100);
- mpll = samsung_clk_register_pll35xx("fout_mpll", "fin_pll",
- reg_base + 0x10180);
- rpll = samsung_clk_register_pll36xx("fout_rpll", "fin_pll",
- reg_base + 0x10140);
- spll = samsung_clk_register_pll35xx("fout_spll", "fin_pll",
- reg_base + 0x10160);
- vpll = samsung_clk_register_pll35xx("fout_vpll", "fin_pll",
- reg_base + 0x10170);
-
+ samsung_clk_register_pll(exynos5420_plls, ARRAY_SIZE(exynos5420_plls),
+ reg_base);
samsung_clk_register_fixed_rate(exynos5420_fixed_rate_clks,
ARRAY_SIZE(exynos5420_fixed_rate_clks));
samsung_clk_register_fixed_factor(exynos5420_fixed_factor_clks,
diff --git a/drivers/clk/samsung/clk-exynos5440.c b/drivers/clk/samsung/clk-exynos5440.c
index 7d54341..f865894 100644
--- a/drivers/clk/samsung/clk-exynos5440.c
+++ b/drivers/clk/samsung/clk-exynos5440.c
@@ -41,12 +41,12 @@ PNAME(mout_armclk_p) = { "cplla", "cpllb" };
PNAME(mout_spi_p) = { "div125", "div200" };
/* fixed rate clocks generated outside the soc */
-struct samsung_fixed_rate_clock exynos5440_fixed_rate_ext_clks[] __initdata = {
+static struct samsung_fixed_rate_clock exynos5440_fixed_rate_ext_clks[] __initdata = {
FRATE(none, "xtal", NULL, CLK_IS_ROOT, 0),
};
/* fixed rate clocks */
-struct samsung_fixed_rate_clock exynos5440_fixed_rate_clks[] __initdata = {
+static struct samsung_fixed_rate_clock exynos5440_fixed_rate_clks[] __initdata = {
FRATE(none, "ppll", NULL, CLK_IS_ROOT, 1000000000),
FRATE(none, "usb_phy0", NULL, CLK_IS_ROOT, 60000000),
FRATE(none, "usb_phy1", NULL, CLK_IS_ROOT, 60000000),
@@ -55,26 +55,26 @@ struct samsung_fixed_rate_clock exynos5440_fixed_rate_clks[] __initdata = {
};
/* fixed factor clocks */
-struct samsung_fixed_factor_clock exynos5440_fixed_factor_clks[] __initdata = {
+static struct samsung_fixed_factor_clock exynos5440_fixed_factor_clks[] __initdata = {
FFACTOR(none, "div250", "ppll", 1, 4, 0),
FFACTOR(none, "div200", "ppll", 1, 5, 0),
FFACTOR(none, "div125", "div250", 1, 2, 0),
};
/* mux clocks */
-struct samsung_mux_clock exynos5440_mux_clks[] __initdata = {
+static struct samsung_mux_clock exynos5440_mux_clks[] __initdata = {
MUX(none, "mout_spi", mout_spi_p, MISC_DOUT1, 5, 1),
MUX_A(arm_clk, "arm_clk", mout_armclk_p,
CPU_CLK_STATUS, 0, 1, "armclk"),
};
/* divider clocks */
-struct samsung_div_clock exynos5440_div_clks[] __initdata = {
+static struct samsung_div_clock exynos5440_div_clks[] __initdata = {
DIV(spi_baud, "div_spi", "mout_spi", MISC_DOUT1, 3, 2),
};
/* gate clocks */
-struct samsung_gate_clock exynos5440_gate_clks[] __initdata = {
+static struct samsung_gate_clock exynos5440_gate_clks[] __initdata = {
GATE(pb0_250, "pb0_250", "div250", CLKEN_OV_VAL, 3, 0, 0),
GATE(pr0_250, "pr0_250", "div250", CLKEN_OV_VAL, 4, 0, 0),
GATE(pr1_250, "pr1_250", "div250", CLKEN_OV_VAL, 5, 0, 0),
@@ -97,13 +97,13 @@ struct samsung_gate_clock exynos5440_gate_clks[] __initdata = {
GATE(cs250_o, "cs250_o", "cs250", CLKEN_OV_VAL, 19, 0, 0),
};
-static __initdata struct of_device_id ext_clk_match[] = {
+static struct of_device_id ext_clk_match[] __initdata = {
{ .compatible = "samsung,clock-xtal", .data = (void *)0, },
{},
};
/* register exynos5440 clocks */
-void __init exynos5440_clk_init(struct device_node *np)
+static void __init exynos5440_clk_init(struct device_node *np)
{
void __iomem *reg_base;
@@ -132,7 +132,7 @@ void __init exynos5440_clk_init(struct device_node *np)
samsung_clk_register_gate(exynos5440_gate_clks,
ARRAY_SIZE(exynos5440_gate_clks));
- pr_info("Exynos5440: arm_clk = %ldHz\n", _get_rate("armclk"));
+ pr_info("Exynos5440: arm_clk = %ldHz\n", _get_rate("arm_clk"));
pr_info("exynos5440 clock initialization complete\n");
}
CLK_OF_DECLARE(exynos5440_clk, "samsung,exynos5440-clock", exynos5440_clk_init);
diff --git a/drivers/clk/samsung/clk-pll.c b/drivers/clk/samsung/clk-pll.c
index 362f12d..529e11d 100644
--- a/drivers/clk/samsung/clk-pll.c
+++ b/drivers/clk/samsung/clk-pll.c
@@ -10,31 +10,73 @@
*/
#include <linux/errno.h>
+#include <linux/hrtimer.h>
#include "clk.h"
#include "clk-pll.h"
+#define PLL_TIMEOUT_MS 10
+
+struct samsung_clk_pll {
+ struct clk_hw hw;
+ void __iomem *lock_reg;
+ void __iomem *con_reg;
+ enum samsung_pll_type type;
+ unsigned int rate_count;
+ const struct samsung_pll_rate_table *rate_table;
+};
+
+#define to_clk_pll(_hw) container_of(_hw, struct samsung_clk_pll, hw)
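
to_clk_pll() is the usual clk_hw embedding idiom: the framework hands each
callback only a struct clk_hw pointer, and container_of() steps back over
the member offset to recover the enclosing samsung_clk_pll:

	/* Inside any callback that receives a struct clk_hw *hw: */
	struct samsung_clk_pll *pll = to_clk_pll(hw);

	/* pll->con_reg, pll->lock_reg and pll->rate_table are now usable. */
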
+
+static const struct samsung_pll_rate_table *samsung_get_pll_settings(
+ struct samsung_clk_pll *pll, unsigned long rate)
+{
+ const struct samsung_pll_rate_table *rate_table = pll->rate_table;
+ int i;
+
+ for (i = 0; i < pll->rate_count; i++) {
+ if (rate == rate_table[i].rate)
+ return &rate_table[i];
+ }
+
+ return NULL;
+}
+
+static long samsung_pll_round_rate(struct clk_hw *hw,
+ unsigned long drate, unsigned long *prate)
+{
+ struct samsung_clk_pll *pll = to_clk_pll(hw);
+ const struct samsung_pll_rate_table *rate_table = pll->rate_table;
+ int i;
+
+	/* Assuming rate_table is in descending order */
+ for (i = 0; i < pll->rate_count; i++) {
+ if (drate >= rate_table[i].rate)
+ return rate_table[i].rate;
+ }
+
+ /* return minimum supported value */
+ return rate_table[i - 1].rate;
+}
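
Given the descending order, the scan returns the first table rate that does
not exceed the request, so requests are rounded down, and anything below the
minimum is clamped up to the last entry. A standalone model of the same loop
(illustrative only):

	static long round_rate_model(unsigned long drate,
				     const unsigned long *tbl, int n)
	{
		int i;

		/* Same scan as samsung_pll_round_rate(). */
		for (i = 0; i < n; i++)
			if (drate >= tbl[i])
				return tbl[i];
		return tbl[n - 1];	/* below minimum */
	}

	/* { 1000000000, 900000000, 800000000 }: 950 MHz -> 900000000 */
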
+
/*
* PLL35xx Clock Type
*/
+/* Maximum lock time can be 270 * PDIV cycles */
+#define PLL35XX_LOCK_FACTOR (270)
#define PLL35XX_MDIV_MASK (0x3FF)
#define PLL35XX_PDIV_MASK (0x3F)
#define PLL35XX_SDIV_MASK (0x7)
+#define PLL35XX_LOCK_STAT_MASK (0x1)
#define PLL35XX_MDIV_SHIFT (16)
#define PLL35XX_PDIV_SHIFT (8)
#define PLL35XX_SDIV_SHIFT (0)
-
-struct samsung_clk_pll35xx {
- struct clk_hw hw;
- const void __iomem *con_reg;
-};
-
-#define to_clk_pll35xx(_hw) container_of(_hw, struct samsung_clk_pll35xx, hw)
+#define PLL35XX_LOCK_STAT_SHIFT (29)
static unsigned long samsung_pll35xx_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
- struct samsung_clk_pll35xx *pll = to_clk_pll35xx(hw);
+ struct samsung_clk_pll *pll = to_clk_pll(hw);
u32 mdiv, pdiv, sdiv, pll_con;
u64 fvco = parent_rate;
@@ -49,48 +91,80 @@ static unsigned long samsung_pll35xx_recalc_rate(struct clk_hw *hw,
return (unsigned long)fvco;
}
-static const struct clk_ops samsung_pll35xx_clk_ops = {
- .recalc_rate = samsung_pll35xx_recalc_rate,
-};
-
-struct clk * __init samsung_clk_register_pll35xx(const char *name,
- const char *pname, const void __iomem *con_reg)
+static inline bool samsung_pll35xx_mp_change(
+ const struct samsung_pll_rate_table *rate, u32 pll_con)
{
- struct samsung_clk_pll35xx *pll;
- struct clk *clk;
- struct clk_init_data init;
+ u32 old_mdiv, old_pdiv;
- pll = kzalloc(sizeof(*pll), GFP_KERNEL);
- if (!pll) {
- pr_err("%s: could not allocate pll clk %s\n", __func__, name);
- return NULL;
+ old_mdiv = (pll_con >> PLL35XX_MDIV_SHIFT) & PLL35XX_MDIV_MASK;
+ old_pdiv = (pll_con >> PLL35XX_PDIV_SHIFT) & PLL35XX_PDIV_MASK;
+
+ return (rate->mdiv != old_mdiv || rate->pdiv != old_pdiv);
+}
+
+static int samsung_pll35xx_set_rate(struct clk_hw *hw, unsigned long drate,
+ unsigned long prate)
+{
+ struct samsung_clk_pll *pll = to_clk_pll(hw);
+ const struct samsung_pll_rate_table *rate;
+ u32 tmp;
+
+ /* Get required rate settings from table */
+ rate = samsung_get_pll_settings(pll, drate);
+ if (!rate) {
+ pr_err("%s: Invalid rate : %lu for pll clk %s\n", __func__,
+ drate, __clk_get_name(hw->clk));
+ return -EINVAL;
}
- init.name = name;
- init.ops = &samsung_pll35xx_clk_ops;
- init.flags = CLK_GET_RATE_NOCACHE;
- init.parent_names = &pname;
- init.num_parents = 1;
+ tmp = __raw_readl(pll->con_reg);
- pll->hw.init = &init;
- pll->con_reg = con_reg;
+ if (!(samsung_pll35xx_mp_change(rate, tmp))) {
+		/* If only the s divider changes, update s alone */
+ tmp &= ~(PLL35XX_SDIV_MASK << PLL35XX_SDIV_SHIFT);
+ tmp |= rate->sdiv << PLL35XX_SDIV_SHIFT;
+ __raw_writel(tmp, pll->con_reg);
- clk = clk_register(NULL, &pll->hw);
- if (IS_ERR(clk)) {
- pr_err("%s: failed to register pll clock %s\n", __func__,
- name);
- kfree(pll);
+ return 0;
}
- if (clk_register_clkdev(clk, name, NULL))
- pr_err("%s: failed to register lookup for %s", __func__, name);
-
- return clk;
+ /* Set PLL lock time. */
+ __raw_writel(rate->pdiv * PLL35XX_LOCK_FACTOR,
+ pll->lock_reg);
+
+ /* Change PLL PMS values */
+ tmp &= ~((PLL35XX_MDIV_MASK << PLL35XX_MDIV_SHIFT) |
+ (PLL35XX_PDIV_MASK << PLL35XX_PDIV_SHIFT) |
+ (PLL35XX_SDIV_MASK << PLL35XX_SDIV_SHIFT));
+ tmp |= (rate->mdiv << PLL35XX_MDIV_SHIFT) |
+ (rate->pdiv << PLL35XX_PDIV_SHIFT) |
+ (rate->sdiv << PLL35XX_SDIV_SHIFT);
+ __raw_writel(tmp, pll->con_reg);
+
+	/* Wait for the PLL to lock */
+ do {
+ cpu_relax();
+ tmp = __raw_readl(pll->con_reg);
+ } while (!(tmp & (PLL35XX_LOCK_STAT_MASK
+ << PLL35XX_LOCK_STAT_SHIFT)));
+ return 0;
}
+static const struct clk_ops samsung_pll35xx_clk_ops = {
+ .recalc_rate = samsung_pll35xx_recalc_rate,
+ .round_rate = samsung_pll_round_rate,
+ .set_rate = samsung_pll35xx_set_rate,
+};
+
+static const struct clk_ops samsung_pll35xx_clk_min_ops = {
+ .recalc_rate = samsung_pll35xx_recalc_rate,
+};
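
With .round_rate and .set_rate wired up, callers retune the PLL through the
generic API instead of writing CON0 directly: the request is first snapped
to a table rate, then samsung_pll35xx_set_rate() takes either the S-only
fast path or the full M/P reprogram plus lock wait. The *_min_ops variant
presumably backs PLLs registered without a rate table, leaving them
read-only. A hedged consumer sketch (lookup name assumed):

	#include <linux/clk.h>
	#include <linux/err.h>

	static void example_retune_apll(void)
	{
		struct clk *apll = clk_get(NULL, "fout_apll");

		/* 800 MHz is in exynos4x12_apll_rates, so it matches exactly. */
		if (!IS_ERR(apll))
			clk_set_rate(apll, 800000000);
	}
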
+
/*
* PLL36xx Clock Type
*/
+/* Maximum lock time can be 3000 * PDIV cycles */
+#define PLL36XX_LOCK_FACTOR (3000)
#define PLL36XX_KDIV_MASK (0xFFFF)
#define PLL36XX_MDIV_MASK (0x1FF)
@@ -99,18 +173,13 @@ struct clk * __init samsung_clk_register_pll35xx(const char *name,
#define PLL36XX_MDIV_SHIFT (16)
#define PLL36XX_PDIV_SHIFT (8)
#define PLL36XX_SDIV_SHIFT (0)
-
-struct samsung_clk_pll36xx {
- struct clk_hw hw;
- const void __iomem *con_reg;
-};
-
-#define to_clk_pll36xx(_hw) container_of(_hw, struct samsung_clk_pll36xx, hw)
+#define PLL36XX_KDIV_SHIFT (0)
+#define PLL36XX_LOCK_STAT_SHIFT (29)
static unsigned long samsung_pll36xx_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
- struct samsung_clk_pll36xx *pll = to_clk_pll36xx(hw);
+ struct samsung_clk_pll *pll = to_clk_pll(hw);
u32 mdiv, pdiv, sdiv, pll_con0, pll_con1;
s16 kdiv;
u64 fvco = parent_rate;
@@ -129,68 +198,102 @@ static unsigned long samsung_pll36xx_recalc_rate(struct clk_hw *hw,
return (unsigned long)fvco;
}
-static const struct clk_ops samsung_pll36xx_clk_ops = {
- .recalc_rate = samsung_pll36xx_recalc_rate,
-};
-
-struct clk * __init samsung_clk_register_pll36xx(const char *name,
- const char *pname, const void __iomem *con_reg)
+static inline bool samsung_pll36xx_mpk_change(
+ const struct samsung_pll_rate_table *rate, u32 pll_con0, u32 pll_con1)
{
- struct samsung_clk_pll36xx *pll;
- struct clk *clk;
- struct clk_init_data init;
+ u32 old_mdiv, old_pdiv, old_kdiv;
- pll = kzalloc(sizeof(*pll), GFP_KERNEL);
- if (!pll) {
- pr_err("%s: could not allocate pll clk %s\n", __func__, name);
- return NULL;
+ old_mdiv = (pll_con0 >> PLL36XX_MDIV_SHIFT) & PLL36XX_MDIV_MASK;
+ old_pdiv = (pll_con0 >> PLL36XX_PDIV_SHIFT) & PLL36XX_PDIV_MASK;
+ old_kdiv = (pll_con1 >> PLL36XX_KDIV_SHIFT) & PLL36XX_KDIV_MASK;
+
+ return (rate->mdiv != old_mdiv || rate->pdiv != old_pdiv ||
+ rate->kdiv != old_kdiv);
+}
+
+static int samsung_pll36xx_set_rate(struct clk_hw *hw, unsigned long drate,
+ unsigned long parent_rate)
+{
+ struct samsung_clk_pll *pll = to_clk_pll(hw);
+ u32 tmp, pll_con0, pll_con1;
+ const struct samsung_pll_rate_table *rate;
+
+ rate = samsung_get_pll_settings(pll, drate);
+ if (!rate) {
+ pr_err("%s: Invalid rate : %lu for pll clk %s\n", __func__,
+ drate, __clk_get_name(hw->clk));
+ return -EINVAL;
}
- init.name = name;
- init.ops = &samsung_pll36xx_clk_ops;
- init.flags = CLK_GET_RATE_NOCACHE;
- init.parent_names = &pname;
- init.num_parents = 1;
+ pll_con0 = __raw_readl(pll->con_reg);
+ pll_con1 = __raw_readl(pll->con_reg + 4);
- pll->hw.init = &init;
- pll->con_reg = con_reg;
+ if (!(samsung_pll36xx_mpk_change(rate, pll_con0, pll_con1))) {
+		/* If only the s divider changes, update s alone */
+ pll_con0 &= ~(PLL36XX_SDIV_MASK << PLL36XX_SDIV_SHIFT);
+ pll_con0 |= (rate->sdiv << PLL36XX_SDIV_SHIFT);
+ __raw_writel(pll_con0, pll->con_reg);
- clk = clk_register(NULL, &pll->hw);
- if (IS_ERR(clk)) {
- pr_err("%s: failed to register pll clock %s\n", __func__,
- name);
- kfree(pll);
+ return 0;
}
- if (clk_register_clkdev(clk, name, NULL))
- pr_err("%s: failed to register lookup for %s", __func__, name);
-
- return clk;
+ /* Set PLL lock time. */
+ __raw_writel(rate->pdiv * PLL36XX_LOCK_FACTOR, pll->lock_reg);
+
+ /* Change PLL PMS values */
+ pll_con0 &= ~((PLL36XX_MDIV_MASK << PLL36XX_MDIV_SHIFT) |
+ (PLL36XX_PDIV_MASK << PLL36XX_PDIV_SHIFT) |
+ (PLL36XX_SDIV_MASK << PLL36XX_SDIV_SHIFT));
+ pll_con0 |= (rate->mdiv << PLL36XX_MDIV_SHIFT) |
+ (rate->pdiv << PLL36XX_PDIV_SHIFT) |
+ (rate->sdiv << PLL36XX_SDIV_SHIFT);
+ __raw_writel(pll_con0, pll->con_reg);
+
+ pll_con1 &= ~(PLL36XX_KDIV_MASK << PLL36XX_KDIV_SHIFT);
+ pll_con1 |= rate->kdiv << PLL36XX_KDIV_SHIFT;
+ __raw_writel(pll_con1, pll->con_reg + 4);
+
+	/* Wait for the PLL to lock */
+ do {
+ cpu_relax();
+ tmp = __raw_readl(pll->con_reg);
+ } while (!(tmp & (1 << PLL36XX_LOCK_STAT_SHIFT)));
+
+ return 0;
}
+static const struct clk_ops samsung_pll36xx_clk_ops = {
+ .recalc_rate = samsung_pll36xx_recalc_rate,
+ .set_rate = samsung_pll36xx_set_rate,
+ .round_rate = samsung_pll_round_rate,
+};
+
+static const struct clk_ops samsung_pll36xx_clk_min_ops = {
+ .recalc_rate = samsung_pll36xx_recalc_rate,
+};
+
/*
* PLL45xx Clock Type
*/
+#define PLL4502_LOCK_FACTOR 400
+#define PLL4508_LOCK_FACTOR 240
#define PLL45XX_MDIV_MASK (0x3FF)
#define PLL45XX_PDIV_MASK (0x3F)
#define PLL45XX_SDIV_MASK (0x7)
+#define PLL45XX_AFC_MASK (0x1F)
#define PLL45XX_MDIV_SHIFT (16)
#define PLL45XX_PDIV_SHIFT (8)
#define PLL45XX_SDIV_SHIFT (0)
+#define PLL45XX_AFC_SHIFT (0)
-struct samsung_clk_pll45xx {
- struct clk_hw hw;
- enum pll45xx_type type;
- const void __iomem *con_reg;
-};
-
-#define to_clk_pll45xx(_hw) container_of(_hw, struct samsung_clk_pll45xx, hw)
+#define PLL45XX_ENABLE BIT(31)
+#define PLL45XX_LOCKED BIT(29)
static unsigned long samsung_pll45xx_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
- struct samsung_clk_pll45xx *pll = to_clk_pll45xx(hw);
+ struct samsung_clk_pll *pll = to_clk_pll(hw);
u32 mdiv, pdiv, sdiv, pll_con;
u64 fvco = parent_rate;
@@ -208,54 +311,113 @@ static unsigned long samsung_pll45xx_recalc_rate(struct clk_hw *hw,
return (unsigned long)fvco;
}
-static const struct clk_ops samsung_pll45xx_clk_ops = {
- .recalc_rate = samsung_pll45xx_recalc_rate,
-};
-
-struct clk * __init samsung_clk_register_pll45xx(const char *name,
- const char *pname, const void __iomem *con_reg,
- enum pll45xx_type type)
+static bool samsung_pll45xx_mp_change(u32 pll_con0, u32 pll_con1,
+ const struct samsung_pll_rate_table *rate)
{
- struct samsung_clk_pll45xx *pll;
- struct clk *clk;
- struct clk_init_data init;
+ u32 old_mdiv, old_pdiv, old_afc;
- pll = kzalloc(sizeof(*pll), GFP_KERNEL);
- if (!pll) {
- pr_err("%s: could not allocate pll clk %s\n", __func__, name);
- return NULL;
+ old_mdiv = (pll_con0 >> PLL45XX_MDIV_SHIFT) & PLL45XX_MDIV_MASK;
+ old_pdiv = (pll_con0 >> PLL45XX_PDIV_SHIFT) & PLL45XX_PDIV_MASK;
+ old_afc = (pll_con1 >> PLL45XX_AFC_SHIFT) & PLL45XX_AFC_MASK;
+
+ return (old_mdiv != rate->mdiv || old_pdiv != rate->pdiv
+ || old_afc != rate->afc);
+}
+
+static int samsung_pll45xx_set_rate(struct clk_hw *hw, unsigned long drate,
+ unsigned long prate)
+{
+ struct samsung_clk_pll *pll = to_clk_pll(hw);
+ const struct samsung_pll_rate_table *rate;
+ u32 con0, con1;
+ ktime_t start;
+
+ /* Get required rate settings from table */
+ rate = samsung_get_pll_settings(pll, drate);
+ if (!rate) {
+ pr_err("%s: Invalid rate : %lu for pll clk %s\n", __func__,
+ drate, __clk_get_name(hw->clk));
+ return -EINVAL;
}
- init.name = name;
- init.ops = &samsung_pll45xx_clk_ops;
- init.flags = CLK_GET_RATE_NOCACHE;
- init.parent_names = &pname;
- init.num_parents = 1;
+ con0 = __raw_readl(pll->con_reg);
+ con1 = __raw_readl(pll->con_reg + 0x4);
- pll->hw.init = &init;
- pll->con_reg = con_reg;
- pll->type = type;
+ if (!(samsung_pll45xx_mp_change(con0, con1, rate))) {
+ /* If only the s divider changes, update it alone. */
+ con0 &= ~(PLL45XX_SDIV_MASK << PLL45XX_SDIV_SHIFT);
+ con0 |= rate->sdiv << PLL45XX_SDIV_SHIFT;
+ __raw_writel(con0, pll->con_reg);
- clk = clk_register(NULL, &pll->hw);
- if (IS_ERR(clk)) {
- pr_err("%s: failed to register pll clock %s\n", __func__,
- name);
- kfree(pll);
+ return 0;
}
- if (clk_register_clkdev(clk, name, NULL))
- pr_err("%s: failed to register lookup for %s", __func__, name);
+ /* Set PLL PMS values. */
+ con0 &= ~((PLL45XX_MDIV_MASK << PLL45XX_MDIV_SHIFT) |
+ (PLL45XX_PDIV_MASK << PLL45XX_PDIV_SHIFT) |
+ (PLL45XX_SDIV_MASK << PLL45XX_SDIV_SHIFT));
+ con0 |= (rate->mdiv << PLL45XX_MDIV_SHIFT) |
+ (rate->pdiv << PLL45XX_PDIV_SHIFT) |
+ (rate->sdiv << PLL45XX_SDIV_SHIFT);
+
+ /* Set PLL AFC value. */
+ con1 = __raw_readl(pll->con_reg + 0x4);
+ con1 &= ~(PLL45XX_AFC_MASK << PLL45XX_AFC_SHIFT);
+ con1 |= (rate->afc << PLL45XX_AFC_SHIFT);
+
+ /* Set PLL lock time. */
+ switch (pll->type) {
+ case pll_4502:
+ __raw_writel(rate->pdiv * PLL4502_LOCK_FACTOR, pll->lock_reg);
+ break;
+ case pll_4508:
+ __raw_writel(rate->pdiv * PLL4508_LOCK_FACTOR, pll->lock_reg);
+ break;
+ default:
+ break;
+ }
+
+ /* Set new configuration. */
+ __raw_writel(con1, pll->con_reg + 0x4);
+ __raw_writel(con0, pll->con_reg);
+
+ /* Wait for locking. */
+ start = ktime_get();
+ while (!(__raw_readl(pll->con_reg) & PLL45XX_LOCKED)) {
+ ktime_t delta = ktime_sub(ktime_get(), start);
+
+ if (ktime_to_ms(delta) > PLL_TIMEOUT_MS) {
+ pr_err("%s: could not lock PLL %s\n",
+ __func__, __clk_get_name(hw->clk));
+ return -EFAULT;
+ }
+
+ cpu_relax();
+ }
- return clk;
+ return 0;
}
+static const struct clk_ops samsung_pll45xx_clk_ops = {
+ .recalc_rate = samsung_pll45xx_recalc_rate,
+ .round_rate = samsung_pll_round_rate,
+ .set_rate = samsung_pll45xx_set_rate,
+};
+
+static const struct clk_ops samsung_pll45xx_clk_min_ops = {
+ .recalc_rate = samsung_pll45xx_recalc_rate,
+};
+
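The lock wait above is a bounded poll: ktime_get() stamps the start, the loop spins with cpu_relax(), and the driver gives up once PLL_TIMEOUT_MS elapses (returning -EFAULT, though -ETIMEDOUT would arguably be the more conventional error). A self-contained userspace sketch of the same bounded-poll pattern against a dummy status word; the bit position and timeout are stand-ins:

#include <stdbool.h>
#include <stdint.h>
#include <time.h>

#define LOCKED_BIT	(1u << 29)	/* mirrors PLL45XX_LOCKED */
#define TIMEOUT_MS	10		/* stand-in for PLL_TIMEOUT_MS */

static volatile uint32_t fake_con_reg;	/* stands in for the PLL con register */

static long elapsed_ms(const struct timespec *a, const struct timespec *b)
{
	return (b->tv_sec - a->tv_sec) * 1000 +
	       (b->tv_nsec - a->tv_nsec) / 1000000;
}

/* Poll the lock bit; return false on timeout, mirroring the error path. */
static bool wait_for_lock(void)
{
	struct timespec start, now;

	clock_gettime(CLOCK_MONOTONIC, &start);
	while (!(fake_con_reg & LOCKED_BIT)) {
		clock_gettime(CLOCK_MONOTONIC, &now);
		if (elapsed_ms(&start, &now) > TIMEOUT_MS)
			return false;
	}
	return true;
}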
/*
* PLL46xx Clock Type
*/
+#define PLL46XX_LOCK_FACTOR 3000
+#define PLL46XX_VSEL_MASK (1)
#define PLL46XX_MDIV_MASK (0x1FF)
#define PLL46XX_PDIV_MASK (0x3F)
#define PLL46XX_SDIV_MASK (0x7)
+#define PLL46XX_VSEL_SHIFT (27)
#define PLL46XX_MDIV_SHIFT (16)
#define PLL46XX_PDIV_SHIFT (8)
#define PLL46XX_SDIV_SHIFT (0)
@@ -263,19 +425,20 @@ struct clk * __init samsung_clk_register_pll45xx(const char *name,
#define PLL46XX_KDIV_MASK (0xFFFF)
#define PLL4650C_KDIV_MASK (0xFFF)
#define PLL46XX_KDIV_SHIFT (0)
+#define PLL46XX_MFR_MASK (0x3F)
+#define PLL46XX_MRR_MASK (0x1F)
+#define PLL46XX_MFR_SHIFT (16)
+#define PLL46XX_MRR_SHIFT (24)
-struct samsung_clk_pll46xx {
- struct clk_hw hw;
- enum pll46xx_type type;
- const void __iomem *con_reg;
-};
-
-#define to_clk_pll46xx(_hw) container_of(_hw, struct samsung_clk_pll46xx, hw)
+#define PLL46XX_ENABLE BIT(31)
+#define PLL46XX_LOCKED BIT(29)
+#define PLL46XX_VSEL BIT(27)
static unsigned long samsung_pll46xx_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
- struct samsung_clk_pll46xx *pll = to_clk_pll46xx(hw);
+ struct samsung_clk_pll *pll = to_clk_pll(hw);
u32 mdiv, pdiv, sdiv, kdiv, pll_con0, pll_con1, shift;
u64 fvco = parent_rate;
@@ -295,47 +458,175 @@ static unsigned long samsung_pll46xx_recalc_rate(struct clk_hw *hw,
return (unsigned long)fvco;
}
+static bool samsung_pll46xx_mpk_change(u32 pll_con0, u32 pll_con1,
+ const struct samsung_pll_rate_table *rate)
+{
+ u32 old_mdiv, old_pdiv, old_kdiv;
+
+ old_mdiv = (pll_con0 >> PLL46XX_MDIV_SHIFT) & PLL46XX_MDIV_MASK;
+ old_pdiv = (pll_con0 >> PLL46XX_PDIV_SHIFT) & PLL46XX_PDIV_MASK;
+ old_kdiv = (pll_con1 >> PLL46XX_KDIV_SHIFT) & PLL46XX_KDIV_MASK;
+
+ return (old_mdiv != rate->mdiv || old_pdiv != rate->pdiv
+ || old_kdiv != rate->kdiv);
+}
+
+static int samsung_pll46xx_set_rate(struct clk_hw *hw, unsigned long drate,
+ unsigned long prate)
+{
+ struct samsung_clk_pll *pll = to_clk_pll(hw);
+ const struct samsung_pll_rate_table *rate;
+ u32 con0, con1, lock;
+ ktime_t start;
+
+ /* Get required rate settings from table */
+ rate = samsung_get_pll_settings(pll, drate);
+ if (!rate) {
+ pr_err("%s: Invalid rate : %lu for pll clk %s\n", __func__,
+ drate, __clk_get_name(hw->clk));
+ return -EINVAL;
+ }
+
+ con0 = __raw_readl(pll->con_reg);
+ con1 = __raw_readl(pll->con_reg + 0x4);
+
+ if (!(samsung_pll46xx_mpk_change(con0, con1, rate))) {
+ /* If only the s divider changes, update it alone. */
+ con0 &= ~(PLL46XX_SDIV_MASK << PLL46XX_SDIV_SHIFT);
+ con0 |= rate->sdiv << PLL46XX_SDIV_SHIFT;
+ __raw_writel(con0, pll->con_reg);
+
+ return 0;
+ }
+
+ /* Set PLL lock time. */
+ lock = rate->pdiv * PLL46XX_LOCK_FACTOR;
+ if (lock > 0xffff)
+ /* Maximum lock time bitfield is 16-bit. */
+ lock = 0xffff;
+
+ /* Set PLL PMS and VSEL values. */
+ con0 &= ~((PLL46XX_MDIV_MASK << PLL46XX_MDIV_SHIFT) |
+ (PLL46XX_PDIV_MASK << PLL46XX_PDIV_SHIFT) |
+ (PLL46XX_SDIV_MASK << PLL46XX_SDIV_SHIFT) |
+ (PLL46XX_VSEL_MASK << PLL46XX_VSEL_SHIFT));
+ con0 |= (rate->mdiv << PLL46XX_MDIV_SHIFT) |
+ (rate->pdiv << PLL46XX_PDIV_SHIFT) |
+ (rate->sdiv << PLL46XX_SDIV_SHIFT) |
+ (rate->vsel << PLL46XX_VSEL_SHIFT);
+
+ /* Set PLL K, MFR and MRR values. */
+ con1 = __raw_readl(pll->con_reg + 0x4);
+ con1 &= ~((PLL46XX_KDIV_MASK << PLL46XX_KDIV_SHIFT) |
+ (PLL46XX_MFR_MASK << PLL46XX_MFR_SHIFT) |
+ (PLL46XX_MRR_MASK << PLL46XX_MRR_SHIFT));
+ con1 |= (rate->kdiv << PLL46XX_KDIV_SHIFT) |
+ (rate->mfr << PLL46XX_MFR_SHIFT) |
+ (rate->mrr << PLL46XX_MRR_SHIFT);
+
+ /* Write configuration to PLL */
+ __raw_writel(lock, pll->lock_reg);
+ __raw_writel(con0, pll->con_reg);
+ __raw_writel(con1, pll->con_reg + 0x4);
+
+ /* Wait for locking. */
+ start = ktime_get();
+ while (!(__raw_readl(pll->con_reg) & PLL46XX_LOCKED)) {
+ ktime_t delta = ktime_sub(ktime_get(), start);
+
+ if (ktime_to_ms(delta) > PLL_TIMEOUT_MS) {
+ pr_err("%s: could not lock PLL %s\n",
+ __func__, __clk_get_name(hw->clk));
+ return -EFAULT;
+ }
+
+ cpu_relax();
+ }
+
+ return 0;
+}
+
static const struct clk_ops samsung_pll46xx_clk_ops = {
.recalc_rate = samsung_pll46xx_recalc_rate,
+ .round_rate = samsung_pll_round_rate,
+ .set_rate = samsung_pll46xx_set_rate,
+};
+
+static const struct clk_ops samsung_pll46xx_clk_min_ops = {
+ .recalc_rate = samsung_pll46xx_recalc_rate,
};
-struct clk * __init samsung_clk_register_pll46xx(const char *name,
- const char *pname, const void __iomem *con_reg,
- enum pll46xx_type type)
+/*
+ * PLL6552 Clock Type
+ */
+
+#define PLL6552_MDIV_MASK 0x3ff
+#define PLL6552_PDIV_MASK 0x3f
+#define PLL6552_SDIV_MASK 0x7
+#define PLL6552_MDIV_SHIFT 16
+#define PLL6552_PDIV_SHIFT 8
+#define PLL6552_SDIV_SHIFT 0
+
+static unsigned long samsung_pll6552_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
{
- struct samsung_clk_pll46xx *pll;
- struct clk *clk;
- struct clk_init_data init;
+ struct samsung_clk_pll *pll = to_clk_pll(hw);
+ u32 mdiv, pdiv, sdiv, pll_con;
+ u64 fvco = parent_rate;
- pll = kzalloc(sizeof(*pll), GFP_KERNEL);
- if (!pll) {
- pr_err("%s: could not allocate pll clk %s\n", __func__, name);
- return NULL;
- }
+ pll_con = __raw_readl(pll->con_reg);
+ mdiv = (pll_con >> PLL6552_MDIV_SHIFT) & PLL6552_MDIV_MASK;
+ pdiv = (pll_con >> PLL6552_PDIV_SHIFT) & PLL6552_PDIV_MASK;
+ sdiv = (pll_con >> PLL6552_SDIV_SHIFT) & PLL6552_SDIV_MASK;
- init.name = name;
- init.ops = &samsung_pll46xx_clk_ops;
- init.flags = CLK_GET_RATE_NOCACHE;
- init.parent_names = &pname;
- init.num_parents = 1;
+ fvco *= mdiv;
+ do_div(fvco, (pdiv << sdiv));
- pll->hw.init = &init;
- pll->con_reg = con_reg;
- pll->type = type;
+ return (unsigned long)fvco;
+}
- clk = clk_register(NULL, &pll->hw);
- if (IS_ERR(clk)) {
- pr_err("%s: failed to register pll clock %s\n", __func__,
- name);
- kfree(pll);
- }
+static const struct clk_ops samsung_pll6552_clk_ops = {
+ .recalc_rate = samsung_pll6552_recalc_rate,
+};
- if (clk_register_clkdev(clk, name, NULL))
- pr_err("%s: failed to register lookup for %s", __func__, name);
+/*
+ * PLL6553 Clock Type
+ */
- return clk;
+#define PLL6553_MDIV_MASK 0xff
+#define PLL6553_PDIV_MASK 0x3f
+#define PLL6553_SDIV_MASK 0x7
+#define PLL6553_KDIV_MASK 0xffff
+#define PLL6553_MDIV_SHIFT 16
+#define PLL6553_PDIV_SHIFT 8
+#define PLL6553_SDIV_SHIFT 0
+#define PLL6553_KDIV_SHIFT 0
+
+static unsigned long samsung_pll6553_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct samsung_clk_pll *pll = to_clk_pll(hw);
+ u32 mdiv, pdiv, sdiv, kdiv, pll_con0, pll_con1;
+ u64 fvco = parent_rate;
+
+ pll_con0 = __raw_readl(pll->con_reg);
+ pll_con1 = __raw_readl(pll->con_reg + 0x4);
+ mdiv = (pll_con0 >> PLL6553_MDIV_SHIFT) & PLL6553_MDIV_MASK;
+ pdiv = (pll_con0 >> PLL6553_PDIV_SHIFT) & PLL6553_PDIV_MASK;
+ sdiv = (pll_con0 >> PLL6553_SDIV_SHIFT) & PLL6553_SDIV_MASK;
+ kdiv = (pll_con1 >> PLL6553_KDIV_SHIFT) & PLL6553_KDIV_MASK;
+
+ fvco *= (mdiv << 16) + kdiv;
+ do_div(fvco, (pdiv << sdiv));
+ fvco >>= 16;
+
+ return (unsigned long)fvco;
}
+static const struct clk_ops samsung_pll6553_clk_ops = {
+ .recalc_rate = samsung_pll6553_recalc_rate,
+};
+
/*
* PLL2550x Clock Type
*/
@@ -418,3 +709,117 @@ struct clk * __init samsung_clk_register_pll2550x(const char *name,
return clk;
}
+
+static void __init _samsung_clk_register_pll(struct samsung_pll_clock *pll_clk,
+ void __iomem *base)
+{
+ struct samsung_clk_pll *pll;
+ struct clk *clk;
+ struct clk_init_data init;
+ int ret, len;
+
+ pll = kzalloc(sizeof(*pll), GFP_KERNEL);
+ if (!pll) {
+ pr_err("%s: could not allocate pll clk %s\n",
+ __func__, pll_clk->name);
+ return;
+ }
+
+ init.name = pll_clk->name;
+ init.flags = pll_clk->flags;
+ init.parent_names = &pll_clk->parent_name;
+ init.num_parents = 1;
+
+ if (pll_clk->rate_table) {
+ /* find count of rates in rate_table */
+ for (len = 0; pll_clk->rate_table[len].rate != 0; )
+ len++;
+
+ pll->rate_count = len;
+ pll->rate_table = kmemdup(pll_clk->rate_table,
+ pll->rate_count *
+ sizeof(struct samsung_pll_rate_table),
+ GFP_KERNEL);
+ WARN(!pll->rate_table,
+ "%s: could not allocate rate table for %s\n",
+ __func__, pll_clk->name);
+ }
+
+ switch (pll_clk->type) {
+ /* clk_ops for 35xx and 2550 are similar */
+ case pll_35xx:
+ case pll_2550:
+ if (!pll->rate_table)
+ init.ops = &samsung_pll35xx_clk_min_ops;
+ else
+ init.ops = &samsung_pll35xx_clk_ops;
+ break;
+ case pll_4500:
+ init.ops = &samsung_pll45xx_clk_min_ops;
+ break;
+ case pll_4502:
+ case pll_4508:
+ if (!pll->rate_table)
+ init.ops = &samsung_pll45xx_clk_min_ops;
+ else
+ init.ops = &samsung_pll45xx_clk_ops;
+ break;
+ /* clk_ops for 36xx and 2650 are similar */
+ case pll_36xx:
+ case pll_2650:
+ if (!pll->rate_table)
+ init.ops = &samsung_pll36xx_clk_min_ops;
+ else
+ init.ops = &samsung_pll36xx_clk_ops;
+ break;
+ case pll_6552:
+ init.ops = &samsung_pll6552_clk_ops;
+ break;
+ case pll_6553:
+ init.ops = &samsung_pll6553_clk_ops;
+ break;
+ case pll_4600:
+ case pll_4650:
+ case pll_4650c:
+ if (!pll->rate_table)
+ init.ops = &samsung_pll46xx_clk_min_ops;
+ else
+ init.ops = &samsung_pll46xx_clk_ops;
+ break;
+ default:
+ pr_warn("%s: Unknown pll type for pll clk %s\n",
+ __func__, pll_clk->name);
+ }
+
+ pll->hw.init = &init;
+ pll->type = pll_clk->type;
+ pll->lock_reg = base + pll_clk->lock_offset;
+ pll->con_reg = base + pll_clk->con_offset;
+
+ clk = clk_register(NULL, &pll->hw);
+ if (IS_ERR(clk)) {
+ pr_err("%s: failed to register pll clock %s : %ld\n",
+ __func__, pll_clk->name, PTR_ERR(clk));
+ kfree(pll);
+ return;
+ }
+
+ samsung_clk_add_lookup(clk, pll_clk->id);
+
+ if (!pll_clk->alias)
+ return;
+
+ ret = clk_register_clkdev(clk, pll_clk->alias, pll_clk->dev_name);
+ if (ret)
+ pr_err("%s: failed to register lookup for %s : %d",
+ __func__, pll_clk->name, ret);
+}
+
+void __init samsung_clk_register_pll(struct samsung_pll_clock *pll_list,
+ unsigned int nr_pll, void __iomem *base)
+{
+ int cnt;
+
+ for (cnt = 0; cnt < nr_pll; cnt++)
+ _samsung_clk_register_pll(&pll_list[cnt], base);
+}
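With the per-type registration helpers gone, SoC drivers now describe their PLLs as data and register the whole array in one call, as the new s3c64xx driver below does. A condensed sketch of the intended usage, built on the PLL() helper defined in clk.h further down (the ids, offsets and table values here are hypothetical, not taken from any real SoC):

/* Hypothetical rate table: descending order, zero-terminated. */
static const struct samsung_pll_rate_table example_apll_rates[] = {
	PLL_35XX_RATE(800000000, 200, 3, 1),
	{ /* sentinel: the length scan stops at rate == 0 */ },
};

/* APLL gets a rate table; EPLL passes NULL and so stays read-only. */
static struct samsung_pll_clock example_pll_clks[] __initdata = {
	PLL(pll_35xx, 1, "fout_apll", "fin_pll", 0x000, 0x100,
			example_apll_rates),
	PLL(pll_36xx, 2, "fout_epll", "fin_pll", 0x010, 0x110, NULL),
};

static void __init example_clk_init(void __iomem *reg_base)
{
	samsung_clk_register_pll(example_pll_clks,
			ARRAY_SIZE(example_pll_clks), reg_base);
}

A NULL rate table selects the *_min_ops variant, so only recalc_rate is available and the PLL cannot be reprogrammed.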
diff --git a/drivers/clk/samsung/clk-pll.h b/drivers/clk/samsung/clk-pll.h
index f33786e..6c39030 100644
--- a/drivers/clk/samsung/clk-pll.h
+++ b/drivers/clk/samsung/clk-pll.h
@@ -12,28 +12,83 @@
#ifndef __SAMSUNG_CLK_PLL_H
#define __SAMSUNG_CLK_PLL_H
-enum pll45xx_type {
+enum samsung_pll_type {
+ pll_35xx,
+ pll_36xx,
+ pll_2550,
+ pll_2650,
pll_4500,
pll_4502,
- pll_4508
-};
-
-enum pll46xx_type {
+ pll_4508,
pll_4600,
pll_4650,
pll_4650c,
+ pll_6552,
+ pll_6553,
+};
+
+#define PLL_35XX_RATE(_rate, _m, _p, _s) \
+ { \
+ .rate = (_rate), \
+ .mdiv = (_m), \
+ .pdiv = (_p), \
+ .sdiv = (_s), \
+ }
+
+#define PLL_36XX_RATE(_rate, _m, _p, _s, _k) \
+ { \
+ .rate = (_rate), \
+ .mdiv = (_m), \
+ .pdiv = (_p), \
+ .sdiv = (_s), \
+ .kdiv = (_k), \
+ }
+
+#define PLL_45XX_RATE(_rate, _m, _p, _s, _afc) \
+ { \
+ .rate = (_rate), \
+ .mdiv = (_m), \
+ .pdiv = (_p), \
+ .sdiv = (_s), \
+ .afc = (_afc), \
+ }
+
+#define PLL_4600_RATE(_rate, _m, _p, _s, _k, _vsel) \
+ { \
+ .rate = (_rate), \
+ .mdiv = (_m), \
+ .pdiv = (_p), \
+ .sdiv = (_s), \
+ .kdiv = (_k), \
+ .vsel = (_vsel), \
+ }
+
+#define PLL_4650_RATE(_rate, _m, _p, _s, _k, _mfr, _mrr, _vsel) \
+ { \
+ .rate = (_rate), \
+ .mdiv = (_m), \
+ .pdiv = (_p), \
+ .sdiv = (_s), \
+ .kdiv = (_k), \
+ .mfr = (_mfr), \
+ .mrr = (_mrr), \
+ .vsel = (_vsel), \
+ }
+
+/* NOTE: Rate table should be kept sorted in descending order. */
+
+struct samsung_pll_rate_table {
+ unsigned int rate;
+ unsigned int pdiv;
+ unsigned int mdiv;
+ unsigned int sdiv;
+ unsigned int kdiv;
+ unsigned int afc;
+ unsigned int mfr;
+ unsigned int mrr;
+ unsigned int vsel;
};
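As the NOTE above says, tables must be sorted by descending rate; they are also zero-terminated, since the registration code in clk-pll.c finds the length by scanning for rate == 0. A hypothetical EPLL table built from the fractional 36xx entries (values illustrative, computed as 24 MHz * (M + K/65536) / (P << S)):

static const struct samsung_pll_rate_table example_epll_rates[] = {
	PLL_36XX_RATE(192000000, 48, 3, 1, 0),
	PLL_36XX_RATE(180633600, 45, 3, 1, 10381),
	PLL_36XX_RATE(90316800, 45, 3, 2, 10381),
	{ /* sentinel */ },
};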
-extern struct clk * __init samsung_clk_register_pll35xx(const char *name,
- const char *pname, const void __iomem *con_reg);
-extern struct clk * __init samsung_clk_register_pll36xx(const char *name,
- const char *pname, const void __iomem *con_reg);
-extern struct clk * __init samsung_clk_register_pll45xx(const char *name,
- const char *pname, const void __iomem *con_reg,
- enum pll45xx_type type);
-extern struct clk * __init samsung_clk_register_pll46xx(const char *name,
- const char *pname, const void __iomem *con_reg,
- enum pll46xx_type type);
extern struct clk * __init samsung_clk_register_pll2550x(const char *name,
const char *pname, const void __iomem *reg_base,
const unsigned long offset);
diff --git a/drivers/clk/samsung/clk-s3c64xx.c b/drivers/clk/samsung/clk-s3c64xx.c
new file mode 100644
index 0000000..7d2c842
--- /dev/null
+++ b/drivers/clk/samsung/clk-s3c64xx.c
@@ -0,0 +1,473 @@
+/*
+ * Copyright (c) 2013 Tomasz Figa <tomasz.figa at gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Common Clock Framework support for all S3C64xx SoCs.
+*/
+
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+
+#include <dt-bindings/clock/samsung,s3c64xx-clock.h>
+
+#include "clk.h"
+#include "clk-pll.h"
+
+/* S3C64xx clock controller register offsets. */
+#define APLL_LOCK 0x000
+#define MPLL_LOCK 0x004
+#define EPLL_LOCK 0x008
+#define APLL_CON 0x00c
+#define MPLL_CON 0x010
+#define EPLL_CON0 0x014
+#define EPLL_CON1 0x018
+#define CLK_SRC 0x01c
+#define CLK_DIV0 0x020
+#define CLK_DIV1 0x024
+#define CLK_DIV2 0x028
+#define HCLK_GATE 0x030
+#define PCLK_GATE 0x034
+#define SCLK_GATE 0x038
+#define MEM0_GATE 0x03c
+#define CLK_SRC2 0x10c
+#define OTHERS 0x900
+
+/* Helper macros to define clock arrays. */
+#define FIXED_RATE_CLOCKS(name) \
+ static struct samsung_fixed_rate_clock name[]
+#define MUX_CLOCKS(name) \
+ static struct samsung_mux_clock name[]
+#define DIV_CLOCKS(name) \
+ static struct samsung_div_clock name[]
+#define GATE_CLOCKS(name) \
+ static struct samsung_gate_clock name[]
+
+/* Helper macros for gate types present on S3C64xx. */
+#define GATE_BUS(_id, cname, pname, o, b) \
+ GATE(_id, cname, pname, o, b, 0, 0)
+#define GATE_SCLK(_id, cname, pname, o, b) \
+ GATE(_id, cname, pname, o, b, CLK_SET_RATE_PARENT, 0)
+#define GATE_ON(_id, cname, pname, o, b) \
+ GATE(_id, cname, pname, o, b, CLK_IGNORE_UNUSED, 0)
+
+/* list of PLLs to be registered */
+enum s3c64xx_plls {
+ apll, mpll, epll,
+};
+
+/*
+ * List of controller registers to be saved and restored during
+ * a suspend/resume cycle.
+ */
+static unsigned long s3c64xx_clk_regs[] __initdata = {
+ APLL_LOCK,
+ MPLL_LOCK,
+ EPLL_LOCK,
+ APLL_CON,
+ MPLL_CON,
+ EPLL_CON0,
+ EPLL_CON1,
+ CLK_SRC,
+ CLK_DIV0,
+ CLK_DIV1,
+ CLK_DIV2,
+ HCLK_GATE,
+ PCLK_GATE,
+ SCLK_GATE,
+};
+
+static unsigned long s3c6410_clk_regs[] __initdata = {
+ CLK_SRC2,
+ MEM0_GATE,
+};
+
+/* List of parent clocks common for all S3C64xx SoCs. */
+PNAME(spi_mmc_p) = { "mout_epll", "dout_mpll", "fin_pll", "clk27m" };
+PNAME(uart_p) = { "mout_epll", "dout_mpll" };
+PNAME(audio0_p) = { "mout_epll", "dout_mpll", "fin_pll", "iiscdclk0",
+ "pcmcdclk0", "none", "none", "none" };
+PNAME(audio1_p) = { "mout_epll", "dout_mpll", "fin_pll", "iiscdclk1",
+ "pcmcdclk0", "none", "none", "none" };
+PNAME(mfc_p) = { "hclkx2", "mout_epll" };
+PNAME(apll_p) = { "fin_pll", "fout_apll" };
+PNAME(mpll_p) = { "fin_pll", "fout_mpll" };
+PNAME(epll_p) = { "fin_pll", "fout_epll" };
+PNAME(hclkx2_p) = { "mout_mpll", "mout_apll" };
+
+/* S3C6400-specific parent clocks. */
+PNAME(scaler_lcd_p6400) = { "mout_epll", "dout_mpll", "none", "none" };
+PNAME(irda_p6400) = { "mout_epll", "dout_mpll", "none", "clk48m" };
+PNAME(uhost_p6400) = { "clk48m", "mout_epll", "dout_mpll", "none" };
+
+/* S3C6410-specific parent clocks. */
+PNAME(clk27_p6410) = { "clk27m", "fin_pll" };
+PNAME(scaler_lcd_p6410) = { "mout_epll", "dout_mpll", "fin_pll", "none" };
+PNAME(irda_p6410) = { "mout_epll", "dout_mpll", "fin_pll", "clk48m" };
+PNAME(uhost_p6410) = { "clk48m", "mout_epll", "dout_mpll", "fin_pll" };
+PNAME(audio2_p6410) = { "mout_epll", "dout_mpll", "fin_pll", "iiscdclk2",
+ "pcmcdclk1", "none", "none", "none" };
+
+/* Fixed rate clocks generated outside the SoC. */
+FIXED_RATE_CLOCKS(s3c64xx_fixed_rate_ext_clks) __initdata = {
+ FRATE(0, "fin_pll", NULL, CLK_IS_ROOT, 0),
+ FRATE(0, "xusbxti", NULL, CLK_IS_ROOT, 0),
+};
+
+/* Fixed rate clocks generated inside the SoC. */
+FIXED_RATE_CLOCKS(s3c64xx_fixed_rate_clks) __initdata = {
+ FRATE(CLK27M, "clk27m", NULL, CLK_IS_ROOT, 27000000),
+ FRATE(CLK48M, "clk48m", NULL, CLK_IS_ROOT, 48000000),
+};
+
+/* List of clock muxes present on all S3C64xx SoCs. */
+MUX_CLOCKS(s3c64xx_mux_clks) __initdata = {
+ MUX_F(0, "mout_syncmux", hclkx2_p, OTHERS, 6, 1, 0, CLK_MUX_READ_ONLY),
+ MUX(MOUT_APLL, "mout_apll", apll_p, CLK_SRC, 0, 1),
+ MUX(MOUT_MPLL, "mout_mpll", mpll_p, CLK_SRC, 1, 1),
+ MUX(MOUT_EPLL, "mout_epll", epll_p, CLK_SRC, 2, 1),
+ MUX(MOUT_MFC, "mout_mfc", mfc_p, CLK_SRC, 4, 1),
+ MUX(MOUT_AUDIO0, "mout_audio0", audio0_p, CLK_SRC, 7, 3),
+ MUX(MOUT_AUDIO1, "mout_audio1", audio1_p, CLK_SRC, 10, 3),
+ MUX(MOUT_UART, "mout_uart", uart_p, CLK_SRC, 13, 1),
+ MUX(MOUT_SPI0, "mout_spi0", spi_mmc_p, CLK_SRC, 14, 2),
+ MUX(MOUT_SPI1, "mout_spi1", spi_mmc_p, CLK_SRC, 16, 2),
+ MUX(MOUT_MMC0, "mout_mmc0", spi_mmc_p, CLK_SRC, 18, 2),
+ MUX(MOUT_MMC1, "mout_mmc1", spi_mmc_p, CLK_SRC, 20, 2),
+ MUX(MOUT_MMC2, "mout_mmc2", spi_mmc_p, CLK_SRC, 22, 2),
+};
+
+/* List of clock muxes present on S3C6400. */
+MUX_CLOCKS(s3c6400_mux_clks) __initdata = {
+ MUX(MOUT_UHOST, "mout_uhost", uhost_p6400, CLK_SRC, 5, 2),
+ MUX(MOUT_IRDA, "mout_irda", irda_p6400, CLK_SRC, 24, 2),
+ MUX(MOUT_LCD, "mout_lcd", scaler_lcd_p6400, CLK_SRC, 26, 2),
+ MUX(MOUT_SCALER, "mout_scaler", scaler_lcd_p6400, CLK_SRC, 28, 2),
+};
+
+/* List of clock muxes present on S3C6410. */
+MUX_CLOCKS(s3c6410_mux_clks) __initdata = {
+ MUX(MOUT_UHOST, "mout_uhost", uhost_p6410, CLK_SRC, 5, 2),
+ MUX(MOUT_IRDA, "mout_irda", irda_p6410, CLK_SRC, 24, 2),
+ MUX(MOUT_LCD, "mout_lcd", scaler_lcd_p6410, CLK_SRC, 26, 2),
+ MUX(MOUT_SCALER, "mout_scaler", scaler_lcd_p6410, CLK_SRC, 28, 2),
+ MUX(MOUT_DAC27, "mout_dac27", clk27_p6410, CLK_SRC, 30, 1),
+ MUX(MOUT_TV27, "mout_tv27", clk27_p6410, CLK_SRC, 31, 1),
+ MUX(MOUT_AUDIO2, "mout_audio2", audio2_p6410, CLK_SRC2, 0, 3),
+};
+
+/* List of clock dividers present on all S3C64xx SoCs. */
+DIV_CLOCKS(s3c64xx_div_clks) __initdata = {
+ DIV(DOUT_MPLL, "dout_mpll", "mout_mpll", CLK_DIV0, 4, 1),
+ DIV(HCLKX2, "hclkx2", "mout_syncmux", CLK_DIV0, 9, 3),
+ DIV(HCLK, "hclk", "hclkx2", CLK_DIV0, 8, 1),
+ DIV(PCLK, "pclk", "hclkx2", CLK_DIV0, 12, 4),
+ DIV(DOUT_SECUR, "dout_secur", "hclkx2", CLK_DIV0, 18, 2),
+ DIV(DOUT_CAM, "dout_cam", "hclkx2", CLK_DIV0, 20, 4),
+ DIV(DOUT_JPEG, "dout_jpeg", "hclkx2", CLK_DIV0, 24, 4),
+ DIV(DOUT_MFC, "dout_mfc", "mout_mfc", CLK_DIV0, 28, 4),
+ DIV(DOUT_MMC0, "dout_mmc0", "mout_mmc0", CLK_DIV1, 0, 4),
+ DIV(DOUT_MMC1, "dout_mmc1", "mout_mmc1", CLK_DIV1, 4, 4),
+ DIV(DOUT_MMC2, "dout_mmc2", "mout_mmc2", CLK_DIV1, 8, 4),
+ DIV(DOUT_LCD, "dout_lcd", "mout_lcd", CLK_DIV1, 12, 4),
+ DIV(DOUT_SCALER, "dout_scaler", "mout_scaler", CLK_DIV1, 16, 4),
+ DIV(DOUT_UHOST, "dout_uhost", "mout_uhost", CLK_DIV1, 20, 4),
+ DIV(DOUT_SPI0, "dout_spi0", "mout_spi0", CLK_DIV2, 0, 4),
+ DIV(DOUT_SPI1, "dout_spi1", "mout_spi1", CLK_DIV2, 4, 4),
+ DIV(DOUT_AUDIO0, "dout_audio0", "mout_audio0", CLK_DIV2, 8, 4),
+ DIV(DOUT_AUDIO1, "dout_audio1", "mout_audio1", CLK_DIV2, 12, 4),
+ DIV(DOUT_UART, "dout_uart", "mout_uart", CLK_DIV2, 16, 4),
+ DIV(DOUT_IRDA, "dout_irda", "mout_irda", CLK_DIV2, 20, 4),
+};
+
+/* List of clock dividers present on S3C6400. */
+DIV_CLOCKS(s3c6400_div_clks) __initdata = {
+ DIV(ARMCLK, "armclk", "mout_apll", CLK_DIV0, 0, 3),
+};
+
+/* List of clock dividers present on S3C6410. */
+DIV_CLOCKS(s3c6410_div_clks) __initdata = {
+ DIV(ARMCLK, "armclk", "mout_apll", CLK_DIV0, 0, 4),
+ DIV(DOUT_FIMC, "dout_fimc", "hclk", CLK_DIV1, 24, 4),
+ DIV(DOUT_AUDIO2, "dout_audio2", "mout_audio2", CLK_DIV2, 24, 4),
+};
+
+/* List of clock gates present on all S3C64xx SoCs. */
+GATE_CLOCKS(s3c64xx_gate_clks) __initdata = {
+ GATE_BUS(HCLK_UHOST, "hclk_uhost", "hclk", HCLK_GATE, 29),
+ GATE_BUS(HCLK_SECUR, "hclk_secur", "hclk", HCLK_GATE, 28),
+ GATE_BUS(HCLK_SDMA1, "hclk_sdma1", "hclk", HCLK_GATE, 27),
+ GATE_BUS(HCLK_SDMA0, "hclk_sdma0", "hclk", HCLK_GATE, 26),
+ GATE_ON(HCLK_DDR1, "hclk_ddr1", "hclk", HCLK_GATE, 24),
+ GATE_BUS(HCLK_USB, "hclk_usb", "hclk", HCLK_GATE, 20),
+ GATE_BUS(HCLK_HSMMC2, "hclk_hsmmc2", "hclk", HCLK_GATE, 19),
+ GATE_BUS(HCLK_HSMMC1, "hclk_hsmmc1", "hclk", HCLK_GATE, 18),
+ GATE_BUS(HCLK_HSMMC0, "hclk_hsmmc0", "hclk", HCLK_GATE, 17),
+ GATE_BUS(HCLK_MDP, "hclk_mdp", "hclk", HCLK_GATE, 16),
+ GATE_BUS(HCLK_DHOST, "hclk_dhost", "hclk", HCLK_GATE, 15),
+ GATE_BUS(HCLK_IHOST, "hclk_ihost", "hclk", HCLK_GATE, 14),
+ GATE_BUS(HCLK_DMA1, "hclk_dma1", "hclk", HCLK_GATE, 13),
+ GATE_BUS(HCLK_DMA0, "hclk_dma0", "hclk", HCLK_GATE, 12),
+ GATE_BUS(HCLK_JPEG, "hclk_jpeg", "hclk", HCLK_GATE, 11),
+ GATE_BUS(HCLK_CAMIF, "hclk_camif", "hclk", HCLK_GATE, 10),
+ GATE_BUS(HCLK_SCALER, "hclk_scaler", "hclk", HCLK_GATE, 9),
+ GATE_BUS(HCLK_2D, "hclk_2d", "hclk", HCLK_GATE, 8),
+ GATE_BUS(HCLK_TV, "hclk_tv", "hclk", HCLK_GATE, 7),
+ GATE_BUS(HCLK_POST0, "hclk_post0", "hclk", HCLK_GATE, 5),
+ GATE_BUS(HCLK_ROT, "hclk_rot", "hclk", HCLK_GATE, 4),
+ GATE_BUS(HCLK_LCD, "hclk_lcd", "hclk", HCLK_GATE, 3),
+ GATE_BUS(HCLK_TZIC, "hclk_tzic", "hclk", HCLK_GATE, 2),
+ GATE_ON(HCLK_INTC, "hclk_intc", "hclk", HCLK_GATE, 1),
+ GATE_ON(PCLK_SKEY, "pclk_skey", "pclk", PCLK_GATE, 24),
+ GATE_ON(PCLK_CHIPID, "pclk_chipid", "pclk", PCLK_GATE, 23),
+ GATE_BUS(PCLK_SPI1, "pclk_spi1", "pclk", PCLK_GATE, 22),
+ GATE_BUS(PCLK_SPI0, "pclk_spi0", "pclk", PCLK_GATE, 21),
+ GATE_BUS(PCLK_HSIRX, "pclk_hsirx", "pclk", PCLK_GATE, 20),
+ GATE_BUS(PCLK_HSITX, "pclk_hsitx", "pclk", PCLK_GATE, 19),
+ GATE_ON(PCLK_GPIO, "pclk_gpio", "pclk", PCLK_GATE, 18),
+ GATE_BUS(PCLK_IIC0, "pclk_iic0", "pclk", PCLK_GATE, 17),
+ GATE_BUS(PCLK_IIS1, "pclk_iis1", "pclk", PCLK_GATE, 16),
+ GATE_BUS(PCLK_IIS0, "pclk_iis0", "pclk", PCLK_GATE, 15),
+ GATE_BUS(PCLK_AC97, "pclk_ac97", "pclk", PCLK_GATE, 14),
+ GATE_BUS(PCLK_TZPC, "pclk_tzpc", "pclk", PCLK_GATE, 13),
+ GATE_BUS(PCLK_TSADC, "pclk_tsadc", "pclk", PCLK_GATE, 12),
+ GATE_BUS(PCLK_KEYPAD, "pclk_keypad", "pclk", PCLK_GATE, 11),
+ GATE_BUS(PCLK_IRDA, "pclk_irda", "pclk", PCLK_GATE, 10),
+ GATE_BUS(PCLK_PCM1, "pclk_pcm1", "pclk", PCLK_GATE, 9),
+ GATE_BUS(PCLK_PCM0, "pclk_pcm0", "pclk", PCLK_GATE, 8),
+ GATE_BUS(PCLK_PWM, "pclk_pwm", "pclk", PCLK_GATE, 7),
+ GATE_BUS(PCLK_RTC, "pclk_rtc", "pclk", PCLK_GATE, 6),
+ GATE_BUS(PCLK_WDT, "pclk_wdt", "pclk", PCLK_GATE, 5),
+ GATE_BUS(PCLK_UART3, "pclk_uart3", "pclk", PCLK_GATE, 4),
+ GATE_BUS(PCLK_UART2, "pclk_uart2", "pclk", PCLK_GATE, 3),
+ GATE_BUS(PCLK_UART1, "pclk_uart1", "pclk", PCLK_GATE, 2),
+ GATE_BUS(PCLK_UART0, "pclk_uart0", "pclk", PCLK_GATE, 1),
+ GATE_BUS(PCLK_MFC, "pclk_mfc", "pclk", PCLK_GATE, 0),
+ GATE_SCLK(SCLK_UHOST, "sclk_uhost", "dout_uhost", SCLK_GATE, 30),
+ GATE_SCLK(SCLK_MMC2_48, "sclk_mmc2_48", "clk48m", SCLK_GATE, 29),
+ GATE_SCLK(SCLK_MMC1_48, "sclk_mmc1_48", "clk48m", SCLK_GATE, 28),
+ GATE_SCLK(SCLK_MMC0_48, "sclk_mmc0_48", "clk48m", SCLK_GATE, 27),
+ GATE_SCLK(SCLK_MMC2, "sclk_mmc2", "dout_mmc2", SCLK_GATE, 26),
+ GATE_SCLK(SCLK_MMC1, "sclk_mmc1", "dout_mmc1", SCLK_GATE, 25),
+ GATE_SCLK(SCLK_MMC0, "sclk_mmc0", "dout_mmc0", SCLK_GATE, 24),
+ GATE_SCLK(SCLK_SPI1_48, "sclk_spi1_48", "clk48m", SCLK_GATE, 23),
+ GATE_SCLK(SCLK_SPI0_48, "sclk_spi0_48", "clk48m", SCLK_GATE, 22),
+ GATE_SCLK(SCLK_SPI1, "sclk_spi1", "dout_spi1", SCLK_GATE, 21),
+ GATE_SCLK(SCLK_SPI0, "sclk_spi0", "dout_spi0", SCLK_GATE, 20),
+ GATE_SCLK(SCLK_DAC27, "sclk_dac27", "mout_dac27", SCLK_GATE, 19),
+ GATE_SCLK(SCLK_TV27, "sclk_tv27", "mout_tv27", SCLK_GATE, 18),
+ GATE_SCLK(SCLK_SCALER27, "sclk_scaler27", "clk27m", SCLK_GATE, 17),
+ GATE_SCLK(SCLK_SCALER, "sclk_scaler", "dout_scaler", SCLK_GATE, 16),
+ GATE_SCLK(SCLK_LCD27, "sclk_lcd27", "clk27m", SCLK_GATE, 15),
+ GATE_SCLK(SCLK_LCD, "sclk_lcd", "dout_lcd", SCLK_GATE, 14),
+ GATE_SCLK(SCLK_POST0_27, "sclk_post0_27", "clk27m", SCLK_GATE, 12),
+ GATE_SCLK(SCLK_POST0, "sclk_post0", "dout_lcd", SCLK_GATE, 10),
+ GATE_SCLK(SCLK_AUDIO1, "sclk_audio1", "dout_audio1", SCLK_GATE, 9),
+ GATE_SCLK(SCLK_AUDIO0, "sclk_audio0", "dout_audio0", SCLK_GATE, 8),
+ GATE_SCLK(SCLK_SECUR, "sclk_secur", "dout_secur", SCLK_GATE, 7),
+ GATE_SCLK(SCLK_IRDA, "sclk_irda", "dout_irda", SCLK_GATE, 6),
+ GATE_SCLK(SCLK_UART, "sclk_uart", "dout_uart", SCLK_GATE, 5),
+ GATE_SCLK(SCLK_MFC, "sclk_mfc", "dout_mfc", SCLK_GATE, 3),
+ GATE_SCLK(SCLK_CAM, "sclk_cam", "dout_cam", SCLK_GATE, 2),
+ GATE_SCLK(SCLK_JPEG, "sclk_jpeg", "dout_jpeg", SCLK_GATE, 1),
+};
+
+/* List of clock gates present on S3C6400. */
+GATE_CLOCKS(s3c6400_gate_clks) __initdata = {
+ GATE_ON(HCLK_DDR0, "hclk_ddr0", "hclk", HCLK_GATE, 23),
+ GATE_SCLK(SCLK_ONENAND, "sclk_onenand", "parent", SCLK_GATE, 4),
+};
+
+/* List of clock gates present on S3C6410. */
+GATE_CLOCKS(s3c6410_gate_clks) __initdata = {
+ GATE_BUS(HCLK_3DSE, "hclk_3dse", "hclk", HCLK_GATE, 31),
+ GATE_ON(HCLK_IROM, "hclk_irom", "hclk", HCLK_GATE, 25),
+ GATE_ON(HCLK_MEM1, "hclk_mem1", "hclk", HCLK_GATE, 22),
+ GATE_ON(HCLK_MEM0, "hclk_mem0", "hclk", HCLK_GATE, 21),
+ GATE_BUS(HCLK_MFC, "hclk_mfc", "hclk", HCLK_GATE, 0),
+ GATE_BUS(PCLK_IIC1, "pclk_iic1", "pclk", PCLK_GATE, 27),
+ GATE_BUS(PCLK_IIS2, "pclk_iis2", "pclk", PCLK_GATE, 26),
+ GATE_SCLK(SCLK_FIMC, "sclk_fimc", "dout_fimc", SCLK_GATE, 13),
+ GATE_SCLK(SCLK_AUDIO2, "sclk_audio2", "dout_audio2", SCLK_GATE, 11),
+ GATE_BUS(MEM0_CFCON, "mem0_cfcon", "hclk_mem0", MEM0_GATE, 5),
+ GATE_BUS(MEM0_ONENAND1, "mem0_onenand1", "hclk_mem0", MEM0_GATE, 4),
+ GATE_BUS(MEM0_ONENAND0, "mem0_onenand0", "hclk_mem0", MEM0_GATE, 3),
+ GATE_BUS(MEM0_NFCON, "mem0_nfcon", "hclk_mem0", MEM0_GATE, 2),
+ GATE_ON(MEM0_SROM, "mem0_srom", "hclk_mem0", MEM0_GATE, 1),
+};
+
+/* List of PLL clocks. */
+static struct samsung_pll_clock s3c64xx_pll_clks[] __initdata = {
+ [apll] = PLL(pll_6552, FOUT_APLL, "fout_apll", "fin_pll",
+ APLL_LOCK, APLL_CON, NULL),
+ [mpll] = PLL(pll_6552, FOUT_MPLL, "fout_mpll", "fin_pll",
+ MPLL_LOCK, MPLL_CON, NULL),
+ [epll] = PLL(pll_6553, FOUT_EPLL, "fout_epll", "fin_pll",
+ EPLL_LOCK, EPLL_CON0, NULL),
+};
+
+/* Aliases for common s3c64xx clocks. */
+static struct samsung_clock_alias s3c64xx_clock_aliases[] = {
+ ALIAS(FOUT_APLL, NULL, "fout_apll"),
+ ALIAS(FOUT_MPLL, NULL, "fout_mpll"),
+ ALIAS(FOUT_EPLL, NULL, "fout_epll"),
+ ALIAS(MOUT_EPLL, NULL, "mout_epll"),
+ ALIAS(DOUT_MPLL, NULL, "dout_mpll"),
+ ALIAS(HCLKX2, NULL, "hclk2"),
+ ALIAS(HCLK, NULL, "hclk"),
+ ALIAS(PCLK, NULL, "pclk"),
+ ALIAS(PCLK, NULL, "clk_uart_baud2"),
+ ALIAS(ARMCLK, NULL, "armclk"),
+ ALIAS(HCLK_UHOST, "s3c2410-ohci", "usb-host"),
+ ALIAS(HCLK_USB, "s3c-hsotg", "otg"),
+ ALIAS(HCLK_HSMMC2, "s3c-sdhci.2", "hsmmc"),
+ ALIAS(HCLK_HSMMC2, "s3c-sdhci.2", "mmc_busclk.0"),
+ ALIAS(HCLK_HSMMC1, "s3c-sdhci.1", "hsmmc"),
+ ALIAS(HCLK_HSMMC1, "s3c-sdhci.1", "mmc_busclk.0"),
+ ALIAS(HCLK_HSMMC0, "s3c-sdhci.0", "hsmmc"),
+ ALIAS(HCLK_HSMMC0, "s3c-sdhci.0", "mmc_busclk.0"),
+ ALIAS(HCLK_DMA1, NULL, "dma1"),
+ ALIAS(HCLK_DMA0, NULL, "dma0"),
+ ALIAS(HCLK_CAMIF, "s3c-camif", "camif"),
+ ALIAS(HCLK_LCD, "s3c-fb", "lcd"),
+ ALIAS(PCLK_SPI1, "s3c6410-spi.1", "spi"),
+ ALIAS(PCLK_SPI0, "s3c6410-spi.0", "spi"),
+ ALIAS(PCLK_IIC0, "s3c2440-i2c.0", "i2c"),
+ ALIAS(PCLK_IIS1, "samsung-i2s.1", "iis"),
+ ALIAS(PCLK_IIS0, "samsung-i2s.0", "iis"),
+ ALIAS(PCLK_AC97, "samsung-ac97", "ac97"),
+ ALIAS(PCLK_TSADC, "s3c64xx-adc", "adc"),
+ ALIAS(PCLK_KEYPAD, "samsung-keypad", "keypad"),
+ ALIAS(PCLK_PCM1, "samsung-pcm.1", "pcm"),
+ ALIAS(PCLK_PCM0, "samsung-pcm.0", "pcm"),
+ ALIAS(PCLK_PWM, NULL, "timers"),
+ ALIAS(PCLK_RTC, "s3c64xx-rtc", "rtc"),
+ ALIAS(PCLK_WDT, NULL, "watchdog"),
+ ALIAS(PCLK_UART3, "s3c6400-uart.3", "uart"),
+ ALIAS(PCLK_UART2, "s3c6400-uart.2", "uart"),
+ ALIAS(PCLK_UART1, "s3c6400-uart.1", "uart"),
+ ALIAS(PCLK_UART0, "s3c6400-uart.0", "uart"),
+ ALIAS(SCLK_UHOST, "s3c2410-ohci", "usb-bus-host"),
+ ALIAS(SCLK_MMC2, "s3c-sdhci.2", "mmc_busclk.2"),
+ ALIAS(SCLK_MMC1, "s3c-sdhci.1", "mmc_busclk.2"),
+ ALIAS(SCLK_MMC0, "s3c-sdhci.0", "mmc_busclk.2"),
+ ALIAS(SCLK_SPI1, "s3c6410-spi.1", "spi-bus"),
+ ALIAS(SCLK_SPI0, "s3c6410-spi.0", "spi-bus"),
+ ALIAS(SCLK_AUDIO1, "samsung-pcm.1", "audio-bus"),
+ ALIAS(SCLK_AUDIO1, "samsung-i2s.1", "audio-bus"),
+ ALIAS(SCLK_AUDIO0, "samsung-pcm.0", "audio-bus"),
+ ALIAS(SCLK_AUDIO0, "samsung-i2s.0", "audio-bus"),
+ ALIAS(SCLK_UART, NULL, "clk_uart_baud3"),
+ ALIAS(SCLK_CAM, "s3c-camif", "camera"),
+};
+
+/* Aliases for s3c6400-specific clocks. */
+static struct samsung_clock_alias s3c6400_clock_aliases[] = {
+ /* Nothing to place here yet. */
+};
+
+/* Aliases for s3c6410-specific clocks. */
+static struct samsung_clock_alias s3c6410_clock_aliases[] = {
+ ALIAS(PCLK_IIC1, "s3c2440-i2c.1", "i2c"),
+ ALIAS(PCLK_IIS2, "samsung-i2s.2", "iis"),
+ ALIAS(SCLK_FIMC, "s3c-camif", "fimc"),
+ ALIAS(SCLK_AUDIO2, "samsung-i2s.2", "audio-bus"),
+ ALIAS(MEM0_SROM, NULL, "srom"),
+};
+
+static void __init s3c64xx_clk_register_fixed_ext(unsigned long fin_pll_f,
+ unsigned long xusbxti_f)
+{
+ s3c64xx_fixed_rate_ext_clks[0].fixed_rate = fin_pll_f;
+ s3c64xx_fixed_rate_ext_clks[1].fixed_rate = xusbxti_f;
+ samsung_clk_register_fixed_rate(s3c64xx_fixed_rate_ext_clks,
+ ARRAY_SIZE(s3c64xx_fixed_rate_ext_clks));
+}
+
+/* Register s3c64xx clocks. */
+void __init s3c64xx_clk_init(struct device_node *np, unsigned long xtal_f,
+ unsigned long xusbxti_f, bool is_s3c6400,
+ void __iomem *reg_base)
+{
+ unsigned long *soc_regs = NULL;
+ unsigned long nr_soc_regs = 0;
+
+ if (np) {
+ reg_base = of_iomap(np, 0);
+ if (!reg_base)
+ panic("%s: failed to map registers\n", __func__);
+ }
+
+ if (!is_s3c6400) {
+ soc_regs = s3c6410_clk_regs;
+ nr_soc_regs = ARRAY_SIZE(s3c6410_clk_regs);
+ }
+
+ samsung_clk_init(np, reg_base, NR_CLKS, s3c64xx_clk_regs,
+ ARRAY_SIZE(s3c64xx_clk_regs), soc_regs, nr_soc_regs);
+
+ /* Register external clocks. */
+ if (!np)
+ s3c64xx_clk_register_fixed_ext(xtal_f, xusbxti_f);
+
+ /* Register PLLs. */
+ samsung_clk_register_pll(s3c64xx_pll_clks,
+ ARRAY_SIZE(s3c64xx_pll_clks), reg_base);
+
+ /* Register common internal clocks. */
+ samsung_clk_register_fixed_rate(s3c64xx_fixed_rate_clks,
+ ARRAY_SIZE(s3c64xx_fixed_rate_clks));
+ samsung_clk_register_mux(s3c64xx_mux_clks,
+ ARRAY_SIZE(s3c64xx_mux_clks));
+ samsung_clk_register_div(s3c64xx_div_clks,
+ ARRAY_SIZE(s3c64xx_div_clks));
+ samsung_clk_register_gate(s3c64xx_gate_clks,
+ ARRAY_SIZE(s3c64xx_gate_clks));
+
+ /* Register SoC-specific clocks. */
+ if (is_s3c6400) {
+ samsung_clk_register_mux(s3c6400_mux_clks,
+ ARRAY_SIZE(s3c6400_mux_clks));
+ samsung_clk_register_div(s3c6400_div_clks,
+ ARRAY_SIZE(s3c6400_div_clks));
+ samsung_clk_register_gate(s3c6400_gate_clks,
+ ARRAY_SIZE(s3c6400_gate_clks));
+ samsung_clk_register_alias(s3c6400_clock_aliases,
+ ARRAY_SIZE(s3c6400_clock_aliases));
+ } else {
+ samsung_clk_register_mux(s3c6410_mux_clks,
+ ARRAY_SIZE(s3c6410_mux_clks));
+ samsung_clk_register_div(s3c6410_div_clks,
+ ARRAY_SIZE(s3c6410_div_clks));
+ samsung_clk_register_gate(s3c6410_gate_clks,
+ ARRAY_SIZE(s3c6410_gate_clks));
+ samsung_clk_register_alias(s3c6410_clock_aliases,
+ ARRAY_SIZE(s3c6410_clock_aliases));
+ }
+
+ samsung_clk_register_alias(s3c64xx_clock_aliases,
+ ARRAY_SIZE(s3c64xx_clock_aliases));
+
+ pr_info("%s clocks: apll = %lu, mpll = %lu\n"
+ "\tepll = %lu, arm_clk = %lu\n",
+ is_s3c6400 ? "S3C6400" : "S3C6410",
+ _get_rate("fout_apll"), _get_rate("fout_mpll"),
+ _get_rate("fout_epll"), _get_rate("armclk"));
+}
+
+static void __init s3c6400_clk_init(struct device_node *np)
+{
+ s3c64xx_clk_init(np, 0, 0, true, NULL);
+}
+CLK_OF_DECLARE(s3c6400_clk, "samsung,s3c6400-clock", s3c6400_clk_init);
+
+static void __init s3c6410_clk_init(struct device_node *np)
+{
+ s3c64xx_clk_init(np, 0, 0, false, NULL);
+}
+CLK_OF_DECLARE(s3c6410_clk, "samsung,s3c6410-clock", s3c6410_clk_init);
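On boards not yet converted to device tree, machine code can call the same entry point directly, passing both crystal rates and a pre-mapped register base (np == NULL skips the of_iomap() and triggers registration of the external fixed-rate clocks). A hypothetical board-init call; the mapping cookie is a stand-in, not a real mach-s3c64xx symbol:

/* Hypothetical non-DT setup: 12 MHz xtal, 48 MHz USB xtal, S3C6410. */
static void __init example_board_init_clocks(void __iomem *clk_regs)
{
	s3c64xx_clk_init(NULL, 12000000, 48000000, false, clk_regs);
}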
diff --git a/drivers/clk/samsung/clk.c b/drivers/clk/samsung/clk.c
index cd3c40a..f503f32 100644
--- a/drivers/clk/samsung/clk.c
+++ b/drivers/clk/samsung/clk.c
@@ -307,14 +307,12 @@ void __init samsung_clk_of_register_fixed_ext(
unsigned long _get_rate(const char *clk_name)
{
struct clk *clk;
- unsigned long rate;
- clk = clk_get(NULL, clk_name);
- if (IS_ERR(clk)) {
+ clk = __clk_lookup(clk_name);
+ if (!clk) {
pr_err("%s: could not find clock %s\n", __func__, clk_name);
return 0;
}
- rate = clk_get_rate(clk);
- clk_put(clk);
- return rate;
+
+ return clk_get_rate(clk);
}
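Unlike clk_get(), __clk_lookup() resolves a clock by its global name without taking a reference, which is why the clk_put() disappears; that is safe for these early-init rate printouts, where nothing can unregister a clock yet. Hypothetical usage:

/* Early init only: no reference is taken, so there is nothing to put. */
pr_info("apll = %lu\n", _get_rate("fout_apll"));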
diff --git a/drivers/clk/samsung/clk.h b/drivers/clk/samsung/clk.h
index 2f7dba2..31b4174 100644
--- a/drivers/clk/samsung/clk.h
+++ b/drivers/clk/samsung/clk.h
@@ -19,6 +19,7 @@
#include <linux/clk-provider.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include "clk-pll.h"
/**
* struct samsung_clock_alias: information about clock alias
@@ -39,6 +40,8 @@ struct samsung_clock_alias {
.alias = a, \
}
+#define MHZ (1000 * 1000)
+
/**
* struct samsung_fixed_rate_clock: information about fixed-rate clock
* @id: platform specific id of the clock.
@@ -127,7 +130,7 @@ struct samsung_mux_clock {
.name = cname, \
.parent_names = pnames, \
.num_parents = ARRAY_SIZE(pnames), \
- .flags = f, \
+ .flags = (f) | CLK_SET_RATE_NO_REPARENT, \
.offset = o, \
.shift = s, \
.width = w, \
@@ -261,6 +264,54 @@ struct samsung_clk_reg_dump {
u32 value;
};
+/**
+ * struct samsung_pll_clock: information about pll clock
+ * @id: platform specific id of the clock.
+ * @dev_name: name of the device to which this clock belongs.
+ * @name: name of this pll clock.
+ * @parent_name: name of the parent clock.
+ * @flags: optional flags for basic clock.
+ * @con_offset: offset of the register for configuring the PLL.
+ * @lock_offset: offset of the register for locking the PLL.
+ * @type: Type of PLL to be registered.
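+ * @rate_table: array of rate settings for possible PLL rates (optional).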
+ * @alias: optional clock alias name to be assigned to this clock.
+ */
+struct samsung_pll_clock {
+ unsigned int id;
+ const char *dev_name;
+ const char *name;
+ const char *parent_name;
+ unsigned long flags;
+ int con_offset;
+ int lock_offset;
+ enum samsung_pll_type type;
+ const struct samsung_pll_rate_table *rate_table;
+ const char *alias;
+};
+
+#define __PLL(_typ, _id, _dname, _name, _pname, _flags, _lock, _con, \
+ _rtable, _alias) \
+ { \
+ .id = _id, \
+ .type = _typ, \
+ .dev_name = _dname, \
+ .name = _name, \
+ .parent_name = _pname, \
+ .flags = (_flags) | CLK_GET_RATE_NOCACHE, \
+ .con_offset = _con, \
+ .lock_offset = _lock, \
+ .rate_table = _rtable, \
+ .alias = _alias, \
+ }
+
+#define PLL(_typ, _id, _name, _pname, _lock, _con, _rtable) \
+ __PLL(_typ, _id, NULL, _name, _pname, CLK_GET_RATE_NOCACHE, \
+ _lock, _con, _rtable, _name)
+
+#define PLL_A(_typ, _id, _name, _pname, _lock, _con, _alias, _rtable) \
+ __PLL(_typ, _id, NULL, _name, _pname, CLK_GET_RATE_NOCACHE, \
+ _lock, _con, _rtable, _alias)
+
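PLL() reuses the clock name as its clkdev alias, while PLL_A() lets the alias differ from the clock name; both pass CLK_GET_RATE_NOCACHE so cached rates are never trusted for PLLs. A hypothetical PLL_A() entry:

/* Hypothetical: register "fout_vpll" but expose the clkdev alias "vpll". */
PLL_A(pll_36xx, 3, "fout_vpll", "fin_pll", 0x020, 0x120, "vpll", NULL),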
extern void __init samsung_clk_init(struct device_node *np, void __iomem *base,
unsigned long nr_clks, unsigned long *rdump,
unsigned long nr_rdump, unsigned long *soc_rdump,
@@ -284,6 +335,8 @@ extern void __init samsung_clk_register_div(struct samsung_div_clock *clk_list,
unsigned int nr_clk);
extern void __init samsung_clk_register_gate(
struct samsung_gate_clock *clk_list, unsigned int nr_clk);
+extern void __init samsung_clk_register_pll(struct samsung_pll_clock *pll_list,
+ unsigned int nr_clk, void __iomem *base);
extern unsigned long _get_rate(const char *clk_name);
diff --git a/drivers/clk/spear/spear1310_clock.c b/drivers/clk/spear/spear1310_clock.c
index aedbbe1..65894f7 100644
--- a/drivers/clk/spear/spear1310_clock.c
+++ b/drivers/clk/spear/spear1310_clock.c
@@ -416,9 +416,9 @@ void __init spear1310_clk_init(void __iomem *misc_base, void __iomem *ras_base)
/* clock derived from 24 or 25 MHz osc clk */
/* vco-pll */
clk = clk_register_mux(NULL, "vco1_mclk", vco_parents,
- ARRAY_SIZE(vco_parents), 0, SPEAR1310_PLL_CFG,
- SPEAR1310_PLL1_CLK_SHIFT, SPEAR1310_PLL_CLK_MASK, 0,
- &_lock);
+ ARRAY_SIZE(vco_parents), CLK_SET_RATE_NO_REPARENT,
+ SPEAR1310_PLL_CFG, SPEAR1310_PLL1_CLK_SHIFT,
+ SPEAR1310_PLL_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "vco1_mclk", NULL);
clk = clk_register_vco_pll("vco1_clk", "pll1_clk", NULL, "vco1_mclk",
0, SPEAR1310_PLL1_CTR, SPEAR1310_PLL1_FRQ, pll_rtbl,
@@ -427,9 +427,9 @@ void __init spear1310_clk_init(void __iomem *misc_base, void __iomem *ras_base)
clk_register_clkdev(clk1, "pll1_clk", NULL);
clk = clk_register_mux(NULL, "vco2_mclk", vco_parents,
- ARRAY_SIZE(vco_parents), 0, SPEAR1310_PLL_CFG,
- SPEAR1310_PLL2_CLK_SHIFT, SPEAR1310_PLL_CLK_MASK, 0,
- &_lock);
+ ARRAY_SIZE(vco_parents), CLK_SET_RATE_NO_REPARENT,
+ SPEAR1310_PLL_CFG, SPEAR1310_PLL2_CLK_SHIFT,
+ SPEAR1310_PLL_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "vco2_mclk", NULL);
clk = clk_register_vco_pll("vco2_clk", "pll2_clk", NULL, "vco2_mclk",
0, SPEAR1310_PLL2_CTR, SPEAR1310_PLL2_FRQ, pll_rtbl,
@@ -438,9 +438,9 @@ void __init spear1310_clk_init(void __iomem *misc_base, void __iomem *ras_base)
clk_register_clkdev(clk1, "pll2_clk", NULL);
clk = clk_register_mux(NULL, "vco3_mclk", vco_parents,
- ARRAY_SIZE(vco_parents), 0, SPEAR1310_PLL_CFG,
- SPEAR1310_PLL3_CLK_SHIFT, SPEAR1310_PLL_CLK_MASK, 0,
- &_lock);
+ ARRAY_SIZE(vco_parents), CLK_SET_RATE_NO_REPARENT,
+ SPEAR1310_PLL_CFG, SPEAR1310_PLL3_CLK_SHIFT,
+ SPEAR1310_PLL_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "vco3_mclk", NULL);
clk = clk_register_vco_pll("vco3_clk", "pll3_clk", NULL, "vco3_mclk",
0, SPEAR1310_PLL3_CTR, SPEAR1310_PLL3_FRQ, pll_rtbl,
@@ -515,9 +515,9 @@ void __init spear1310_clk_init(void __iomem *misc_base, void __iomem *ras_base)
/* gpt clocks */
clk = clk_register_mux(NULL, "gpt0_mclk", gpt_parents,
- ARRAY_SIZE(gpt_parents), 0, SPEAR1310_PERIP_CLK_CFG,
- SPEAR1310_GPT0_CLK_SHIFT, SPEAR1310_GPT_CLK_MASK, 0,
- &_lock);
+ ARRAY_SIZE(gpt_parents), CLK_SET_RATE_NO_REPARENT,
+ SPEAR1310_PERIP_CLK_CFG, SPEAR1310_GPT0_CLK_SHIFT,
+ SPEAR1310_GPT_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "gpt0_mclk", NULL);
clk = clk_register_gate(NULL, "gpt0_clk", "gpt0_mclk", 0,
SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_GPT0_CLK_ENB, 0,
@@ -525,9 +525,9 @@ void __init spear1310_clk_init(void __iomem *misc_base, void __iomem *ras_base)
clk_register_clkdev(clk, NULL, "gpt0");
clk = clk_register_mux(NULL, "gpt1_mclk", gpt_parents,
- ARRAY_SIZE(gpt_parents), 0, SPEAR1310_PERIP_CLK_CFG,
- SPEAR1310_GPT1_CLK_SHIFT, SPEAR1310_GPT_CLK_MASK, 0,
- &_lock);
+ ARRAY_SIZE(gpt_parents), CLK_SET_RATE_NO_REPARENT,
+ SPEAR1310_PERIP_CLK_CFG, SPEAR1310_GPT1_CLK_SHIFT,
+ SPEAR1310_GPT_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "gpt1_mclk", NULL);
clk = clk_register_gate(NULL, "gpt1_clk", "gpt1_mclk", 0,
SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_GPT1_CLK_ENB, 0,
@@ -535,9 +535,9 @@ void __init spear1310_clk_init(void __iomem *misc_base, void __iomem *ras_base)
clk_register_clkdev(clk, NULL, "gpt1");
clk = clk_register_mux(NULL, "gpt2_mclk", gpt_parents,
- ARRAY_SIZE(gpt_parents), 0, SPEAR1310_PERIP_CLK_CFG,
- SPEAR1310_GPT2_CLK_SHIFT, SPEAR1310_GPT_CLK_MASK, 0,
- &_lock);
+ ARRAY_SIZE(gpt_parents), CLK_SET_RATE_NO_REPARENT,
+ SPEAR1310_PERIP_CLK_CFG, SPEAR1310_GPT2_CLK_SHIFT,
+ SPEAR1310_GPT_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "gpt2_mclk", NULL);
clk = clk_register_gate(NULL, "gpt2_clk", "gpt2_mclk", 0,
SPEAR1310_PERIP2_CLK_ENB, SPEAR1310_GPT2_CLK_ENB, 0,
@@ -545,9 +545,9 @@ void __init spear1310_clk_init(void __iomem *misc_base, void __iomem *ras_base)
clk_register_clkdev(clk, NULL, "gpt2");
clk = clk_register_mux(NULL, "gpt3_mclk", gpt_parents,
- ARRAY_SIZE(gpt_parents), 0, SPEAR1310_PERIP_CLK_CFG,
- SPEAR1310_GPT3_CLK_SHIFT, SPEAR1310_GPT_CLK_MASK, 0,
- &_lock);
+ ARRAY_SIZE(gpt_parents), CLK_SET_RATE_NO_REPARENT,
+ SPEAR1310_PERIP_CLK_CFG, SPEAR1310_GPT3_CLK_SHIFT,
+ SPEAR1310_GPT_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "gpt3_mclk", NULL);
clk = clk_register_gate(NULL, "gpt3_clk", "gpt3_mclk", 0,
SPEAR1310_PERIP2_CLK_ENB, SPEAR1310_GPT3_CLK_ENB, 0,
@@ -562,7 +562,8 @@ void __init spear1310_clk_init(void __iomem *misc_base, void __iomem *ras_base)
clk_register_clkdev(clk1, "uart_syn_gclk", NULL);
clk = clk_register_mux(NULL, "uart0_mclk", uart0_parents,
- ARRAY_SIZE(uart0_parents), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(uart0_parents),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
SPEAR1310_PERIP_CLK_CFG, SPEAR1310_UART_CLK_SHIFT,
SPEAR1310_UART_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "uart0_mclk", NULL);
@@ -602,7 +603,8 @@ void __init spear1310_clk_init(void __iomem *misc_base, void __iomem *ras_base)
clk_register_clkdev(clk1, "c3_syn_gclk", NULL);
clk = clk_register_mux(NULL, "c3_mclk", c3_parents,
- ARRAY_SIZE(c3_parents), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(c3_parents),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
SPEAR1310_PERIP_CLK_CFG, SPEAR1310_C3_CLK_SHIFT,
SPEAR1310_C3_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "c3_mclk", NULL);
@@ -614,8 +616,8 @@ void __init spear1310_clk_init(void __iomem *misc_base, void __iomem *ras_base)
/* gmac */
clk = clk_register_mux(NULL, "phy_input_mclk", gmac_phy_input_parents,
- ARRAY_SIZE(gmac_phy_input_parents), 0,
- SPEAR1310_GMAC_CLK_CFG,
+ ARRAY_SIZE(gmac_phy_input_parents),
+ CLK_SET_RATE_NO_REPARENT, SPEAR1310_GMAC_CLK_CFG,
SPEAR1310_GMAC_PHY_INPUT_CLK_SHIFT,
SPEAR1310_GMAC_PHY_INPUT_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "phy_input_mclk", NULL);
@@ -627,15 +629,16 @@ void __init spear1310_clk_init(void __iomem *misc_base, void __iomem *ras_base)
clk_register_clkdev(clk1, "phy_syn_gclk", NULL);
clk = clk_register_mux(NULL, "phy_mclk", gmac_phy_parents,
- ARRAY_SIZE(gmac_phy_parents), 0,
+ ARRAY_SIZE(gmac_phy_parents), CLK_SET_RATE_NO_REPARENT,
SPEAR1310_PERIP_CLK_CFG, SPEAR1310_GMAC_PHY_CLK_SHIFT,
SPEAR1310_GMAC_PHY_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "stmmacphy.0", NULL);
/* clcd */
clk = clk_register_mux(NULL, "clcd_syn_mclk", clcd_synth_parents,
- ARRAY_SIZE(clcd_synth_parents), 0,
- SPEAR1310_CLCD_CLK_SYNT, SPEAR1310_CLCD_SYNT_CLK_SHIFT,
+ ARRAY_SIZE(clcd_synth_parents),
+ CLK_SET_RATE_NO_REPARENT, SPEAR1310_CLCD_CLK_SYNT,
+ SPEAR1310_CLCD_SYNT_CLK_SHIFT,
SPEAR1310_CLCD_SYNT_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "clcd_syn_mclk", NULL);
@@ -645,7 +648,8 @@ void __init spear1310_clk_init(void __iomem *misc_base, void __iomem *ras_base)
clk_register_clkdev(clk, "clcd_syn_clk", NULL);
clk = clk_register_mux(NULL, "clcd_pixel_mclk", clcd_pixel_parents,
- ARRAY_SIZE(clcd_pixel_parents), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(clcd_pixel_parents),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
SPEAR1310_PERIP_CLK_CFG, SPEAR1310_CLCD_CLK_SHIFT,
SPEAR1310_CLCD_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "clcd_pixel_mclk", NULL);
@@ -657,9 +661,9 @@ void __init spear1310_clk_init(void __iomem *misc_base, void __iomem *ras_base)
/* i2s */
clk = clk_register_mux(NULL, "i2s_src_mclk", i2s_src_parents,
- ARRAY_SIZE(i2s_src_parents), 0, SPEAR1310_I2S_CLK_CFG,
- SPEAR1310_I2S_SRC_CLK_SHIFT, SPEAR1310_I2S_SRC_CLK_MASK,
- 0, &_lock);
+ ARRAY_SIZE(i2s_src_parents), CLK_SET_RATE_NO_REPARENT,
+ SPEAR1310_I2S_CLK_CFG, SPEAR1310_I2S_SRC_CLK_SHIFT,
+ SPEAR1310_I2S_SRC_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "i2s_src_mclk", NULL);
clk = clk_register_aux("i2s_prs1_clk", NULL, "i2s_src_mclk", 0,
@@ -668,7 +672,8 @@ void __init spear1310_clk_init(void __iomem *misc_base, void __iomem *ras_base)
clk_register_clkdev(clk, "i2s_prs1_clk", NULL);
clk = clk_register_mux(NULL, "i2s_ref_mclk", i2s_ref_parents,
- ARRAY_SIZE(i2s_ref_parents), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(i2s_ref_parents),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
SPEAR1310_I2S_CLK_CFG, SPEAR1310_I2S_REF_SHIFT,
SPEAR1310_I2S_REF_SEL_MASK, 0, &_lock);
clk_register_clkdev(clk, "i2s_ref_mclk", NULL);
@@ -806,13 +811,15 @@ void __init spear1310_clk_init(void __iomem *misc_base, void __iomem *ras_base)
/* RAS clks */
clk = clk_register_mux(NULL, "gen_syn0_1_mclk", gen_synth0_1_parents,
- ARRAY_SIZE(gen_synth0_1_parents), 0, SPEAR1310_PLL_CFG,
+ ARRAY_SIZE(gen_synth0_1_parents),
+ CLK_SET_RATE_NO_REPARENT, SPEAR1310_PLL_CFG,
SPEAR1310_RAS_SYNT0_1_CLK_SHIFT,
SPEAR1310_RAS_SYNT_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "gen_syn0_1_clk", NULL);
clk = clk_register_mux(NULL, "gen_syn2_3_mclk", gen_synth2_3_parents,
- ARRAY_SIZE(gen_synth2_3_parents), 0, SPEAR1310_PLL_CFG,
+ ARRAY_SIZE(gen_synth2_3_parents),
+ CLK_SET_RATE_NO_REPARENT, SPEAR1310_PLL_CFG,
SPEAR1310_RAS_SYNT2_3_CLK_SHIFT,
SPEAR1310_RAS_SYNT_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "gen_syn2_3_clk", NULL);
@@ -929,8 +936,8 @@ void __init spear1310_clk_init(void __iomem *misc_base, void __iomem *ras_base)
clk = clk_register_mux(NULL, "smii_rgmii_phy_mclk",
smii_rgmii_phy_parents,
- ARRAY_SIZE(smii_rgmii_phy_parents), 0,
- SPEAR1310_RAS_CTRL_REG1,
+ ARRAY_SIZE(smii_rgmii_phy_parents),
+ CLK_SET_RATE_NO_REPARENT, SPEAR1310_RAS_CTRL_REG1,
SPEAR1310_SMII_RGMII_PHY_CLK_SHIFT,
SPEAR1310_PHY_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "stmmacphy.1", NULL);
@@ -938,15 +945,15 @@ void __init spear1310_clk_init(void __iomem *misc_base, void __iomem *ras_base)
clk_register_clkdev(clk, "stmmacphy.4", NULL);
clk = clk_register_mux(NULL, "rmii_phy_mclk", rmii_phy_parents,
- ARRAY_SIZE(rmii_phy_parents), 0,
+ ARRAY_SIZE(rmii_phy_parents), CLK_SET_RATE_NO_REPARENT,
SPEAR1310_RAS_CTRL_REG1, SPEAR1310_RMII_PHY_CLK_SHIFT,
SPEAR1310_PHY_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "stmmacphy.3", NULL);
clk = clk_register_mux(NULL, "uart1_mclk", uart_parents,
- ARRAY_SIZE(uart_parents), 0, SPEAR1310_RAS_CTRL_REG0,
- SPEAR1310_UART1_CLK_SHIFT, SPEAR1310_RAS_UART_CLK_MASK,
- 0, &_lock);
+ ARRAY_SIZE(uart_parents), CLK_SET_RATE_NO_REPARENT,
+ SPEAR1310_RAS_CTRL_REG0, SPEAR1310_UART1_CLK_SHIFT,
+ SPEAR1310_RAS_UART_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "uart1_mclk", NULL);
clk = clk_register_gate(NULL, "uart1_clk", "uart1_mclk", 0,
@@ -955,9 +962,9 @@ void __init spear1310_clk_init(void __iomem *misc_base, void __iomem *ras_base)
clk_register_clkdev(clk, NULL, "5c800000.serial");
clk = clk_register_mux(NULL, "uart2_mclk", uart_parents,
- ARRAY_SIZE(uart_parents), 0, SPEAR1310_RAS_CTRL_REG0,
- SPEAR1310_UART2_CLK_SHIFT, SPEAR1310_RAS_UART_CLK_MASK,
- 0, &_lock);
+ ARRAY_SIZE(uart_parents), CLK_SET_RATE_NO_REPARENT,
+ SPEAR1310_RAS_CTRL_REG0, SPEAR1310_UART2_CLK_SHIFT,
+ SPEAR1310_RAS_UART_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "uart2_mclk", NULL);
clk = clk_register_gate(NULL, "uart2_clk", "uart2_mclk", 0,
@@ -966,9 +973,9 @@ void __init spear1310_clk_init(void __iomem *misc_base, void __iomem *ras_base)
clk_register_clkdev(clk, NULL, "5c900000.serial");
clk = clk_register_mux(NULL, "uart3_mclk", uart_parents,
- ARRAY_SIZE(uart_parents), 0, SPEAR1310_RAS_CTRL_REG0,
- SPEAR1310_UART3_CLK_SHIFT, SPEAR1310_RAS_UART_CLK_MASK,
- 0, &_lock);
+ ARRAY_SIZE(uart_parents), CLK_SET_RATE_NO_REPARENT,
+ SPEAR1310_RAS_CTRL_REG0, SPEAR1310_UART3_CLK_SHIFT,
+ SPEAR1310_RAS_UART_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "uart3_mclk", NULL);
clk = clk_register_gate(NULL, "uart3_clk", "uart3_mclk", 0,
@@ -977,9 +984,9 @@ void __init spear1310_clk_init(void __iomem *misc_base, void __iomem *ras_base)
clk_register_clkdev(clk, NULL, "5ca00000.serial");
clk = clk_register_mux(NULL, "uart4_mclk", uart_parents,
- ARRAY_SIZE(uart_parents), 0, SPEAR1310_RAS_CTRL_REG0,
- SPEAR1310_UART4_CLK_SHIFT, SPEAR1310_RAS_UART_CLK_MASK,
- 0, &_lock);
+ ARRAY_SIZE(uart_parents), CLK_SET_RATE_NO_REPARENT,
+ SPEAR1310_RAS_CTRL_REG0, SPEAR1310_UART4_CLK_SHIFT,
+ SPEAR1310_RAS_UART_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "uart4_mclk", NULL);
clk = clk_register_gate(NULL, "uart4_clk", "uart4_mclk", 0,
@@ -988,9 +995,9 @@ void __init spear1310_clk_init(void __iomem *misc_base, void __iomem *ras_base)
clk_register_clkdev(clk, NULL, "5cb00000.serial");
clk = clk_register_mux(NULL, "uart5_mclk", uart_parents,
- ARRAY_SIZE(uart_parents), 0, SPEAR1310_RAS_CTRL_REG0,
- SPEAR1310_UART5_CLK_SHIFT, SPEAR1310_RAS_UART_CLK_MASK,
- 0, &_lock);
+ ARRAY_SIZE(uart_parents), CLK_SET_RATE_NO_REPARENT,
+ SPEAR1310_RAS_CTRL_REG0, SPEAR1310_UART5_CLK_SHIFT,
+ SPEAR1310_RAS_UART_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "uart5_mclk", NULL);
clk = clk_register_gate(NULL, "uart5_clk", "uart5_mclk", 0,
@@ -999,9 +1006,9 @@ void __init spear1310_clk_init(void __iomem *misc_base, void __iomem *ras_base)
clk_register_clkdev(clk, NULL, "5cc00000.serial");
clk = clk_register_mux(NULL, "i2c1_mclk", i2c_parents,
- ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0,
- SPEAR1310_I2C1_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0,
- &_lock);
+ ARRAY_SIZE(i2c_parents), CLK_SET_RATE_NO_REPARENT,
+ SPEAR1310_RAS_CTRL_REG0, SPEAR1310_I2C1_CLK_SHIFT,
+ SPEAR1310_I2C_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "i2c1_mclk", NULL);
clk = clk_register_gate(NULL, "i2c1_clk", "i2c1_mclk", 0,
@@ -1010,9 +1017,9 @@ void __init spear1310_clk_init(void __iomem *misc_base, void __iomem *ras_base)
clk_register_clkdev(clk, NULL, "5cd00000.i2c");
clk = clk_register_mux(NULL, "i2c2_mclk", i2c_parents,
- ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0,
- SPEAR1310_I2C2_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0,
- &_lock);
+ ARRAY_SIZE(i2c_parents), CLK_SET_RATE_NO_REPARENT,
+ SPEAR1310_RAS_CTRL_REG0, SPEAR1310_I2C2_CLK_SHIFT,
+ SPEAR1310_I2C_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "i2c2_mclk", NULL);
clk = clk_register_gate(NULL, "i2c2_clk", "i2c2_mclk", 0,
@@ -1021,9 +1028,9 @@ void __init spear1310_clk_init(void __iomem *misc_base, void __iomem *ras_base)
clk_register_clkdev(clk, NULL, "5ce00000.i2c");
clk = clk_register_mux(NULL, "i2c3_mclk", i2c_parents,
- ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0,
- SPEAR1310_I2C3_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0,
- &_lock);
+ ARRAY_SIZE(i2c_parents), CLK_SET_RATE_NO_REPARENT,
+ SPEAR1310_RAS_CTRL_REG0, SPEAR1310_I2C3_CLK_SHIFT,
+ SPEAR1310_I2C_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "i2c3_mclk", NULL);
clk = clk_register_gate(NULL, "i2c3_clk", "i2c3_mclk", 0,
@@ -1032,9 +1039,9 @@ void __init spear1310_clk_init(void __iomem *misc_base, void __iomem *ras_base)
clk_register_clkdev(clk, NULL, "5cf00000.i2c");
clk = clk_register_mux(NULL, "i2c4_mclk", i2c_parents,
- ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0,
- SPEAR1310_I2C4_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0,
- &_lock);
+ ARRAY_SIZE(i2c_parents), CLK_SET_RATE_NO_REPARENT,
+ SPEAR1310_RAS_CTRL_REG0, SPEAR1310_I2C4_CLK_SHIFT,
+ SPEAR1310_I2C_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "i2c4_mclk", NULL);
clk = clk_register_gate(NULL, "i2c4_clk", "i2c4_mclk", 0,
@@ -1043,9 +1050,9 @@ void __init spear1310_clk_init(void __iomem *misc_base, void __iomem *ras_base)
clk_register_clkdev(clk, NULL, "5d000000.i2c");
clk = clk_register_mux(NULL, "i2c5_mclk", i2c_parents,
- ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0,
- SPEAR1310_I2C5_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0,
- &_lock);
+ ARRAY_SIZE(i2c_parents), CLK_SET_RATE_NO_REPARENT,
+ SPEAR1310_RAS_CTRL_REG0, SPEAR1310_I2C5_CLK_SHIFT,
+ SPEAR1310_I2C_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "i2c5_mclk", NULL);
clk = clk_register_gate(NULL, "i2c5_clk", "i2c5_mclk", 0,
@@ -1054,9 +1061,9 @@ void __init spear1310_clk_init(void __iomem *misc_base, void __iomem *ras_base)
clk_register_clkdev(clk, NULL, "5d100000.i2c");
clk = clk_register_mux(NULL, "i2c6_mclk", i2c_parents,
- ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0,
- SPEAR1310_I2C6_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0,
- &_lock);
+ ARRAY_SIZE(i2c_parents), CLK_SET_RATE_NO_REPARENT,
+ SPEAR1310_RAS_CTRL_REG0, SPEAR1310_I2C6_CLK_SHIFT,
+ SPEAR1310_I2C_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "i2c6_mclk", NULL);
clk = clk_register_gate(NULL, "i2c6_clk", "i2c6_mclk", 0,
@@ -1065,9 +1072,9 @@ void __init spear1310_clk_init(void __iomem *misc_base, void __iomem *ras_base)
clk_register_clkdev(clk, NULL, "5d200000.i2c");
clk = clk_register_mux(NULL, "i2c7_mclk", i2c_parents,
- ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0,
- SPEAR1310_I2C7_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0,
- &_lock);
+ ARRAY_SIZE(i2c_parents), CLK_SET_RATE_NO_REPARENT,
+ SPEAR1310_RAS_CTRL_REG0, SPEAR1310_I2C7_CLK_SHIFT,
+ SPEAR1310_I2C_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "i2c7_mclk", NULL);
clk = clk_register_gate(NULL, "i2c7_clk", "i2c7_mclk", 0,
@@ -1076,9 +1083,9 @@ void __init spear1310_clk_init(void __iomem *misc_base, void __iomem *ras_base)
clk_register_clkdev(clk, NULL, "5d300000.i2c");
clk = clk_register_mux(NULL, "ssp1_mclk", ssp1_parents,
- ARRAY_SIZE(ssp1_parents), 0, SPEAR1310_RAS_CTRL_REG0,
- SPEAR1310_SSP1_CLK_SHIFT, SPEAR1310_SSP1_CLK_MASK, 0,
- &_lock);
+ ARRAY_SIZE(ssp1_parents), CLK_SET_RATE_NO_REPARENT,
+ SPEAR1310_RAS_CTRL_REG0, SPEAR1310_SSP1_CLK_SHIFT,
+ SPEAR1310_SSP1_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "ssp1_mclk", NULL);
clk = clk_register_gate(NULL, "ssp1_clk", "ssp1_mclk", 0,
@@ -1087,9 +1094,9 @@ void __init spear1310_clk_init(void __iomem *misc_base, void __iomem *ras_base)
clk_register_clkdev(clk, NULL, "5d400000.spi");
clk = clk_register_mux(NULL, "pci_mclk", pci_parents,
- ARRAY_SIZE(pci_parents), 0, SPEAR1310_RAS_CTRL_REG0,
- SPEAR1310_PCI_CLK_SHIFT, SPEAR1310_PCI_CLK_MASK, 0,
- &_lock);
+ ARRAY_SIZE(pci_parents), CLK_SET_RATE_NO_REPARENT,
+ SPEAR1310_RAS_CTRL_REG0, SPEAR1310_PCI_CLK_SHIFT,
+ SPEAR1310_PCI_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "pci_mclk", NULL);
clk = clk_register_gate(NULL, "pci_clk", "pci_mclk", 0,
@@ -1098,9 +1105,9 @@ void __init spear1310_clk_init(void __iomem *misc_base, void __iomem *ras_base)
clk_register_clkdev(clk, NULL, "pci");
clk = clk_register_mux(NULL, "tdm1_mclk", tdm_parents,
- ARRAY_SIZE(tdm_parents), 0, SPEAR1310_RAS_CTRL_REG0,
- SPEAR1310_TDM1_CLK_SHIFT, SPEAR1310_TDM_CLK_MASK, 0,
- &_lock);
+ ARRAY_SIZE(tdm_parents), CLK_SET_RATE_NO_REPARENT,
+ SPEAR1310_RAS_CTRL_REG0, SPEAR1310_TDM1_CLK_SHIFT,
+ SPEAR1310_TDM_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "tdm1_mclk", NULL);
clk = clk_register_gate(NULL, "tdm1_clk", "tdm1_mclk", 0,
@@ -1109,9 +1116,9 @@ void __init spear1310_clk_init(void __iomem *misc_base, void __iomem *ras_base)
clk_register_clkdev(clk, NULL, "tdm_hdlc.0");
clk = clk_register_mux(NULL, "tdm2_mclk", tdm_parents,
- ARRAY_SIZE(tdm_parents), 0, SPEAR1310_RAS_CTRL_REG0,
- SPEAR1310_TDM2_CLK_SHIFT, SPEAR1310_TDM_CLK_MASK, 0,
- &_lock);
+ ARRAY_SIZE(tdm_parents), CLK_SET_RATE_NO_REPARENT,
+ SPEAR1310_RAS_CTRL_REG0, SPEAR1310_TDM2_CLK_SHIFT,
+ SPEAR1310_TDM_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "tdm2_mclk", NULL);
clk = clk_register_gate(NULL, "tdm2_clk", "tdm2_mclk", 0,
diff --git a/drivers/clk/spear/spear1340_clock.c b/drivers/clk/spear/spear1340_clock.c
index 9d0b394..fe835c1 100644
--- a/drivers/clk/spear/spear1340_clock.c
+++ b/drivers/clk/spear/spear1340_clock.c
@@ -473,9 +473,9 @@ void __init spear1340_clk_init(void __iomem *misc_base)
/* clock derived from 24 or 25 MHz osc clk */
/* vco-pll */
clk = clk_register_mux(NULL, "vco1_mclk", vco_parents,
- ARRAY_SIZE(vco_parents), 0, SPEAR1340_PLL_CFG,
- SPEAR1340_PLL1_CLK_SHIFT, SPEAR1340_PLL_CLK_MASK, 0,
- &_lock);
+ ARRAY_SIZE(vco_parents), CLK_SET_RATE_NO_REPARENT,
+ SPEAR1340_PLL_CFG, SPEAR1340_PLL1_CLK_SHIFT,
+ SPEAR1340_PLL_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "vco1_mclk", NULL);
clk = clk_register_vco_pll("vco1_clk", "pll1_clk", NULL, "vco1_mclk", 0,
SPEAR1340_PLL1_CTR, SPEAR1340_PLL1_FRQ, pll_rtbl,
@@ -484,9 +484,9 @@ void __init spear1340_clk_init(void __iomem *misc_base)
clk_register_clkdev(clk1, "pll1_clk", NULL);
clk = clk_register_mux(NULL, "vco2_mclk", vco_parents,
- ARRAY_SIZE(vco_parents), 0, SPEAR1340_PLL_CFG,
- SPEAR1340_PLL2_CLK_SHIFT, SPEAR1340_PLL_CLK_MASK, 0,
- &_lock);
+ ARRAY_SIZE(vco_parents), CLK_SET_RATE_NO_REPARENT,
+ SPEAR1340_PLL_CFG, SPEAR1340_PLL2_CLK_SHIFT,
+ SPEAR1340_PLL_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "vco2_mclk", NULL);
clk = clk_register_vco_pll("vco2_clk", "pll2_clk", NULL, "vco2_mclk", 0,
SPEAR1340_PLL2_CTR, SPEAR1340_PLL2_FRQ, pll_rtbl,
@@ -495,9 +495,9 @@ void __init spear1340_clk_init(void __iomem *misc_base)
clk_register_clkdev(clk1, "pll2_clk", NULL);
clk = clk_register_mux(NULL, "vco3_mclk", vco_parents,
- ARRAY_SIZE(vco_parents), 0, SPEAR1340_PLL_CFG,
- SPEAR1340_PLL3_CLK_SHIFT, SPEAR1340_PLL_CLK_MASK, 0,
- &_lock);
+ ARRAY_SIZE(vco_parents), CLK_SET_RATE_NO_REPARENT,
+ SPEAR1340_PLL_CFG, SPEAR1340_PLL3_CLK_SHIFT,
+ SPEAR1340_PLL_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "vco3_mclk", NULL);
clk = clk_register_vco_pll("vco3_clk", "pll3_clk", NULL, "vco3_mclk", 0,
SPEAR1340_PLL3_CTR, SPEAR1340_PLL3_FRQ, pll_rtbl,
@@ -561,8 +561,8 @@ void __init spear1340_clk_init(void __iomem *misc_base)
clk_register_clkdev(clk, "amba_syn_clk", NULL);
clk = clk_register_mux(NULL, "sys_mclk", sys_parents,
- ARRAY_SIZE(sys_parents), 0, SPEAR1340_SYS_CLK_CTRL,
- SPEAR1340_SCLK_SRC_SEL_SHIFT,
+ ARRAY_SIZE(sys_parents), CLK_SET_RATE_NO_REPARENT,
+ SPEAR1340_SYS_CLK_CTRL, SPEAR1340_SCLK_SRC_SEL_SHIFT,
SPEAR1340_SCLK_SRC_SEL_MASK, 0, &_lock);
clk_register_clkdev(clk, "sys_mclk", NULL);
@@ -583,8 +583,8 @@ void __init spear1340_clk_init(void __iomem *misc_base)
clk_register_clkdev(clk, NULL, "smp_twd");
clk = clk_register_mux(NULL, "ahb_clk", ahb_parents,
- ARRAY_SIZE(ahb_parents), 0, SPEAR1340_SYS_CLK_CTRL,
- SPEAR1340_HCLK_SRC_SEL_SHIFT,
+ ARRAY_SIZE(ahb_parents), CLK_SET_RATE_NO_REPARENT,
+ SPEAR1340_SYS_CLK_CTRL, SPEAR1340_HCLK_SRC_SEL_SHIFT,
SPEAR1340_HCLK_SRC_SEL_MASK, 0, &_lock);
clk_register_clkdev(clk, "ahb_clk", NULL);
@@ -594,9 +594,9 @@ void __init spear1340_clk_init(void __iomem *misc_base)
/* gpt clocks */
clk = clk_register_mux(NULL, "gpt0_mclk", gpt_parents,
- ARRAY_SIZE(gpt_parents), 0, SPEAR1340_PERIP_CLK_CFG,
- SPEAR1340_GPT0_CLK_SHIFT, SPEAR1340_GPT_CLK_MASK, 0,
- &_lock);
+ ARRAY_SIZE(gpt_parents), CLK_SET_RATE_NO_REPARENT,
+ SPEAR1340_PERIP_CLK_CFG, SPEAR1340_GPT0_CLK_SHIFT,
+ SPEAR1340_GPT_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "gpt0_mclk", NULL);
clk = clk_register_gate(NULL, "gpt0_clk", "gpt0_mclk", 0,
SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_GPT0_CLK_ENB, 0,
@@ -604,9 +604,9 @@ void __init spear1340_clk_init(void __iomem *misc_base)
clk_register_clkdev(clk, NULL, "gpt0");
clk = clk_register_mux(NULL, "gpt1_mclk", gpt_parents,
- ARRAY_SIZE(gpt_parents), 0, SPEAR1340_PERIP_CLK_CFG,
- SPEAR1340_GPT1_CLK_SHIFT, SPEAR1340_GPT_CLK_MASK, 0,
- &_lock);
+ ARRAY_SIZE(gpt_parents), CLK_SET_RATE_NO_REPARENT,
+ SPEAR1340_PERIP_CLK_CFG, SPEAR1340_GPT1_CLK_SHIFT,
+ SPEAR1340_GPT_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "gpt1_mclk", NULL);
clk = clk_register_gate(NULL, "gpt1_clk", "gpt1_mclk", 0,
SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_GPT1_CLK_ENB, 0,
@@ -614,9 +614,9 @@ void __init spear1340_clk_init(void __iomem *misc_base)
clk_register_clkdev(clk, NULL, "gpt1");
clk = clk_register_mux(NULL, "gpt2_mclk", gpt_parents,
- ARRAY_SIZE(gpt_parents), 0, SPEAR1340_PERIP_CLK_CFG,
- SPEAR1340_GPT2_CLK_SHIFT, SPEAR1340_GPT_CLK_MASK, 0,
- &_lock);
+ ARRAY_SIZE(gpt_parents), CLK_SET_RATE_NO_REPARENT,
+ SPEAR1340_PERIP_CLK_CFG, SPEAR1340_GPT2_CLK_SHIFT,
+ SPEAR1340_GPT_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "gpt2_mclk", NULL);
clk = clk_register_gate(NULL, "gpt2_clk", "gpt2_mclk", 0,
SPEAR1340_PERIP2_CLK_ENB, SPEAR1340_GPT2_CLK_ENB, 0,
@@ -624,9 +624,9 @@ void __init spear1340_clk_init(void __iomem *misc_base)
clk_register_clkdev(clk, NULL, "gpt2");
clk = clk_register_mux(NULL, "gpt3_mclk", gpt_parents,
- ARRAY_SIZE(gpt_parents), 0, SPEAR1340_PERIP_CLK_CFG,
- SPEAR1340_GPT3_CLK_SHIFT, SPEAR1340_GPT_CLK_MASK, 0,
- &_lock);
+ ARRAY_SIZE(gpt_parents), CLK_SET_RATE_NO_REPARENT,
+ SPEAR1340_PERIP_CLK_CFG, SPEAR1340_GPT3_CLK_SHIFT,
+ SPEAR1340_GPT_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "gpt3_mclk", NULL);
clk = clk_register_gate(NULL, "gpt3_clk", "gpt3_mclk", 0,
SPEAR1340_PERIP2_CLK_ENB, SPEAR1340_GPT3_CLK_ENB, 0,
@@ -641,7 +641,8 @@ void __init spear1340_clk_init(void __iomem *misc_base)
clk_register_clkdev(clk1, "uart0_syn_gclk", NULL);
clk = clk_register_mux(NULL, "uart0_mclk", uart0_parents,
- ARRAY_SIZE(uart0_parents), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(uart0_parents),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
SPEAR1340_PERIP_CLK_CFG, SPEAR1340_UART0_CLK_SHIFT,
SPEAR1340_UART_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "uart0_mclk", NULL);
@@ -658,9 +659,9 @@ void __init spear1340_clk_init(void __iomem *misc_base)
clk_register_clkdev(clk1, "uart1_syn_gclk", NULL);
clk = clk_register_mux(NULL, "uart1_mclk", uart1_parents,
- ARRAY_SIZE(uart1_parents), 0, SPEAR1340_PERIP_CLK_CFG,
- SPEAR1340_UART1_CLK_SHIFT, SPEAR1340_UART_CLK_MASK, 0,
- &_lock);
+ ARRAY_SIZE(uart1_parents), CLK_SET_RATE_NO_REPARENT,
+ SPEAR1340_PERIP_CLK_CFG, SPEAR1340_UART1_CLK_SHIFT,
+ SPEAR1340_UART_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "uart1_mclk", NULL);
clk = clk_register_gate(NULL, "uart1_clk", "uart1_mclk", 0,
@@ -698,7 +699,8 @@ void __init spear1340_clk_init(void __iomem *misc_base)
clk_register_clkdev(clk1, "c3_syn_gclk", NULL);
clk = clk_register_mux(NULL, "c3_mclk", c3_parents,
- ARRAY_SIZE(c3_parents), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(c3_parents),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
SPEAR1340_PERIP_CLK_CFG, SPEAR1340_C3_CLK_SHIFT,
SPEAR1340_C3_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "c3_mclk", NULL);
@@ -710,8 +712,8 @@ void __init spear1340_clk_init(void __iomem *misc_base)
/* gmac */
clk = clk_register_mux(NULL, "phy_input_mclk", gmac_phy_input_parents,
- ARRAY_SIZE(gmac_phy_input_parents), 0,
- SPEAR1340_GMAC_CLK_CFG,
+ ARRAY_SIZE(gmac_phy_input_parents),
+ CLK_SET_RATE_NO_REPARENT, SPEAR1340_GMAC_CLK_CFG,
SPEAR1340_GMAC_PHY_INPUT_CLK_SHIFT,
SPEAR1340_GMAC_PHY_INPUT_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "phy_input_mclk", NULL);
@@ -723,15 +725,16 @@ void __init spear1340_clk_init(void __iomem *misc_base)
clk_register_clkdev(clk1, "phy_syn_gclk", NULL);
clk = clk_register_mux(NULL, "phy_mclk", gmac_phy_parents,
- ARRAY_SIZE(gmac_phy_parents), 0,
+ ARRAY_SIZE(gmac_phy_parents), CLK_SET_RATE_NO_REPARENT,
SPEAR1340_PERIP_CLK_CFG, SPEAR1340_GMAC_PHY_CLK_SHIFT,
SPEAR1340_GMAC_PHY_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "stmmacphy.0", NULL);
/* clcd */
clk = clk_register_mux(NULL, "clcd_syn_mclk", clcd_synth_parents,
- ARRAY_SIZE(clcd_synth_parents), 0,
- SPEAR1340_CLCD_CLK_SYNT, SPEAR1340_CLCD_SYNT_CLK_SHIFT,
+ ARRAY_SIZE(clcd_synth_parents),
+ CLK_SET_RATE_NO_REPARENT, SPEAR1340_CLCD_CLK_SYNT,
+ SPEAR1340_CLCD_SYNT_CLK_SHIFT,
SPEAR1340_CLCD_SYNT_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "clcd_syn_mclk", NULL);
@@ -741,7 +744,8 @@ void __init spear1340_clk_init(void __iomem *misc_base)
clk_register_clkdev(clk, "clcd_syn_clk", NULL);
clk = clk_register_mux(NULL, "clcd_pixel_mclk", clcd_pixel_parents,
- ARRAY_SIZE(clcd_pixel_parents), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(clcd_pixel_parents),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
SPEAR1340_PERIP_CLK_CFG, SPEAR1340_CLCD_CLK_SHIFT,
SPEAR1340_CLCD_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "clcd_pixel_mclk", NULL);
@@ -753,9 +757,9 @@ void __init spear1340_clk_init(void __iomem *misc_base)
/* i2s */
clk = clk_register_mux(NULL, "i2s_src_mclk", i2s_src_parents,
- ARRAY_SIZE(i2s_src_parents), 0, SPEAR1340_I2S_CLK_CFG,
- SPEAR1340_I2S_SRC_CLK_SHIFT, SPEAR1340_I2S_SRC_CLK_MASK,
- 0, &_lock);
+ ARRAY_SIZE(i2s_src_parents), CLK_SET_RATE_NO_REPARENT,
+ SPEAR1340_I2S_CLK_CFG, SPEAR1340_I2S_SRC_CLK_SHIFT,
+ SPEAR1340_I2S_SRC_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "i2s_src_mclk", NULL);
clk = clk_register_aux("i2s_prs1_clk", NULL, "i2s_src_mclk",
@@ -765,7 +769,8 @@ void __init spear1340_clk_init(void __iomem *misc_base)
clk_register_clkdev(clk, "i2s_prs1_clk", NULL);
clk = clk_register_mux(NULL, "i2s_ref_mclk", i2s_ref_parents,
- ARRAY_SIZE(i2s_ref_parents), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(i2s_ref_parents),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
SPEAR1340_I2S_CLK_CFG, SPEAR1340_I2S_REF_SHIFT,
SPEAR1340_I2S_REF_SEL_MASK, 0, &_lock);
clk_register_clkdev(clk, "i2s_ref_mclk", NULL);
@@ -891,13 +896,15 @@ void __init spear1340_clk_init(void __iomem *misc_base)
/* RAS clks */
clk = clk_register_mux(NULL, "gen_syn0_1_mclk", gen_synth0_1_parents,
- ARRAY_SIZE(gen_synth0_1_parents), 0, SPEAR1340_PLL_CFG,
+ ARRAY_SIZE(gen_synth0_1_parents),
+ CLK_SET_RATE_NO_REPARENT, SPEAR1340_PLL_CFG,
SPEAR1340_GEN_SYNT0_1_CLK_SHIFT,
SPEAR1340_GEN_SYNT_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "gen_syn0_1_mclk", NULL);
clk = clk_register_mux(NULL, "gen_syn2_3_mclk", gen_synth2_3_parents,
- ARRAY_SIZE(gen_synth2_3_parents), 0, SPEAR1340_PLL_CFG,
+ ARRAY_SIZE(gen_synth2_3_parents),
+ CLK_SET_RATE_NO_REPARENT, SPEAR1340_PLL_CFG,
SPEAR1340_GEN_SYNT2_3_CLK_SHIFT,
SPEAR1340_GEN_SYNT_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "gen_syn2_3_mclk", NULL);
@@ -938,7 +945,8 @@ void __init spear1340_clk_init(void __iomem *misc_base)
clk_register_clkdev(clk, NULL, "spear_cec.1");
clk = clk_register_mux(NULL, "spdif_out_mclk", spdif_out_parents,
- ARRAY_SIZE(spdif_out_parents), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(spdif_out_parents),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
SPEAR1340_PERIP_CLK_CFG, SPEAR1340_SPDIF_OUT_CLK_SHIFT,
SPEAR1340_SPDIF_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "spdif_out_mclk", NULL);
@@ -949,7 +957,8 @@ void __init spear1340_clk_init(void __iomem *misc_base)
clk_register_clkdev(clk, NULL, "d0000000.spdif-out");
clk = clk_register_mux(NULL, "spdif_in_mclk", spdif_in_parents,
- ARRAY_SIZE(spdif_in_parents), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(spdif_in_parents),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
SPEAR1340_PERIP_CLK_CFG, SPEAR1340_SPDIF_IN_CLK_SHIFT,
SPEAR1340_SPDIF_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "spdif_in_mclk", NULL);
diff --git a/drivers/clk/spear/spear3xx_clock.c b/drivers/clk/spear/spear3xx_clock.c
index 080c3c5..c2d2043 100644
--- a/drivers/clk/spear/spear3xx_clock.c
+++ b/drivers/clk/spear/spear3xx_clock.c
@@ -294,7 +294,8 @@ static void __init spear320_clk_init(void __iomem *soc_config_base)
clk_register_clkdev(clk, NULL, "a9400000.i2s");
clk = clk_register_mux(NULL, "i2s_ref_clk", i2s_ref_parents,
- ARRAY_SIZE(i2s_ref_parents), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(i2s_ref_parents),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
SPEAR320_CONTROL_REG, I2S_REF_PCLK_SHIFT,
I2S_REF_PCLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "i2s_ref_clk", NULL);
@@ -313,57 +314,66 @@ static void __init spear320_clk_init(void __iomem *soc_config_base)
clk_register_clkdev(clk, "hclk", "ab000000.eth");
clk = clk_register_mux(NULL, "rs485_clk", uartx_parents,
- ARRAY_SIZE(uartx_parents), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(uartx_parents),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
SPEAR320_EXT_CTRL_REG, SPEAR320_RS485_PCLK_SHIFT,
SPEAR320_UARTX_PCLK_MASK, 0, &_lock);
clk_register_clkdev(clk, NULL, "a9300000.serial");
clk = clk_register_mux(NULL, "sdhci_clk", sdhci_parents,
- ARRAY_SIZE(sdhci_parents), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(sdhci_parents),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
SPEAR320_CONTROL_REG, SDHCI_PCLK_SHIFT, SDHCI_PCLK_MASK,
0, &_lock);
clk_register_clkdev(clk, NULL, "70000000.sdhci");
clk = clk_register_mux(NULL, "smii_pclk", smii0_parents,
- ARRAY_SIZE(smii0_parents), 0, SPEAR320_CONTROL_REG,
- SMII_PCLK_SHIFT, SMII_PCLK_MASK, 0, &_lock);
+ ARRAY_SIZE(smii0_parents), CLK_SET_RATE_NO_REPARENT,
+ SPEAR320_CONTROL_REG, SMII_PCLK_SHIFT, SMII_PCLK_MASK,
+ 0, &_lock);
clk_register_clkdev(clk, NULL, "smii_pclk");
clk = clk_register_fixed_factor(NULL, "smii_clk", "smii_pclk", 0, 1, 1);
clk_register_clkdev(clk, NULL, "smii");
clk = clk_register_mux(NULL, "uart1_clk", uartx_parents,
- ARRAY_SIZE(uartx_parents), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(uartx_parents),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
SPEAR320_CONTROL_REG, UART1_PCLK_SHIFT, UART1_PCLK_MASK,
0, &_lock);
clk_register_clkdev(clk, NULL, "a3000000.serial");
clk = clk_register_mux(NULL, "uart2_clk", uartx_parents,
- ARRAY_SIZE(uartx_parents), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(uartx_parents),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
SPEAR320_EXT_CTRL_REG, SPEAR320_UART2_PCLK_SHIFT,
SPEAR320_UARTX_PCLK_MASK, 0, &_lock);
clk_register_clkdev(clk, NULL, "a4000000.serial");
clk = clk_register_mux(NULL, "uart3_clk", uartx_parents,
- ARRAY_SIZE(uartx_parents), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(uartx_parents),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
SPEAR320_EXT_CTRL_REG, SPEAR320_UART3_PCLK_SHIFT,
SPEAR320_UARTX_PCLK_MASK, 0, &_lock);
clk_register_clkdev(clk, NULL, "a9100000.serial");
clk = clk_register_mux(NULL, "uart4_clk", uartx_parents,
- ARRAY_SIZE(uartx_parents), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(uartx_parents),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
SPEAR320_EXT_CTRL_REG, SPEAR320_UART4_PCLK_SHIFT,
SPEAR320_UARTX_PCLK_MASK, 0, &_lock);
clk_register_clkdev(clk, NULL, "a9200000.serial");
clk = clk_register_mux(NULL, "uart5_clk", uartx_parents,
- ARRAY_SIZE(uartx_parents), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(uartx_parents),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
SPEAR320_EXT_CTRL_REG, SPEAR320_UART5_PCLK_SHIFT,
SPEAR320_UARTX_PCLK_MASK, 0, &_lock);
clk_register_clkdev(clk, NULL, "60000000.serial");
clk = clk_register_mux(NULL, "uart6_clk", uartx_parents,
- ARRAY_SIZE(uartx_parents), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(uartx_parents),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
SPEAR320_EXT_CTRL_REG, SPEAR320_UART6_PCLK_SHIFT,
SPEAR320_UARTX_PCLK_MASK, 0, &_lock);
clk_register_clkdev(clk, NULL, "60100000.serial");
@@ -427,7 +437,8 @@ void __init spear3xx_clk_init(void __iomem *misc_base, void __iomem *soc_config_
clk_register_clkdev(clk1, "uart_syn_gclk", NULL);
clk = clk_register_mux(NULL, "uart0_mclk", uart0_parents,
- ARRAY_SIZE(uart0_parents), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(uart0_parents),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
PERIP_CLK_CFG, UART_CLK_SHIFT, UART_CLK_MASK, 0,
&_lock);
clk_register_clkdev(clk, "uart0_mclk", NULL);
@@ -444,7 +455,8 @@ void __init spear3xx_clk_init(void __iomem *misc_base, void __iomem *soc_config_
clk_register_clkdev(clk1, "firda_syn_gclk", NULL);
clk = clk_register_mux(NULL, "firda_mclk", firda_parents,
- ARRAY_SIZE(firda_parents), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(firda_parents),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
PERIP_CLK_CFG, FIRDA_CLK_SHIFT, FIRDA_CLK_MASK, 0,
&_lock);
clk_register_clkdev(clk, "firda_mclk", NULL);
@@ -458,14 +470,16 @@ void __init spear3xx_clk_init(void __iomem *misc_base, void __iomem *soc_config_
clk_register_gpt("gpt0_syn_clk", "pll1_clk", 0, PRSC0_CLK_CFG, gpt_rtbl,
ARRAY_SIZE(gpt_rtbl), &_lock);
clk = clk_register_mux(NULL, "gpt0_clk", gpt0_parents,
- ARRAY_SIZE(gpt0_parents), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(gpt0_parents),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
PERIP_CLK_CFG, GPT0_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, NULL, "gpt0");
clk_register_gpt("gpt1_syn_clk", "pll1_clk", 0, PRSC1_CLK_CFG, gpt_rtbl,
ARRAY_SIZE(gpt_rtbl), &_lock);
clk = clk_register_mux(NULL, "gpt1_mclk", gpt1_parents,
- ARRAY_SIZE(gpt1_parents), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(gpt1_parents),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
PERIP_CLK_CFG, GPT1_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "gpt1_mclk", NULL);
clk = clk_register_gate(NULL, "gpt1_clk", "gpt1_mclk",
@@ -476,7 +490,8 @@ void __init spear3xx_clk_init(void __iomem *misc_base, void __iomem *soc_config_
clk_register_gpt("gpt2_syn_clk", "pll1_clk", 0, PRSC2_CLK_CFG, gpt_rtbl,
ARRAY_SIZE(gpt_rtbl), &_lock);
clk = clk_register_mux(NULL, "gpt2_mclk", gpt2_parents,
- ARRAY_SIZE(gpt2_parents), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(gpt2_parents),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
PERIP_CLK_CFG, GPT2_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "gpt2_mclk", NULL);
clk = clk_register_gate(NULL, "gpt2_clk", "gpt2_mclk",
@@ -498,9 +513,9 @@ void __init spear3xx_clk_init(void __iomem *misc_base, void __iomem *soc_config_
clk_register_clkdev(clk1, "gen1_syn_gclk", NULL);
clk = clk_register_mux(NULL, "gen2_3_par_clk", gen2_3_parents,
- ARRAY_SIZE(gen2_3_parents), 0, CORE_CLK_CFG,
- GEN_SYNTH2_3_CLK_SHIFT, GEN_SYNTH2_3_CLK_MASK, 0,
- &_lock);
+ ARRAY_SIZE(gen2_3_parents), CLK_SET_RATE_NO_REPARENT,
+ CORE_CLK_CFG, GEN_SYNTH2_3_CLK_SHIFT,
+ GEN_SYNTH2_3_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "gen2_3_par_clk", NULL);
clk = clk_register_aux("gen2_syn_clk", "gen2_syn_gclk",
@@ -540,8 +555,8 @@ void __init spear3xx_clk_init(void __iomem *misc_base, void __iomem *soc_config_
clk_register_clkdev(clk, "ahbmult2_clk", NULL);
clk = clk_register_mux(NULL, "ddr_clk", ddr_parents,
- ARRAY_SIZE(ddr_parents), 0, PLL_CLK_CFG, MCTR_CLK_SHIFT,
- MCTR_CLK_MASK, 0, &_lock);
+ ARRAY_SIZE(ddr_parents), CLK_SET_RATE_NO_REPARENT,
+ PLL_CLK_CFG, MCTR_CLK_SHIFT, MCTR_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "ddr_clk", NULL);
clk = clk_register_divider(NULL, "apb_clk", "ahb_clk",
diff --git a/drivers/clk/spear/spear6xx_clock.c b/drivers/clk/spear/spear6xx_clock.c
index 9406f24..4f649c9 100644
--- a/drivers/clk/spear/spear6xx_clock.c
+++ b/drivers/clk/spear/spear6xx_clock.c
@@ -169,8 +169,9 @@ void __init spear6xx_clk_init(void __iomem *misc_base)
clk_register_clkdev(clk1, "uart_syn_gclk", NULL);
clk = clk_register_mux(NULL, "uart_mclk", uart_parents,
- ARRAY_SIZE(uart_parents), 0, PERIP_CLK_CFG,
- UART_CLK_SHIFT, UART_CLK_MASK, 0, &_lock);
+ ARRAY_SIZE(uart_parents), CLK_SET_RATE_NO_REPARENT,
+ PERIP_CLK_CFG, UART_CLK_SHIFT, UART_CLK_MASK, 0,
+ &_lock);
clk_register_clkdev(clk, "uart_mclk", NULL);
clk = clk_register_gate(NULL, "uart0", "uart_mclk", 0, PERIP1_CLK_ENB,
@@ -188,8 +189,9 @@ void __init spear6xx_clk_init(void __iomem *misc_base)
clk_register_clkdev(clk1, "firda_syn_gclk", NULL);
clk = clk_register_mux(NULL, "firda_mclk", firda_parents,
- ARRAY_SIZE(firda_parents), 0, PERIP_CLK_CFG,
- FIRDA_CLK_SHIFT, FIRDA_CLK_MASK, 0, &_lock);
+ ARRAY_SIZE(firda_parents), CLK_SET_RATE_NO_REPARENT,
+ PERIP_CLK_CFG, FIRDA_CLK_SHIFT, FIRDA_CLK_MASK, 0,
+ &_lock);
clk_register_clkdev(clk, "firda_mclk", NULL);
clk = clk_register_gate(NULL, "firda_clk", "firda_mclk", 0,
@@ -203,8 +205,9 @@ void __init spear6xx_clk_init(void __iomem *misc_base)
clk_register_clkdev(clk1, "clcd_syn_gclk", NULL);
clk = clk_register_mux(NULL, "clcd_mclk", clcd_parents,
- ARRAY_SIZE(clcd_parents), 0, PERIP_CLK_CFG,
- CLCD_CLK_SHIFT, CLCD_CLK_MASK, 0, &_lock);
+ ARRAY_SIZE(clcd_parents), CLK_SET_RATE_NO_REPARENT,
+ PERIP_CLK_CFG, CLCD_CLK_SHIFT, CLCD_CLK_MASK, 0,
+ &_lock);
clk_register_clkdev(clk, "clcd_mclk", NULL);
clk = clk_register_gate(NULL, "clcd_clk", "clcd_mclk", 0,
@@ -217,13 +220,13 @@ void __init spear6xx_clk_init(void __iomem *misc_base)
clk_register_clkdev(clk, "gpt0_1_syn_clk", NULL);
clk = clk_register_mux(NULL, "gpt0_mclk", gpt0_1_parents,
- ARRAY_SIZE(gpt0_1_parents), 0, PERIP_CLK_CFG,
- GPT0_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
+ ARRAY_SIZE(gpt0_1_parents), CLK_SET_RATE_NO_REPARENT,
+ PERIP_CLK_CFG, GPT0_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, NULL, "gpt0");
clk = clk_register_mux(NULL, "gpt1_mclk", gpt0_1_parents,
- ARRAY_SIZE(gpt0_1_parents), 0, PERIP_CLK_CFG,
- GPT1_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
+ ARRAY_SIZE(gpt0_1_parents), CLK_SET_RATE_NO_REPARENT,
+ PERIP_CLK_CFG, GPT1_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "gpt1_mclk", NULL);
clk = clk_register_gate(NULL, "gpt1_clk", "gpt1_mclk", 0,
@@ -235,8 +238,8 @@ void __init spear6xx_clk_init(void __iomem *misc_base)
clk_register_clkdev(clk, "gpt2_syn_clk", NULL);
clk = clk_register_mux(NULL, "gpt2_mclk", gpt2_parents,
- ARRAY_SIZE(gpt2_parents), 0, PERIP_CLK_CFG,
- GPT2_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
+ ARRAY_SIZE(gpt2_parents), CLK_SET_RATE_NO_REPARENT,
+ PERIP_CLK_CFG, GPT2_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "gpt2_mclk", NULL);
clk = clk_register_gate(NULL, "gpt2_clk", "gpt2_mclk", 0,
@@ -248,8 +251,8 @@ void __init spear6xx_clk_init(void __iomem *misc_base)
clk_register_clkdev(clk, "gpt3_syn_clk", NULL);
clk = clk_register_mux(NULL, "gpt3_mclk", gpt3_parents,
- ARRAY_SIZE(gpt3_parents), 0, PERIP_CLK_CFG,
- GPT3_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
+ ARRAY_SIZE(gpt3_parents), CLK_SET_RATE_NO_REPARENT,
+ PERIP_CLK_CFG, GPT3_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "gpt3_mclk", NULL);
clk = clk_register_gate(NULL, "gpt3_clk", "gpt3_mclk", 0,
@@ -277,8 +280,8 @@ void __init spear6xx_clk_init(void __iomem *misc_base)
clk_register_clkdev(clk, "ahbmult2_clk", NULL);
clk = clk_register_mux(NULL, "ddr_clk", ddr_parents,
- ARRAY_SIZE(ddr_parents), 0, PLL_CLK_CFG, MCTR_CLK_SHIFT,
- MCTR_CLK_MASK, 0, &_lock);
+ ARRAY_SIZE(ddr_parents), CLK_SET_RATE_NO_REPARENT,
+ PLL_CLK_CFG, MCTR_CLK_SHIFT, MCTR_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "ddr_clk", NULL);
clk = clk_register_divider(NULL, "apb_clk", "ahb_clk",
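
Note on the pattern running through the SPEAr hunks above (and the sunxi, tegra, versatile and zynq ones below): every mux registration gains CLK_SET_RATE_NO_REPARENT. Without the flag, clk_set_rate() on the mux may select a different parent while hunting for the best rate; with it, only an explicit clk_set_parent() changes the selection. A minimal sketch of the resulting call shape, with illustrative clock names:

static DEFINE_SPINLOCK(demo_lock);
/* illustrative parents; real drivers use their SoC's clock names */
static const char *demo_parents[] = { "pll1_clk", "pll2_clk", "osc_clk" };

static void __init demo_mux_init(void __iomem *reg)
{
	struct clk *clk;

	/* shift 4, width 2: the parent select lives in bits [5:4] of *reg */
	clk = clk_register_mux(NULL, "demo_mclk", demo_parents,
			       ARRAY_SIZE(demo_parents),
			       CLK_SET_RATE_NO_REPARENT,
			       reg, 4, 2, 0, &demo_lock);
	if (!IS_ERR(clk))
		clk_register_clkdev(clk, "demo_mclk", NULL);
}
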
diff --git a/drivers/clk/sunxi/clk-sunxi.c b/drivers/clk/sunxi/clk-sunxi.c
index 412912b..34ee69f 100644
--- a/drivers/clk/sunxi/clk-sunxi.c
+++ b/drivers/clk/sunxi/clk-sunxi.c
@@ -25,12 +25,12 @@
static DEFINE_SPINLOCK(clk_lock);
/**
- * sunxi_osc_clk_setup() - Setup function for gatable oscillator
+ * sun4i_osc_clk_setup() - Setup function for gateable oscillator
*/
#define SUNXI_OSC24M_GATE 0
-static void __init sunxi_osc_clk_setup(struct device_node *node)
+static void __init sun4i_osc_clk_setup(struct device_node *node)
{
struct clk *clk;
struct clk_fixed_rate *fixed;
@@ -64,22 +64,23 @@ static void __init sunxi_osc_clk_setup(struct device_node *node)
&gate->hw, &clk_gate_ops,
CLK_IS_ROOT);
- if (clk) {
+ if (!IS_ERR(clk)) {
of_clk_add_provider(node, of_clk_src_simple_get, clk);
clk_register_clkdev(clk, clk_name, NULL);
}
}
+CLK_OF_DECLARE(sun4i_osc, "allwinner,sun4i-osc-clk", sun4i_osc_clk_setup);
/**
- * sunxi_get_pll1_factors() - calculates n, k, m, p factors for PLL1
+ * sun4i_get_pll1_factors() - calculates n, k, m, p factors for PLL1
* PLL1 rate is calculated as follows
* rate = (parent_rate * n * (k + 1) >> p) / (m + 1);
* parent_rate is always 24Mhz
*/
-static void sunxi_get_pll1_factors(u32 *freq, u32 parent_rate,
+static void sun4i_get_pll1_factors(u32 *freq, u32 parent_rate,
u8 *n, u8 *k, u8 *m, u8 *p)
{
u8 div;
@@ -124,15 +125,97 @@ static void sunxi_get_pll1_factors(u32 *freq, u32 parent_rate,
*n = div / 4;
}
+/**
+ * sun6i_a31_get_pll1_factors() - calculates n, k and m factors for PLL1
+ * PLL1 rate is calculated as follows
+ * rate = parent_rate * (n + 1) * (k + 1) / (m + 1);
+ * parent_rate should always be 24MHz
+ */
+static void sun6i_a31_get_pll1_factors(u32 *freq, u32 parent_rate,
+ u8 *n, u8 *k, u8 *m, u8 *p)
+{
+ /*
+	 * We can operate only on MHz; this will make our life easier
+ * later.
+ */
+ u32 freq_mhz = *freq / 1000000;
+ u32 parent_freq_mhz = parent_rate / 1000000;
+
+ /*
+ * Round down the frequency to the closest multiple of either
+ * 6 or 16
+ */
+ u32 round_freq_6 = round_down(freq_mhz, 6);
+ u32 round_freq_16 = round_down(freq_mhz, 16);
+
+ if (round_freq_6 > round_freq_16)
+ freq_mhz = round_freq_6;
+ else
+ freq_mhz = round_freq_16;
+
+ *freq = freq_mhz * 1000000;
+ /*
+	 * If the factor pointers are NULL, we were just called to
+	 * round down the frequency; exit early.
+ */
+ if (n == NULL)
+ return;
+
+ /* If the frequency is a multiple of 32 MHz, k is always 3 */
+ if (!(freq_mhz % 32))
+ *k = 3;
+ /* If the frequency is a multiple of 9 MHz, k is always 2 */
+ else if (!(freq_mhz % 9))
+ *k = 2;
+ /* If the frequency is a multiple of 8 MHz, k is always 1 */
+ else if (!(freq_mhz % 8))
+ *k = 1;
+ /* Otherwise, we don't use the k factor */
+ else
+ *k = 0;
+
+ /*
+ * If the frequency is a multiple of 2 but not a multiple of
+	 * 3, m is 2. This is the first time we use the number 6 here,
+	 * and we will use it in several other places.
+ * We use this number because it's the lowest frequency we can
+ * generate (with n = 0, k = 0, m = 3), so every other frequency
+ * somehow relates to this frequency.
+ */
+ if ((freq_mhz % 6) == 2 || (freq_mhz % 6) == 4)
+ *m = 2;
+ /*
+	 * If the frequency is a multiple of 6 MHz but the factor is
+	 * odd, m will be 3.
+ */
+ else if ((freq_mhz / 6) & 1)
+ *m = 3;
+ /* Otherwise, we end up with m = 1 */
+ else
+ *m = 1;
+
+	/* Calculate n from the factors computed above */
+ *n = freq_mhz * (*m + 1) / ((*k + 1) * parent_freq_mhz) - 1;
+
+ /*
+	 * If n ends up out of bounds and we can still decrease m,
+	 * do it.
+ */
+ if ((*n + 1) > 31 && (*m + 1) > 1) {
+ *n = (*n + 1) / 2 - 1;
+ *m = (*m + 1) / 2 - 1;
+ }
+}
/**
- * sunxi_get_apb1_factors() - calculates m, p factors for APB1
+ * sun4i_get_apb1_factors() - calculates m, p factors for APB1
* APB1 rate is calculated as follows
* rate = (parent_rate >> p) / (m + 1);
*/
-static void sunxi_get_apb1_factors(u32 *freq, u32 parent_rate,
+static void sun4i_get_apb1_factors(u32 *freq, u32 parent_rate,
u8 *n, u8 *k, u8 *m, u8 *p)
{
u8 calcm, calcp;
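
The sun6i A31 factor computation added above is easier to follow with a worked number. For an illustrative 1008 MHz target from the 24 MHz parent: 1008 is not a multiple of 32 but is a multiple of 9, so k = 2; 1008 / 6 = 168 is even, so m = 1; then n = 1008 * 2 / (3 * 24) - 1 = 27, and 24 * 28 * 3 / 2 = 1008 MHz exactly. A standalone sketch replaying that arithmetic (not driver code):

#include <stdio.h>

/* Replay of the sun6i a31 PLL1 maths for one illustrative target. */
int main(void)
{
	unsigned int parent_mhz = 24, freq_mhz = 1008;
	unsigned int k, m, n;

	k = !(freq_mhz % 32) ? 3 : !(freq_mhz % 9) ? 2 :
	    !(freq_mhz % 8) ? 1 : 0;			/* -> 2 */

	if ((freq_mhz % 6) == 2 || (freq_mhz % 6) == 4)
		m = 2;
	else if ((freq_mhz / 6) & 1)
		m = 3;
	else
		m = 1;					/* -> 1 */

	n = freq_mhz * (m + 1) / ((k + 1) * parent_mhz) - 1;	/* -> 27 */

	printf("n=%u k=%u m=%u rate=%u MHz\n", n, k, m,
	       parent_mhz * (n + 1) * (k + 1) / (m + 1));	/* 1008 */
	return 0;
}
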
@@ -178,7 +261,7 @@ struct factors_data {
void (*getter) (u32 *rate, u32 parent_rate, u8 *n, u8 *k, u8 *m, u8 *p);
};
-static struct clk_factors_config pll1_config = {
+static struct clk_factors_config sun4i_pll1_config = {
.nshift = 8,
.nwidth = 5,
.kshift = 4,
@@ -189,21 +272,35 @@ static struct clk_factors_config pll1_config = {
.pwidth = 2,
};
-static struct clk_factors_config apb1_config = {
+static struct clk_factors_config sun6i_a31_pll1_config = {
+ .nshift = 8,
+ .nwidth = 5,
+ .kshift = 4,
+ .kwidth = 2,
+ .mshift = 0,
+ .mwidth = 2,
+};
+
+static struct clk_factors_config sun4i_apb1_config = {
.mshift = 0,
.mwidth = 5,
.pshift = 16,
.pwidth = 2,
};
-static const __initconst struct factors_data pll1_data = {
- .table = &pll1_config,
- .getter = sunxi_get_pll1_factors,
+static const struct factors_data sun4i_pll1_data __initconst = {
+ .table = &sun4i_pll1_config,
+ .getter = sun4i_get_pll1_factors,
};
-static const __initconst struct factors_data apb1_data = {
- .table = &apb1_config,
- .getter = sunxi_get_apb1_factors,
+static const struct factors_data sun6i_a31_pll1_data __initconst = {
+ .table = &sun6i_a31_pll1_config,
+ .getter = sun6i_a31_get_pll1_factors,
+};
+
+static const struct factors_data sun4i_apb1_data __initconst = {
+ .table = &sun4i_apb1_config,
+ .getter = sun4i_get_apb1_factors,
};
static void __init sunxi_factors_clk_setup(struct device_node *node,
@@ -221,7 +318,7 @@ static void __init sunxi_factors_clk_setup(struct device_node *node,
clk = clk_register_factors(NULL, clk_name, parent, 0, reg,
data->table, data->getter, &clk_lock);
- if (clk) {
+ if (!IS_ERR(clk)) {
of_clk_add_provider(node, of_clk_src_simple_get, clk);
clk_register_clkdev(clk, clk_name, NULL);
}
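
The two if (clk) -> if (!IS_ERR(clk)) fixes in this file matter because the clk registration helpers return an ERR_PTR()-encoded error on failure, never NULL, so the old test always took the success path. A minimal sketch of the correct pattern (hypothetical clock names):

static void __init demo_register(struct device_node *node)
{
	struct clk *clk;

	clk = clk_register_fixed_factor(NULL, "demo_div2", "osc", 0, 1, 2);
	if (IS_ERR(clk)) {
		/* e.g. ERR_PTR(-ENOMEM); a NULL check would miss this */
		pr_err("demo_div2: %ld\n", PTR_ERR(clk));
		return;
	}
	of_clk_add_provider(node, of_clk_src_simple_get, clk);
}
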
@@ -239,11 +336,15 @@ struct mux_data {
u8 shift;
};
-static const __initconst struct mux_data cpu_mux_data = {
+static const struct mux_data sun4i_cpu_mux_data __initconst = {
.shift = 16,
};
-static const __initconst struct mux_data apb1_mux_data = {
+static const struct mux_data sun6i_a31_ahb1_mux_data __initconst = {
+ .shift = 12,
+};
+
+static const struct mux_data sun4i_apb1_mux_data __initconst = {
.shift = 24,
};
@@ -261,7 +362,8 @@ static void __init sunxi_mux_clk_setup(struct device_node *node,
while (i < 5 && (parents[i] = of_clk_get_parent_name(node, i)) != NULL)
i++;
- clk = clk_register_mux(NULL, clk_name, parents, i, 0, reg,
+ clk = clk_register_mux(NULL, clk_name, parents, i,
+ CLK_SET_RATE_NO_REPARENT, reg,
data->shift, SUNXI_MUX_GATE_WIDTH,
0, &clk_lock);
@@ -277,26 +379,34 @@ static void __init sunxi_mux_clk_setup(struct device_node *node,
* sunxi_divider_clk_setup() - Setup function for simple divider clocks
*/
-#define SUNXI_DIVISOR_WIDTH 2
-
struct div_data {
- u8 shift;
- u8 pow;
+ u8 shift;
+ u8 pow;
+ u8 width;
};
-static const __initconst struct div_data axi_data = {
- .shift = 0,
- .pow = 0,
+static const struct div_data sun4i_axi_data __initconst = {
+ .shift = 0,
+ .pow = 0,
+ .width = 2,
};
-static const __initconst struct div_data ahb_data = {
- .shift = 4,
- .pow = 1,
+static const struct div_data sun4i_ahb_data __initconst = {
+ .shift = 4,
+ .pow = 1,
+ .width = 2,
};
-static const __initconst struct div_data apb0_data = {
- .shift = 8,
- .pow = 1,
+static const struct div_data sun4i_apb0_data __initconst = {
+ .shift = 8,
+ .pow = 1,
+ .width = 2,
+};
+
+static const struct div_data sun6i_a31_apb2_div_data __initconst = {
+ .shift = 0,
+ .pow = 0,
+ .width = 4,
};
static void __init sunxi_divider_clk_setup(struct device_node *node,
@@ -312,7 +422,7 @@ static void __init sunxi_divider_clk_setup(struct device_node *node,
clk_parent = of_clk_get_parent_name(node, 0);
clk = clk_register_divider(NULL, clk_name, clk_parent, 0,
- reg, data->shift, SUNXI_DIVISOR_WIDTH,
+ reg, data->shift, data->width,
data->pow ? CLK_DIVIDER_POWER_OF_TWO : 0,
&clk_lock);
if (clk) {
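
The hunk above replaces the hard-coded 2-bit SUNXI_DIVISOR_WIDTH with a per-clock width so the 4-bit sun6i A31 APB2 divider can share the same setup path. The pow field selects between the two divider encodings; the decode rule, as a sketch:

/* Divisor implied by a raw register field under the two encodings (sketch). */
static unsigned int demo_divisor(unsigned int field, bool power_of_two)
{
	/*
	 * pow = 1 (CLK_DIVIDER_POWER_OF_TWO): divisor = 1 << field,
	 * so width 2 gives /1 /2 /4 /8 (sun4i ahb, apb0).
	 * pow = 0: divisor = field + 1, so width 4 gives /1../16
	 * (sun6i a31 apb2).
	 */
	return power_of_two ? 1u << field : field + 1;
}
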
@@ -333,34 +443,70 @@ struct gates_data {
DECLARE_BITMAP(mask, SUNXI_GATES_MAX_SIZE);
};
-static const __initconst struct gates_data sun4i_axi_gates_data = {
+static const struct gates_data sun4i_axi_gates_data __initconst = {
.mask = {1},
};
-static const __initconst struct gates_data sun4i_ahb_gates_data = {
+static const struct gates_data sun4i_ahb_gates_data __initconst = {
.mask = {0x7F77FFF, 0x14FB3F},
};
-static const __initconst struct gates_data sun5i_a13_ahb_gates_data = {
+static const struct gates_data sun5i_a10s_ahb_gates_data __initconst = {
+ .mask = {0x147667e7, 0x185915},
+};
+
+static const struct gates_data sun5i_a13_ahb_gates_data __initconst = {
.mask = {0x107067e7, 0x185111},
};
-static const __initconst struct gates_data sun4i_apb0_gates_data = {
+static const struct gates_data sun6i_a31_ahb1_gates_data __initconst = {
+ .mask = {0xEDFE7F62, 0x794F931},
+};
+
+static const struct gates_data sun7i_a20_ahb_gates_data __initconst = {
+ .mask = { 0x12f77fff, 0x16ff3f },
+};
+
+static const struct gates_data sun4i_apb0_gates_data __initconst = {
.mask = {0x4EF},
};
-static const __initconst struct gates_data sun5i_a13_apb0_gates_data = {
+static const struct gates_data sun5i_a10s_apb0_gates_data __initconst = {
+ .mask = {0x469},
+};
+
+static const struct gates_data sun5i_a13_apb0_gates_data __initconst = {
.mask = {0x61},
};
-static const __initconst struct gates_data sun4i_apb1_gates_data = {
+static const struct gates_data sun7i_a20_apb0_gates_data __initconst = {
+ .mask = { 0x4ff },
+};
+
+static const struct gates_data sun4i_apb1_gates_data __initconst = {
.mask = {0xFF00F7},
};
-static const __initconst struct gates_data sun5i_a13_apb1_gates_data = {
+static const struct gates_data sun5i_a10s_apb1_gates_data __initconst = {
+ .mask = {0xf0007},
+};
+
+static const struct gates_data sun5i_a13_apb1_gates_data __initconst = {
.mask = {0xa0007},
};
+static const struct gates_data sun6i_a31_apb1_gates_data __initconst = {
+ .mask = {0x3031},
+};
+
+static const struct gates_data sun6i_a31_apb2_gates_data __initconst = {
+ .mask = {0x3F000F},
+};
+
+static const struct gates_data sun7i_a20_apb1_gates_data __initconst = {
+ .mask = { 0xff80ff },
+};
+
static void __init sunxi_gates_clk_setup(struct device_node *node,
struct gates_data *data)
{
@@ -410,43 +556,49 @@ static void __init sunxi_gates_clk_setup(struct device_node *node,
of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
}
-/* Matches for of_clk_init */
-static const __initconst struct of_device_id clk_match[] = {
- {.compatible = "allwinner,sun4i-osc-clk", .data = sunxi_osc_clk_setup,},
- {}
-};
-
/* Matches for factors clocks */
-static const __initconst struct of_device_id clk_factors_match[] = {
- {.compatible = "allwinner,sun4i-pll1-clk", .data = &pll1_data,},
- {.compatible = "allwinner,sun4i-apb1-clk", .data = &apb1_data,},
+static const struct of_device_id clk_factors_match[] __initconst = {
+ {.compatible = "allwinner,sun4i-pll1-clk", .data = &sun4i_pll1_data,},
+ {.compatible = "allwinner,sun6i-a31-pll1-clk", .data = &sun6i_a31_pll1_data,},
+ {.compatible = "allwinner,sun4i-apb1-clk", .data = &sun4i_apb1_data,},
{}
};
/* Matches for divider clocks */
-static const __initconst struct of_device_id clk_div_match[] = {
- {.compatible = "allwinner,sun4i-axi-clk", .data = &axi_data,},
- {.compatible = "allwinner,sun4i-ahb-clk", .data = &ahb_data,},
- {.compatible = "allwinner,sun4i-apb0-clk", .data = &apb0_data,},
+static const struct of_device_id clk_div_match[] __initconst = {
+ {.compatible = "allwinner,sun4i-axi-clk", .data = &sun4i_axi_data,},
+ {.compatible = "allwinner,sun4i-ahb-clk", .data = &sun4i_ahb_data,},
+ {.compatible = "allwinner,sun4i-apb0-clk", .data = &sun4i_apb0_data,},
+ {.compatible = "allwinner,sun6i-a31-apb2-div-clk", .data = &sun6i_a31_apb2_div_data,},
{}
};
/* Matches for mux clocks */
-static const __initconst struct of_device_id clk_mux_match[] = {
- {.compatible = "allwinner,sun4i-cpu-clk", .data = &cpu_mux_data,},
- {.compatible = "allwinner,sun4i-apb1-mux-clk", .data = &apb1_mux_data,},
+static const struct of_device_id clk_mux_match[] __initconst = {
+ {.compatible = "allwinner,sun4i-cpu-clk", .data = &sun4i_cpu_mux_data,},
+ {.compatible = "allwinner,sun4i-apb1-mux-clk", .data = &sun4i_apb1_mux_data,},
+ {.compatible = "allwinner,sun6i-a31-ahb1-mux-clk", .data = &sun6i_a31_ahb1_mux_data,},
{}
};
/* Matches for gate clocks */
-static const __initconst struct of_device_id clk_gates_match[] = {
+static const struct of_device_id clk_gates_match[] __initconst = {
{.compatible = "allwinner,sun4i-axi-gates-clk", .data = &sun4i_axi_gates_data,},
{.compatible = "allwinner,sun4i-ahb-gates-clk", .data = &sun4i_ahb_gates_data,},
+ {.compatible = "allwinner,sun5i-a10s-ahb-gates-clk", .data = &sun5i_a10s_ahb_gates_data,},
{.compatible = "allwinner,sun5i-a13-ahb-gates-clk", .data = &sun5i_a13_ahb_gates_data,},
+ {.compatible = "allwinner,sun6i-a31-ahb1-gates-clk", .data = &sun6i_a31_ahb1_gates_data,},
+ {.compatible = "allwinner,sun7i-a20-ahb-gates-clk", .data = &sun7i_a20_ahb_gates_data,},
{.compatible = "allwinner,sun4i-apb0-gates-clk", .data = &sun4i_apb0_gates_data,},
+ {.compatible = "allwinner,sun5i-a10s-apb0-gates-clk", .data = &sun5i_a10s_apb0_gates_data,},
{.compatible = "allwinner,sun5i-a13-apb0-gates-clk", .data = &sun5i_a13_apb0_gates_data,},
+ {.compatible = "allwinner,sun7i-a20-apb0-gates-clk", .data = &sun7i_a20_apb0_gates_data,},
{.compatible = "allwinner,sun4i-apb1-gates-clk", .data = &sun4i_apb1_gates_data,},
+ {.compatible = "allwinner,sun5i-a10s-apb1-gates-clk", .data = &sun5i_a10s_apb1_gates_data,},
{.compatible = "allwinner,sun5i-a13-apb1-gates-clk", .data = &sun5i_a13_apb1_gates_data,},
+ {.compatible = "allwinner,sun6i-a31-apb1-gates-clk", .data = &sun6i_a31_apb1_gates_data,},
+ {.compatible = "allwinner,sun7i-a20-apb1-gates-clk", .data = &sun7i_a20_apb1_gates_data,},
+ {.compatible = "allwinner,sun6i-a31-apb2-gates-clk", .data = &sun6i_a31_apb2_gates_data,},
{}
};
@@ -467,8 +619,8 @@ static void __init of_sunxi_table_clock_setup(const struct of_device_id *clk_mat
void __init sunxi_init_clocks(void)
{
- /* Register all the simple sunxi clocks on DT */
- of_clk_init(clk_match);
+ /* Register all the simple and basic clocks on DT */
+ of_clk_init(NULL);
/* Register factor clocks */
of_sunxi_table_clock_setup(clk_factors_match, sunxi_factors_clk_setup);
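
The CLK_OF_DECLARE() added for the oscillator earlier in this file is what lets sunxi_init_clocks() drop its private match table: of_clk_init(NULL) walks the linker-generated table of (compatible, setup) pairs instead. A minimal sketch of the pattern, using a hypothetical binding:

static void __init demo_osc_clk_setup(struct device_node *node)
{
	const char *clk_name = node->name;
	struct clk *clk;

	of_property_read_string(node, "clock-output-names", &clk_name);

	/* 24 MHz fixed-rate root clock; a real driver would read the
	 * rate from the device tree */
	clk = clk_register_fixed_rate(NULL, clk_name, NULL, CLK_IS_ROOT,
				      24000000);
	if (!IS_ERR(clk))
		of_clk_add_provider(node, of_clk_src_simple_get, clk);
}
CLK_OF_DECLARE(demo_osc, "demo,osc-clk", demo_osc_clk_setup);
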
diff --git a/drivers/clk/tegra/clk-tegra114.c b/drivers/clk/tegra/clk-tegra114.c
index 806d803..9467da7 100644
--- a/drivers/clk/tegra/clk-tegra114.c
+++ b/drivers/clk/tegra/clk-tegra114.c
@@ -1566,7 +1566,8 @@ static void __init tegra114_audio_clk_init(void __iomem *clk_base)
/* audio0 */
clk = clk_register_mux(NULL, "audio0_mux", mux_audio_sync_clk,
- ARRAY_SIZE(mux_audio_sync_clk), 0,
+ ARRAY_SIZE(mux_audio_sync_clk),
+ CLK_SET_RATE_NO_REPARENT,
clk_base + AUDIO_SYNC_CLK_I2S0, 0, 3, 0,
NULL);
clks[audio0_mux] = clk;
@@ -1578,7 +1579,8 @@ static void __init tegra114_audio_clk_init(void __iomem *clk_base)
/* audio1 */
clk = clk_register_mux(NULL, "audio1_mux", mux_audio_sync_clk,
- ARRAY_SIZE(mux_audio_sync_clk), 0,
+ ARRAY_SIZE(mux_audio_sync_clk),
+ CLK_SET_RATE_NO_REPARENT,
clk_base + AUDIO_SYNC_CLK_I2S1, 0, 3, 0,
NULL);
clks[audio1_mux] = clk;
@@ -1590,7 +1592,8 @@ static void __init tegra114_audio_clk_init(void __iomem *clk_base)
/* audio2 */
clk = clk_register_mux(NULL, "audio2_mux", mux_audio_sync_clk,
- ARRAY_SIZE(mux_audio_sync_clk), 0,
+ ARRAY_SIZE(mux_audio_sync_clk),
+ CLK_SET_RATE_NO_REPARENT,
clk_base + AUDIO_SYNC_CLK_I2S2, 0, 3, 0,
NULL);
clks[audio2_mux] = clk;
@@ -1602,7 +1605,8 @@ static void __init tegra114_audio_clk_init(void __iomem *clk_base)
/* audio3 */
clk = clk_register_mux(NULL, "audio3_mux", mux_audio_sync_clk,
- ARRAY_SIZE(mux_audio_sync_clk), 0,
+ ARRAY_SIZE(mux_audio_sync_clk),
+ CLK_SET_RATE_NO_REPARENT,
clk_base + AUDIO_SYNC_CLK_I2S3, 0, 3, 0,
NULL);
clks[audio3_mux] = clk;
@@ -1614,7 +1618,8 @@ static void __init tegra114_audio_clk_init(void __iomem *clk_base)
/* audio4 */
clk = clk_register_mux(NULL, "audio4_mux", mux_audio_sync_clk,
- ARRAY_SIZE(mux_audio_sync_clk), 0,
+ ARRAY_SIZE(mux_audio_sync_clk),
+ CLK_SET_RATE_NO_REPARENT,
clk_base + AUDIO_SYNC_CLK_I2S4, 0, 3, 0,
NULL);
clks[audio4_mux] = clk;
@@ -1626,7 +1631,8 @@ static void __init tegra114_audio_clk_init(void __iomem *clk_base)
/* spdif */
clk = clk_register_mux(NULL, "spdif_mux", mux_audio_sync_clk,
- ARRAY_SIZE(mux_audio_sync_clk), 0,
+ ARRAY_SIZE(mux_audio_sync_clk),
+ CLK_SET_RATE_NO_REPARENT,
clk_base + AUDIO_SYNC_CLK_SPDIF, 0, 3, 0,
NULL);
clks[spdif_mux] = clk;
@@ -1721,7 +1727,8 @@ static void __init tegra114_pmc_clk_init(void __iomem *pmc_base)
/* clk_out_1 */
clk = clk_register_mux(NULL, "clk_out_1_mux", clk_out1_parents,
- ARRAY_SIZE(clk_out1_parents), 0,
+ ARRAY_SIZE(clk_out1_parents),
+ CLK_SET_RATE_NO_REPARENT,
pmc_base + PMC_CLK_OUT_CNTRL, 6, 3, 0,
&clk_out_lock);
clks[clk_out_1_mux] = clk;
@@ -1733,7 +1740,8 @@ static void __init tegra114_pmc_clk_init(void __iomem *pmc_base)
/* clk_out_2 */
clk = clk_register_mux(NULL, "clk_out_2_mux", clk_out2_parents,
- ARRAY_SIZE(clk_out2_parents), 0,
+ ARRAY_SIZE(clk_out2_parents),
+ CLK_SET_RATE_NO_REPARENT,
pmc_base + PMC_CLK_OUT_CNTRL, 14, 3, 0,
&clk_out_lock);
clks[clk_out_2_mux] = clk;
@@ -1745,7 +1753,8 @@ static void __init tegra114_pmc_clk_init(void __iomem *pmc_base)
/* clk_out_3 */
clk = clk_register_mux(NULL, "clk_out_3_mux", clk_out3_parents,
- ARRAY_SIZE(clk_out3_parents), 0,
+ ARRAY_SIZE(clk_out3_parents),
+ CLK_SET_RATE_NO_REPARENT,
pmc_base + PMC_CLK_OUT_CNTRL, 22, 3, 0,
&clk_out_lock);
clks[clk_out_3_mux] = clk;
@@ -2063,7 +2072,8 @@ static __init void tegra114_periph_clk_init(void __iomem *clk_base)
/* dsia */
clk = clk_register_mux(NULL, "dsia_mux", mux_plld_out0_plld2_out0,
- ARRAY_SIZE(mux_plld_out0_plld2_out0), 0,
+ ARRAY_SIZE(mux_plld_out0_plld2_out0),
+ CLK_SET_RATE_NO_REPARENT,
clk_base + PLLD_BASE, 25, 1, 0, &pll_d_lock);
clks[dsia_mux] = clk;
clk = tegra_clk_register_periph_gate("dsia", "dsia_mux", 0, clk_base,
@@ -2073,7 +2083,8 @@ static __init void tegra114_periph_clk_init(void __iomem *clk_base)
/* dsib */
clk = clk_register_mux(NULL, "dsib_mux", mux_plld_out0_plld2_out0,
- ARRAY_SIZE(mux_plld_out0_plld2_out0), 0,
+ ARRAY_SIZE(mux_plld_out0_plld2_out0),
+ CLK_SET_RATE_NO_REPARENT,
clk_base + PLLD2_BASE, 25, 1, 0, &pll_d2_lock);
clks[dsib_mux] = clk;
clk = tegra_clk_register_periph_gate("dsib", "dsib_mux", 0, clk_base,
@@ -2110,7 +2121,8 @@ static __init void tegra114_periph_clk_init(void __iomem *clk_base)
/* emc */
clk = clk_register_mux(NULL, "emc_mux", mux_pllmcp_clkm,
- ARRAY_SIZE(mux_pllmcp_clkm), 0,
+ ARRAY_SIZE(mux_pllmcp_clkm),
+ CLK_SET_RATE_NO_REPARENT,
clk_base + CLK_SOURCE_EMC,
29, 3, 0, NULL);
clk = tegra_clk_register_periph_gate("emc", "emc_mux", 0, clk_base,
@@ -2194,7 +2206,7 @@ static const struct of_device_id pmc_match[] __initconst = {
* dfll_soc/dfll_ref apparently must be kept enabled, otherwise I2C5
* breaks
*/
-static __initdata struct tegra_clk_init_table init_table[] = {
+static struct tegra_clk_init_table init_table[] __initdata = {
{uarta, pll_p, 408000000, 0},
{uartb, pll_p, 408000000, 0},
{uartc, pll_p, 408000000, 0},
diff --git a/drivers/clk/tegra/clk-tegra20.c b/drivers/clk/tegra/clk-tegra20.c
index 759ca47..056f649 100644
--- a/drivers/clk/tegra/clk-tegra20.c
+++ b/drivers/clk/tegra/clk-tegra20.c
@@ -778,7 +778,8 @@ static void __init tegra20_audio_clk_init(void)
/* audio */
clk = clk_register_mux(NULL, "audio_mux", audio_parents,
- ARRAY_SIZE(audio_parents), 0,
+ ARRAY_SIZE(audio_parents),
+ CLK_SET_RATE_NO_REPARENT,
clk_base + AUDIO_SYNC_CLK, 0, 3, 0, NULL);
clk = clk_register_gate(NULL, "audio", "audio_mux", 0,
clk_base + AUDIO_SYNC_CLK, 4,
@@ -941,7 +942,8 @@ static void __init tegra20_periph_clk_init(void)
/* emc */
clk = clk_register_mux(NULL, "emc_mux", mux_pllmcp_clkm,
- ARRAY_SIZE(mux_pllmcp_clkm), 0,
+ ARRAY_SIZE(mux_pllmcp_clkm),
+ CLK_SET_RATE_NO_REPARENT,
clk_base + CLK_SOURCE_EMC,
30, 2, 0, NULL);
clk = tegra_clk_register_periph_gate("emc", "emc_mux", 0, clk_base, 0,
@@ -1223,7 +1225,7 @@ static struct tegra_cpu_car_ops tegra20_cpu_car_ops = {
#endif
};
-static __initdata struct tegra_clk_init_table init_table[] = {
+static struct tegra_clk_init_table init_table[] __initdata = {
{pll_p, clk_max, 216000000, 1},
{pll_p_out1, clk_max, 28800000, 1},
{pll_p_out2, clk_max, 48000000, 1},
diff --git a/drivers/clk/tegra/clk-tegra30.c b/drivers/clk/tegra/clk-tegra30.c
index e2c6ca0..dbe7c80 100644
--- a/drivers/clk/tegra/clk-tegra30.c
+++ b/drivers/clk/tegra/clk-tegra30.c
@@ -971,7 +971,7 @@ static void __init tegra30_pll_init(void)
/* PLLU */
clk = tegra_clk_register_pll("pll_u", "pll_ref", clk_base, pmc_base, 0,
0, &pll_u_params, TEGRA_PLLU | TEGRA_PLL_HAS_CPCON |
- TEGRA_PLL_SET_LFCON | TEGRA_PLL_USE_LOCK,
+ TEGRA_PLL_SET_LFCON,
pll_u_freq_table,
NULL);
clk_register_clkdev(clk, "pll_u", NULL);
@@ -1026,7 +1026,8 @@ static void __init tegra30_pll_init(void)
/* PLLE */
clk = clk_register_mux(NULL, "pll_e_mux", pll_e_parents,
- ARRAY_SIZE(pll_e_parents), 0,
+ ARRAY_SIZE(pll_e_parents),
+ CLK_SET_RATE_NO_REPARENT,
clk_base + PLLE_AUX, 2, 1, 0, NULL);
clk = tegra_clk_register_plle("pll_e", "pll_e_mux", clk_base, pmc_base,
CLK_GET_RATE_NOCACHE, 100000000, &pll_e_params,
@@ -1086,7 +1087,8 @@ static void __init tegra30_audio_clk_init(void)
/* audio0 */
clk = clk_register_mux(NULL, "audio0_mux", mux_audio_sync_clk,
- ARRAY_SIZE(mux_audio_sync_clk), 0,
+ ARRAY_SIZE(mux_audio_sync_clk),
+ CLK_SET_RATE_NO_REPARENT,
clk_base + AUDIO_SYNC_CLK_I2S0, 0, 3, 0, NULL);
clk = clk_register_gate(NULL, "audio0", "audio0_mux", 0,
clk_base + AUDIO_SYNC_CLK_I2S0, 4,
@@ -1096,7 +1098,8 @@ static void __init tegra30_audio_clk_init(void)
/* audio1 */
clk = clk_register_mux(NULL, "audio1_mux", mux_audio_sync_clk,
- ARRAY_SIZE(mux_audio_sync_clk), 0,
+ ARRAY_SIZE(mux_audio_sync_clk),
+ CLK_SET_RATE_NO_REPARENT,
clk_base + AUDIO_SYNC_CLK_I2S1, 0, 3, 0, NULL);
clk = clk_register_gate(NULL, "audio1", "audio1_mux", 0,
clk_base + AUDIO_SYNC_CLK_I2S1, 4,
@@ -1106,7 +1109,8 @@ static void __init tegra30_audio_clk_init(void)
/* audio2 */
clk = clk_register_mux(NULL, "audio2_mux", mux_audio_sync_clk,
- ARRAY_SIZE(mux_audio_sync_clk), 0,
+ ARRAY_SIZE(mux_audio_sync_clk),
+ CLK_SET_RATE_NO_REPARENT,
clk_base + AUDIO_SYNC_CLK_I2S2, 0, 3, 0, NULL);
clk = clk_register_gate(NULL, "audio2", "audio2_mux", 0,
clk_base + AUDIO_SYNC_CLK_I2S2, 4,
@@ -1116,7 +1120,8 @@ static void __init tegra30_audio_clk_init(void)
/* audio3 */
clk = clk_register_mux(NULL, "audio3_mux", mux_audio_sync_clk,
- ARRAY_SIZE(mux_audio_sync_clk), 0,
+ ARRAY_SIZE(mux_audio_sync_clk),
+ CLK_SET_RATE_NO_REPARENT,
clk_base + AUDIO_SYNC_CLK_I2S3, 0, 3, 0, NULL);
clk = clk_register_gate(NULL, "audio3", "audio3_mux", 0,
clk_base + AUDIO_SYNC_CLK_I2S3, 4,
@@ -1126,7 +1131,8 @@ static void __init tegra30_audio_clk_init(void)
/* audio4 */
clk = clk_register_mux(NULL, "audio4_mux", mux_audio_sync_clk,
- ARRAY_SIZE(mux_audio_sync_clk), 0,
+ ARRAY_SIZE(mux_audio_sync_clk),
+ CLK_SET_RATE_NO_REPARENT,
clk_base + AUDIO_SYNC_CLK_I2S4, 0, 3, 0, NULL);
clk = clk_register_gate(NULL, "audio4", "audio4_mux", 0,
clk_base + AUDIO_SYNC_CLK_I2S4, 4,
@@ -1136,7 +1142,8 @@ static void __init tegra30_audio_clk_init(void)
/* spdif */
clk = clk_register_mux(NULL, "spdif_mux", mux_audio_sync_clk,
- ARRAY_SIZE(mux_audio_sync_clk), 0,
+ ARRAY_SIZE(mux_audio_sync_clk),
+ CLK_SET_RATE_NO_REPARENT,
clk_base + AUDIO_SYNC_CLK_SPDIF, 0, 3, 0, NULL);
clk = clk_register_gate(NULL, "spdif", "spdif_mux", 0,
clk_base + AUDIO_SYNC_CLK_SPDIF, 4,
@@ -1229,7 +1236,8 @@ static void __init tegra30_pmc_clk_init(void)
/* clk_out_1 */
clk = clk_register_mux(NULL, "clk_out_1_mux", clk_out1_parents,
- ARRAY_SIZE(clk_out1_parents), 0,
+ ARRAY_SIZE(clk_out1_parents),
+ CLK_SET_RATE_NO_REPARENT,
pmc_base + PMC_CLK_OUT_CNTRL, 6, 3, 0,
&clk_out_lock);
clks[clk_out_1_mux] = clk;
@@ -1241,7 +1249,8 @@ static void __init tegra30_pmc_clk_init(void)
/* clk_out_2 */
clk = clk_register_mux(NULL, "clk_out_2_mux", clk_out2_parents,
- ARRAY_SIZE(clk_out2_parents), 0,
+ ARRAY_SIZE(clk_out2_parents),
+ CLK_SET_RATE_NO_REPARENT,
pmc_base + PMC_CLK_OUT_CNTRL, 14, 3, 0,
&clk_out_lock);
clk = clk_register_gate(NULL, "clk_out_2", "clk_out_2_mux", 0,
@@ -1252,7 +1261,8 @@ static void __init tegra30_pmc_clk_init(void)
/* clk_out_3 */
clk = clk_register_mux(NULL, "clk_out_3_mux", clk_out3_parents,
- ARRAY_SIZE(clk_out3_parents), 0,
+ ARRAY_SIZE(clk_out3_parents),
+ CLK_SET_RATE_NO_REPARENT,
pmc_base + PMC_CLK_OUT_CNTRL, 22, 3, 0,
&clk_out_lock);
clk = clk_register_gate(NULL, "clk_out_3", "clk_out_3_mux", 0,
@@ -1679,7 +1689,8 @@ static void __init tegra30_periph_clk_init(void)
/* emc */
clk = clk_register_mux(NULL, "emc_mux", mux_pllmcp_clkm,
- ARRAY_SIZE(mux_pllmcp_clkm), 0,
+ ARRAY_SIZE(mux_pllmcp_clkm),
+ CLK_SET_RATE_NO_REPARENT,
clk_base + CLK_SOURCE_EMC,
30, 2, 0, NULL);
clk = tegra_clk_register_periph_gate("emc", "emc_mux", 0, clk_base, 0,
@@ -1901,7 +1912,7 @@ static struct tegra_cpu_car_ops tegra30_cpu_car_ops = {
#endif
};
-static __initdata struct tegra_clk_init_table init_table[] = {
+static struct tegra_clk_init_table init_table[] __initdata = {
{uarta, pll_p, 408000000, 0},
{uartb, pll_p, 408000000, 0},
{uartc, pll_p, 408000000, 0},
diff --git a/drivers/clk/versatile/clk-vexpress.c b/drivers/clk/versatile/clk-vexpress.c
index a4a728d..2d5e1b4 100644
--- a/drivers/clk/versatile/clk-vexpress.c
+++ b/drivers/clk/versatile/clk-vexpress.c
@@ -37,8 +37,8 @@ static void __init vexpress_sp810_init(void __iomem *base)
snprintf(name, ARRAY_SIZE(name), "timerclken%d", i);
vexpress_sp810_timerclken[i] = clk_register_mux(NULL, name,
- parents, 2, 0, base + SCCTRL,
- SCCTRL_TIMERENnSEL_SHIFT(i), 1,
+ parents, 2, CLK_SET_RATE_NO_REPARENT,
+ base + SCCTRL, SCCTRL_TIMERENnSEL_SHIFT(i), 1,
0, &vexpress_sp810_lock);
if (WARN_ON(IS_ERR(vexpress_sp810_timerclken[i])))
diff --git a/drivers/clk/zynq/clkc.c b/drivers/clk/zynq/clkc.c
index 089d3e3..cc40fe6 100644
--- a/drivers/clk/zynq/clkc.c
+++ b/drivers/clk/zynq/clkc.c
@@ -125,8 +125,9 @@ static void __init zynq_clk_register_fclk(enum zynq_clk fclk,
div0_name = kasprintf(GFP_KERNEL, "%s_div0", clk_name);
div1_name = kasprintf(GFP_KERNEL, "%s_div1", clk_name);
- clk = clk_register_mux(NULL, mux_name, parents, 4, 0,
- fclk_ctrl_reg, 4, 2, 0, fclk_lock);
+ clk = clk_register_mux(NULL, mux_name, parents, 4,
+ CLK_SET_RATE_NO_REPARENT, fclk_ctrl_reg, 4, 2, 0,
+ fclk_lock);
clk = clk_register_divider(NULL, div0_name, mux_name,
0, fclk_ctrl_reg, 8, 6, CLK_DIVIDER_ONE_BASED |
@@ -168,8 +169,8 @@ static void __init zynq_clk_register_periph_clk(enum zynq_clk clk0,
mux_name = kasprintf(GFP_KERNEL, "%s_mux", clk_name0);
div_name = kasprintf(GFP_KERNEL, "%s_div", clk_name0);
- clk = clk_register_mux(NULL, mux_name, parents, 4, 0,
- clk_ctrl, 4, 2, 0, lock);
+ clk = clk_register_mux(NULL, mux_name, parents, 4,
+ CLK_SET_RATE_NO_REPARENT, clk_ctrl, 4, 2, 0, lock);
clk = clk_register_divider(NULL, div_name, mux_name, 0, clk_ctrl, 8, 6,
CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO, lock);
@@ -236,25 +237,26 @@ static void __init zynq_clk_setup(struct device_node *np)
clk = clk_register_zynq_pll("armpll_int", "ps_clk", SLCR_ARMPLL_CTRL,
SLCR_PLL_STATUS, 0, &armpll_lock);
clks[armpll] = clk_register_mux(NULL, clk_output_name[armpll],
- armpll_parents, 2, 0, SLCR_ARMPLL_CTRL, 4, 1, 0,
- &armpll_lock);
+ armpll_parents, 2, CLK_SET_RATE_NO_REPARENT,
+ SLCR_ARMPLL_CTRL, 4, 1, 0, &armpll_lock);
clk = clk_register_zynq_pll("ddrpll_int", "ps_clk", SLCR_DDRPLL_CTRL,
SLCR_PLL_STATUS, 1, &ddrpll_lock);
clks[ddrpll] = clk_register_mux(NULL, clk_output_name[ddrpll],
- ddrpll_parents, 2, 0, SLCR_DDRPLL_CTRL, 4, 1, 0,
- &ddrpll_lock);
+ ddrpll_parents, 2, CLK_SET_RATE_NO_REPARENT,
+ SLCR_DDRPLL_CTRL, 4, 1, 0, &ddrpll_lock);
clk = clk_register_zynq_pll("iopll_int", "ps_clk", SLCR_IOPLL_CTRL,
SLCR_PLL_STATUS, 2, &iopll_lock);
clks[iopll] = clk_register_mux(NULL, clk_output_name[iopll],
- iopll_parents, 2, 0, SLCR_IOPLL_CTRL, 4, 1, 0,
- &iopll_lock);
+ iopll_parents, 2, CLK_SET_RATE_NO_REPARENT,
+ SLCR_IOPLL_CTRL, 4, 1, 0, &iopll_lock);
/* CPU clocks */
tmp = readl(SLCR_621_TRUE) & 1;
- clk = clk_register_mux(NULL, "cpu_mux", cpu_parents, 4, 0,
- SLCR_ARM_CLK_CTRL, 4, 2, 0, &armclk_lock);
+ clk = clk_register_mux(NULL, "cpu_mux", cpu_parents, 4,
+ CLK_SET_RATE_NO_REPARENT, SLCR_ARM_CLK_CTRL, 4, 2, 0,
+ &armclk_lock);
clk = clk_register_divider(NULL, "cpu_div", "cpu_mux", 0,
SLCR_ARM_CLK_CTRL, 8, 6, CLK_DIVIDER_ONE_BASED |
CLK_DIVIDER_ALLOW_ZERO, &armclk_lock);
@@ -293,8 +295,9 @@ static void __init zynq_clk_setup(struct device_node *np)
swdt_ext_clk_mux_parents[i + 1] = dummy_nm;
}
clks[swdt] = clk_register_mux(NULL, clk_output_name[swdt],
- swdt_ext_clk_mux_parents, 2, CLK_SET_RATE_PARENT,
- SLCR_SWDT_CLK_SEL, 0, 1, 0, &swdtclk_lock);
+ swdt_ext_clk_mux_parents, 2, CLK_SET_RATE_PARENT |
+ CLK_SET_RATE_NO_REPARENT, SLCR_SWDT_CLK_SEL, 0, 1, 0,
+ &swdtclk_lock);
/* DDR clocks */
clk = clk_register_divider(NULL, "ddr2x_div", "ddrpll", 0,
@@ -356,8 +359,9 @@ static void __init zynq_clk_setup(struct device_node *np)
gem0_mux_parents[i + 1] = of_clk_get_parent_name(np,
idx);
}
- clk = clk_register_mux(NULL, "gem0_mux", periph_parents, 4, 0,
- SLCR_GEM0_CLK_CTRL, 4, 2, 0, &gem0clk_lock);
+ clk = clk_register_mux(NULL, "gem0_mux", periph_parents, 4,
+ CLK_SET_RATE_NO_REPARENT, SLCR_GEM0_CLK_CTRL, 4, 2, 0,
+ &gem0clk_lock);
clk = clk_register_divider(NULL, "gem0_div0", "gem0_mux", 0,
SLCR_GEM0_CLK_CTRL, 8, 6, CLK_DIVIDER_ONE_BASED |
CLK_DIVIDER_ALLOW_ZERO, &gem0clk_lock);
@@ -366,7 +370,8 @@ static void __init zynq_clk_setup(struct device_node *np)
CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
&gem0clk_lock);
clk = clk_register_mux(NULL, "gem0_emio_mux", gem0_mux_parents, 2,
- CLK_SET_RATE_PARENT, SLCR_GEM0_CLK_CTRL, 6, 1, 0,
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+ SLCR_GEM0_CLK_CTRL, 6, 1, 0,
&gem0clk_lock);
clks[gem0] = clk_register_gate(NULL, clk_output_name[gem0],
"gem0_emio_mux", CLK_SET_RATE_PARENT,
@@ -379,8 +384,9 @@ static void __init zynq_clk_setup(struct device_node *np)
gem1_mux_parents[i + 1] = of_clk_get_parent_name(np,
idx);
}
- clk = clk_register_mux(NULL, "gem1_mux", periph_parents, 4, 0,
- SLCR_GEM1_CLK_CTRL, 4, 2, 0, &gem1clk_lock);
+ clk = clk_register_mux(NULL, "gem1_mux", periph_parents, 4,
+ CLK_SET_RATE_NO_REPARENT, SLCR_GEM1_CLK_CTRL, 4, 2, 0,
+ &gem1clk_lock);
clk = clk_register_divider(NULL, "gem1_div0", "gem1_mux", 0,
SLCR_GEM1_CLK_CTRL, 8, 6, CLK_DIVIDER_ONE_BASED |
CLK_DIVIDER_ALLOW_ZERO, &gem1clk_lock);
@@ -389,7 +395,8 @@ static void __init zynq_clk_setup(struct device_node *np)
CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
&gem1clk_lock);
clk = clk_register_mux(NULL, "gem1_emio_mux", gem1_mux_parents, 2,
- CLK_SET_RATE_PARENT, SLCR_GEM1_CLK_CTRL, 6, 1, 0,
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+ SLCR_GEM1_CLK_CTRL, 6, 1, 0,
&gem1clk_lock);
clks[gem1] = clk_register_gate(NULL, clk_output_name[gem1],
"gem1_emio_mux", CLK_SET_RATE_PARENT,
@@ -409,8 +416,9 @@ static void __init zynq_clk_setup(struct device_node *np)
can_mio_mux_parents[i] = dummy_nm;
}
kfree(clk_name);
- clk = clk_register_mux(NULL, "can_mux", periph_parents, 4, 0,
- SLCR_CAN_CLK_CTRL, 4, 2, 0, &canclk_lock);
+ clk = clk_register_mux(NULL, "can_mux", periph_parents, 4,
+ CLK_SET_RATE_NO_REPARENT, SLCR_CAN_CLK_CTRL, 4, 2, 0,
+ &canclk_lock);
clk = clk_register_divider(NULL, "can_div0", "can_mux", 0,
SLCR_CAN_CLK_CTRL, 8, 6, CLK_DIVIDER_ONE_BASED |
CLK_DIVIDER_ALLOW_ZERO, &canclk_lock);
@@ -425,17 +433,21 @@ static void __init zynq_clk_setup(struct device_node *np)
CLK_SET_RATE_PARENT, SLCR_CAN_CLK_CTRL, 1, 0,
&canclk_lock);
clk = clk_register_mux(NULL, "can0_mio_mux",
- can_mio_mux_parents, 54, CLK_SET_RATE_PARENT,
- SLCR_CAN_MIOCLK_CTRL, 0, 6, 0, &canmioclk_lock);
+ can_mio_mux_parents, 54, CLK_SET_RATE_PARENT |
+ CLK_SET_RATE_NO_REPARENT, SLCR_CAN_MIOCLK_CTRL, 0, 6, 0,
+ &canmioclk_lock);
clk = clk_register_mux(NULL, "can1_mio_mux",
- can_mio_mux_parents, 54, CLK_SET_RATE_PARENT,
- SLCR_CAN_MIOCLK_CTRL, 16, 6, 0, &canmioclk_lock);
+ can_mio_mux_parents, 54, CLK_SET_RATE_PARENT |
+ CLK_SET_RATE_NO_REPARENT, SLCR_CAN_MIOCLK_CTRL, 16, 6,
+ 0, &canmioclk_lock);
clks[can0] = clk_register_mux(NULL, clk_output_name[can0],
- can0_mio_mux2_parents, 2, CLK_SET_RATE_PARENT,
- SLCR_CAN_MIOCLK_CTRL, 6, 1, 0, &canmioclk_lock);
+ can0_mio_mux2_parents, 2, CLK_SET_RATE_PARENT |
+ CLK_SET_RATE_NO_REPARENT, SLCR_CAN_MIOCLK_CTRL, 6, 1, 0,
+ &canmioclk_lock);
clks[can1] = clk_register_mux(NULL, clk_output_name[can1],
- can1_mio_mux2_parents, 2, CLK_SET_RATE_PARENT,
- SLCR_CAN_MIOCLK_CTRL, 22, 1, 0, &canmioclk_lock);
+ can1_mio_mux2_parents, 2, CLK_SET_RATE_PARENT |
+ CLK_SET_RATE_NO_REPARENT, SLCR_CAN_MIOCLK_CTRL, 22, 1,
+ 0, &canmioclk_lock);
for (i = 0; i < ARRAY_SIZE(dbgtrc_emio_input_names); i++) {
int idx = of_property_match_string(np, "clock-names",
@@ -444,13 +456,15 @@ static void __init zynq_clk_setup(struct device_node *np)
dbg_emio_mux_parents[i + 1] = of_clk_get_parent_name(np,
idx);
}
- clk = clk_register_mux(NULL, "dbg_mux", periph_parents, 4, 0,
- SLCR_DBG_CLK_CTRL, 4, 2, 0, &dbgclk_lock);
+ clk = clk_register_mux(NULL, "dbg_mux", periph_parents, 4,
+ CLK_SET_RATE_NO_REPARENT, SLCR_DBG_CLK_CTRL, 4, 2, 0,
+ &dbgclk_lock);
clk = clk_register_divider(NULL, "dbg_div", "dbg_mux", 0,
SLCR_DBG_CLK_CTRL, 8, 6, CLK_DIVIDER_ONE_BASED |
CLK_DIVIDER_ALLOW_ZERO, &dbgclk_lock);
- clk = clk_register_mux(NULL, "dbg_emio_mux", dbg_emio_mux_parents, 2, 0,
- SLCR_DBG_CLK_CTRL, 6, 1, 0, &dbgclk_lock);
+ clk = clk_register_mux(NULL, "dbg_emio_mux", dbg_emio_mux_parents, 2,
+ CLK_SET_RATE_NO_REPARENT, SLCR_DBG_CLK_CTRL, 6, 1, 0,
+ &dbgclk_lock);
clks[dbg_trc] = clk_register_gate(NULL, clk_output_name[dbg_trc],
"dbg_emio_mux", CLK_SET_RATE_PARENT, SLCR_DBG_CLK_CTRL,
0, 0, &dbgclk_lock);
diff --git a/drivers/clk/zynq/pll.c b/drivers/clk/zynq/pll.c
index 47e307c..3226f54 100644
--- a/drivers/clk/zynq/pll.c
+++ b/drivers/clk/zynq/pll.c
@@ -50,6 +50,9 @@ struct zynq_pll {
#define PLLCTRL_RESET_MASK 1
#define PLLCTRL_RESET_SHIFT 0
+#define PLL_FBDIV_MIN 13
+#define PLL_FBDIV_MAX 66
+
/**
* zynq_pll_round_rate() - Round a clock frequency
* @hw: Handle between common and hardware-specific interfaces
@@ -63,10 +66,10 @@ static long zynq_pll_round_rate(struct clk_hw *hw, unsigned long rate,
u32 fbdiv;
fbdiv = DIV_ROUND_CLOSEST(rate, *prate);
- if (fbdiv < 13)
- fbdiv = 13;
- else if (fbdiv > 66)
- fbdiv = 66;
+ if (fbdiv < PLL_FBDIV_MIN)
+ fbdiv = PLL_FBDIV_MIN;
+ else if (fbdiv > PLL_FBDIV_MAX)
+ fbdiv = PLL_FBDIV_MAX;
return *prate * fbdiv;
}
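A worked example of the rounding above, assuming the common 33.333 MHz PS_CLK parent (the actual input rate is board specific):

	unsigned long prate = 33333333; /* parent rate in Hz */
	unsigned long fbdiv = DIV_ROUND_CLOSEST(1000000000UL, prate); /* = 30 */
	/* 30 already lies within [PLL_FBDIV_MIN, PLL_FBDIV_MAX] */
	unsigned long rounded = prate * fbdiv; /* 999999990 Hz */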
@@ -182,7 +185,13 @@ static const struct clk_ops zynq_pll_ops = {
/**
* clk_register_zynq_pll() - Register PLL with the clock framework
- * @np Pointer to the DT device node
+ * @name:	PLL name
+ * @parent:	Parent clock name
+ * @pll_ctrl:	Pointer to the PLL control register
+ * @pll_status:	Pointer to the PLL status register
+ * @lock_index:	Bit index of this PLL's lock status bit in @pll_status
+ * @lock:	Register lock
+ * Returns a handle to the registered clock.
*/
struct clk *clk_register_zynq_pll(const char *name, const char *parent,
void __iomem *pll_ctrl, void __iomem *pll_status, u8 lock_index,
diff --git a/drivers/clocksource/em_sti.c b/drivers/clocksource/em_sti.c
index 4329a29..b9c81b7 100644
--- a/drivers/clocksource/em_sti.c
+++ b/drivers/clocksource/em_sti.c
@@ -315,68 +315,47 @@ static int em_sti_probe(struct platform_device *pdev)
{
struct em_sti_priv *p;
struct resource *res;
- int irq, ret;
+ int irq;
- p = kzalloc(sizeof(*p), GFP_KERNEL);
+ p = devm_kzalloc(&pdev->dev, sizeof(*p), GFP_KERNEL);
if (p == NULL) {
dev_err(&pdev->dev, "failed to allocate driver data\n");
- ret = -ENOMEM;
- goto err0;
+ return -ENOMEM;
}
p->pdev = pdev;
platform_set_drvdata(pdev, p);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "failed to get I/O memory\n");
- ret = -EINVAL;
- goto err0;
- }
-
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
dev_err(&pdev->dev, "failed to get irq\n");
- ret = -EINVAL;
- goto err0;
+ return -EINVAL;
}
/* map memory, let base point to the STI instance */
- p->base = ioremap_nocache(res->start, resource_size(res));
- if (p->base == NULL) {
- dev_err(&pdev->dev, "failed to remap I/O memory\n");
- ret = -ENXIO;
- goto err0;
- }
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ p->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(p->base))
+ return PTR_ERR(p->base);
/* get hold of clock */
- p->clk = clk_get(&pdev->dev, "sclk");
+ p->clk = devm_clk_get(&pdev->dev, "sclk");
if (IS_ERR(p->clk)) {
dev_err(&pdev->dev, "cannot get clock\n");
- ret = PTR_ERR(p->clk);
- goto err1;
+ return PTR_ERR(p->clk);
}
- if (request_irq(irq, em_sti_interrupt,
- IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING,
- dev_name(&pdev->dev), p)) {
+ if (devm_request_irq(&pdev->dev, irq, em_sti_interrupt,
+ IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING,
+ dev_name(&pdev->dev), p)) {
dev_err(&pdev->dev, "failed to request low IRQ\n");
- ret = -ENOENT;
- goto err2;
+ return -ENOENT;
}
raw_spin_lock_init(&p->lock);
em_sti_register_clockevent(p);
em_sti_register_clocksource(p);
return 0;
-
-err2:
- clk_put(p->clk);
-err1:
- iounmap(p->base);
-err0:
- kfree(p);
- return ret;
}
static int em_sti_remove(struct platform_device *pdev)
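The conversion above replaces every manual unwind label with device-managed (devm_*) allocations, so all error paths collapse into plain returns and the remove path no longer has to free anything by hand. Reduced to a sketch, with error checks elided for brevity, the managed probe pattern looks like:

	static int example_probe(struct platform_device *pdev)
	{
		struct em_sti_priv *p = devm_kzalloc(&pdev->dev, sizeof(*p), GFP_KERNEL);

		p->base = devm_ioremap_resource(&pdev->dev,
				platform_get_resource(pdev, IORESOURCE_MEM, 0));
		p->clk = devm_clk_get(&pdev->dev, "sclk");
		devm_request_irq(&pdev->dev, platform_get_irq(pdev, 0),
				 em_sti_interrupt, IRQF_TIMER,
				 dev_name(&pdev->dev), p);
		return 0; /* everything above is released automatically on detach */
	}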
diff --git a/drivers/clocksource/nomadik-mtu.c b/drivers/clocksource/nomadik-mtu.c
index 7d2c2c5..1b74bea 100644
--- a/drivers/clocksource/nomadik-mtu.c
+++ b/drivers/clocksource/nomadik-mtu.c
@@ -165,7 +165,8 @@ static void nmdk_clkevt_resume(struct clock_event_device *cedev)
static struct clock_event_device nmdk_clkevt = {
.name = "mtu_1",
- .features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC,
+ .features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC |
+ CLOCK_EVT_FEAT_DYNIRQ,
.rating = 200,
.set_mode = nmdk_clkevt_mode,
.set_next_event = nmdk_clkevt_next,
diff --git a/drivers/clocksource/samsung_pwm_timer.c b/drivers/clocksource/samsung_pwm_timer.c
index ac60f8b..ab29476 100644
--- a/drivers/clocksource/samsung_pwm_timer.c
+++ b/drivers/clocksource/samsung_pwm_timer.c
@@ -368,10 +368,6 @@ static void __init samsung_clocksource_init(void)
static void __init samsung_timer_resources(void)
{
- pwm.timerclk = clk_get(NULL, "timers");
- if (IS_ERR(pwm.timerclk))
- panic("failed to get timers clock for timer");
-
clk_prepare_enable(pwm.timerclk);
pwm.tcnt_max = (1UL << pwm.variant.bits) - 1;
@@ -416,6 +412,10 @@ void __init samsung_pwm_clocksource_init(void __iomem *base,
memcpy(&pwm.variant, variant, sizeof(pwm.variant));
memcpy(pwm.irq, irqs, SAMSUNG_PWM_NUM * sizeof(*irqs));
+ pwm.timerclk = clk_get(NULL, "timers");
+ if (IS_ERR(pwm.timerclk))
+ panic("failed to get timers clock for timer");
+
_samsung_pwm_clocksource_init();
}
@@ -447,6 +447,10 @@ static void __init samsung_pwm_alloc(struct device_node *np,
return;
}
+ pwm.timerclk = of_clk_get_by_name(np, "timers");
+ if (IS_ERR(pwm.timerclk))
+ panic("failed to get timers clock for timer");
+
_samsung_pwm_clocksource_init();
}
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
index 08d0c41..0965e98 100644
--- a/drivers/clocksource/sh_cmt.c
+++ b/drivers/clocksource/sh_cmt.c
@@ -37,6 +37,7 @@
struct sh_cmt_priv {
void __iomem *mapbase;
+ void __iomem *mapbase_str;
struct clk *clk;
unsigned long width; /* 16 or 32 bit version of hardware block */
unsigned long overflow_bit;
@@ -79,6 +80,12 @@ struct sh_cmt_priv {
* CMCSR 0xffca0060 16-bit
* CMCNT 0xffca0064 32-bit
* CMCOR 0xffca0068 32-bit
+ *
+ * "32-bit counter and 32-bit control" as found on r8a73a4 and r8a7790:
+ * CMSTR 0xffca0500 32-bit
+ * CMCSR 0xffca0510 32-bit
+ * CMCNT 0xffca0514 32-bit
+ * CMCOR 0xffca0518 32-bit
*/
static unsigned long sh_cmt_read16(void __iomem *base, unsigned long offs)
@@ -109,9 +116,7 @@ static void sh_cmt_write32(void __iomem *base, unsigned long offs,
static inline unsigned long sh_cmt_read_cmstr(struct sh_cmt_priv *p)
{
- struct sh_timer_config *cfg = p->pdev->dev.platform_data;
-
- return p->read_control(p->mapbase - cfg->channel_offset, 0);
+ return p->read_control(p->mapbase_str, 0);
}
static inline unsigned long sh_cmt_read_cmcsr(struct sh_cmt_priv *p)
@@ -127,9 +132,7 @@ static inline unsigned long sh_cmt_read_cmcnt(struct sh_cmt_priv *p)
static inline void sh_cmt_write_cmstr(struct sh_cmt_priv *p,
unsigned long value)
{
- struct sh_timer_config *cfg = p->pdev->dev.platform_data;
-
- p->write_control(p->mapbase - cfg->channel_offset, 0, value);
+ p->write_control(p->mapbase_str, 0, value);
}
static inline void sh_cmt_write_cmcsr(struct sh_cmt_priv *p,
@@ -676,7 +679,7 @@ static int sh_cmt_register(struct sh_cmt_priv *p, char *name,
static int sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev)
{
struct sh_timer_config *cfg = pdev->dev.platform_data;
- struct resource *res;
+ struct resource *res, *res2;
int irq, ret;
ret = -ENXIO;
@@ -694,6 +697,9 @@ static int sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev)
goto err0;
}
+ /* optional resource for the shared timer start/stop register */
+ res2 = platform_get_resource(p->pdev, IORESOURCE_MEM, 1);
+
irq = platform_get_irq(p->pdev, 0);
if (irq < 0) {
dev_err(&p->pdev->dev, "failed to get irq\n");
@@ -707,6 +713,15 @@ static int sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev)
goto err0;
}
+ /* map second resource for CMSTR */
+ p->mapbase_str = ioremap_nocache(res2 ? res2->start :
+ res->start - cfg->channel_offset,
+ res2 ? resource_size(res2) : 2);
+ if (p->mapbase_str == NULL) {
+ dev_err(&p->pdev->dev, "failed to remap I/O second memory\n");
+ goto err1;
+ }
+
/* request irq using setup_irq() (too early for request_irq()) */
p->irqaction.name = dev_name(&p->pdev->dev);
p->irqaction.handler = sh_cmt_interrupt;
@@ -719,11 +734,17 @@ static int sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev)
if (IS_ERR(p->clk)) {
dev_err(&p->pdev->dev, "cannot get clock\n");
ret = PTR_ERR(p->clk);
- goto err1;
+ goto err2;
}
- p->read_control = sh_cmt_read16;
- p->write_control = sh_cmt_write16;
+ if (res2 && (resource_size(res2) == 4)) {
+ /* assume both CMSTR and CMCSR are 32-bit */
+ p->read_control = sh_cmt_read32;
+ p->write_control = sh_cmt_write32;
+ } else {
+ p->read_control = sh_cmt_read16;
+ p->write_control = sh_cmt_write16;
+ }
if (resource_size(res) == 6) {
p->width = 16;
@@ -752,22 +773,23 @@ static int sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev)
cfg->clocksource_rating);
if (ret) {
dev_err(&p->pdev->dev, "registration failed\n");
- goto err2;
+ goto err3;
}
p->cs_enabled = false;
ret = setup_irq(irq, &p->irqaction);
if (ret) {
dev_err(&p->pdev->dev, "failed to request irq %d\n", irq);
- goto err2;
+ goto err3;
}
platform_set_drvdata(pdev, p);
return 0;
-err2:
+err3:
clk_put(p->clk);
-
+err2:
+ iounmap(p->mapbase_str);
err1:
iounmap(p->mapbase);
err0:
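With the optional second memory resource introduced above, a platform device using the 32-bit layout from the register-map comment could describe its regions as follows; the 4-byte size of the second resource is what makes sh_cmt_setup() pick the 32-bit control accessors (addresses taken from the r8a73a4/r8a7790 example in the comment):

	static struct resource cmt_resources[] = {
		DEFINE_RES_MEM(0xffca0510, 0x0c), /* CMCSR, CMCNT, CMCOR */
		DEFINE_RES_MEM(0xffca0500, 0x04), /* shared CMSTR, 32-bit wide */
	};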
diff --git a/drivers/clocksource/time-armada-370-xp.c b/drivers/clocksource/time-armada-370-xp.c
index 847cab6..0198504 100644
--- a/drivers/clocksource/time-armada-370-xp.c
+++ b/drivers/clocksource/time-armada-370-xp.c
@@ -13,6 +13,19 @@
*
* Timer 0 is used as free-running clocksource, while timer 1 is
* used as clock_event_device.
+ *
+ * ---
+ * Clocksource driver for the Armada 370 and Armada XP SoCs.
+ * This driver implements one compatible string for each SoC, since
+ * each has its own characteristics:
+ *
+ * * Armada 370 has no 25 MHz fixed timer.
+ *
+ * * Armada XP cannot work properly without the 25 MHz fixed timer, as
+ * otherwise it ends up using a clocksource whose frequency varies
+ * across cpufreq frequency changes.
+ *
+ * See Documentation/devicetree/bindings/timer/marvell,armada-370-xp-timer.txt
*/
#include <linux/init.h>
@@ -30,19 +43,18 @@
#include <linux/module.h>
#include <linux/sched_clock.h>
#include <linux/percpu.h>
-#include <linux/time-armada-370-xp.h>
/*
* Timer block registers.
*/
#define TIMER_CTRL_OFF 0x0000
-#define TIMER0_EN 0x0001
-#define TIMER0_RELOAD_EN 0x0002
-#define TIMER0_25MHZ 0x0800
+#define TIMER0_EN BIT(0)
+#define TIMER0_RELOAD_EN BIT(1)
+#define TIMER0_25MHZ BIT(11)
#define TIMER0_DIV(div) ((div) << 19)
-#define TIMER1_EN 0x0004
-#define TIMER1_RELOAD_EN 0x0008
-#define TIMER1_25MHZ 0x1000
+#define TIMER1_EN BIT(2)
+#define TIMER1_RELOAD_EN BIT(3)
+#define TIMER1_25MHZ BIT(12)
#define TIMER1_DIV(div) ((div) << 22)
#define TIMER_EVENTS_STATUS 0x0004
#define TIMER0_CLR_MASK (~0x1)
@@ -72,6 +84,18 @@ static u32 ticks_per_jiffy;
static struct clock_event_device __percpu *armada_370_xp_evt;
+static void timer_ctrl_clrset(u32 clr, u32 set)
+{
+ writel((readl(timer_base + TIMER_CTRL_OFF) & ~clr) | set,
+ timer_base + TIMER_CTRL_OFF);
+}
+
+static void local_timer_ctrl_clrset(u32 clr, u32 set)
+{
+ writel((readl(local_base + TIMER_CTRL_OFF) & ~clr) | set,
+ local_base + TIMER_CTRL_OFF);
+}
+
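Both helpers wrap the read-modify-write of the timer control register that the old code open-coded at every call site; typical uses, both of which appear later in this patch:

	/* disable timer 0 without touching any other control bit */
	local_timer_ctrl_clrset(TIMER0_EN, 0);

	/* switch timer 0 to the fixed 25 MHz input */
	local_timer_ctrl_clrset(0, TIMER0_25MHZ);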
static u32 notrace armada_370_xp_read_sched_clock(void)
{
return ~readl(timer_base + TIMER0_VAL_OFF);
@@ -84,7 +108,6 @@ static int
armada_370_xp_clkevt_next_event(unsigned long delta,
struct clock_event_device *dev)
{
- u32 u;
/*
* Clear clockevent timer interrupt.
*/
@@ -98,11 +121,8 @@ armada_370_xp_clkevt_next_event(unsigned long delta,
/*
* Enable the timer.
*/
- u = readl(local_base + TIMER_CTRL_OFF);
- u = ((u & ~TIMER0_RELOAD_EN) | TIMER0_EN |
- TIMER0_DIV(TIMER_DIVIDER_SHIFT));
- writel(u, local_base + TIMER_CTRL_OFF);
-
+ local_timer_ctrl_clrset(TIMER0_RELOAD_EN,
+ TIMER0_EN | TIMER0_DIV(TIMER_DIVIDER_SHIFT));
return 0;
}
@@ -110,8 +130,6 @@ static void
armada_370_xp_clkevt_mode(enum clock_event_mode mode,
struct clock_event_device *dev)
{
- u32 u;
-
if (mode == CLOCK_EVT_MODE_PERIODIC) {
/*
@@ -123,18 +141,14 @@ armada_370_xp_clkevt_mode(enum clock_event_mode mode,
/*
* Enable timer.
*/
-
- u = readl(local_base + TIMER_CTRL_OFF);
-
- writel((u | TIMER0_EN | TIMER0_RELOAD_EN |
- TIMER0_DIV(TIMER_DIVIDER_SHIFT)),
- local_base + TIMER_CTRL_OFF);
+ local_timer_ctrl_clrset(0, TIMER0_RELOAD_EN |
+ TIMER0_EN |
+ TIMER0_DIV(TIMER_DIVIDER_SHIFT));
} else {
/*
* Disable timer.
*/
- u = readl(local_base + TIMER_CTRL_OFF);
- writel(u & ~TIMER0_EN, local_base + TIMER_CTRL_OFF);
+ local_timer_ctrl_clrset(TIMER0_EN, 0);
/*
* ACK pending timer interrupt.
@@ -163,14 +177,14 @@ static irqreturn_t armada_370_xp_timer_interrupt(int irq, void *dev_id)
*/
static int armada_370_xp_timer_setup(struct clock_event_device *evt)
{
- u32 u;
+ u32 clr = 0, set = 0;
int cpu = smp_processor_id();
- u = readl(local_base + TIMER_CTRL_OFF);
if (timer25Mhz)
- writel(u | TIMER0_25MHZ, local_base + TIMER_CTRL_OFF);
+ set = TIMER0_25MHZ;
else
- writel(u & ~TIMER0_25MHZ, local_base + TIMER_CTRL_OFF);
+ clr = TIMER0_25MHZ;
+ local_timer_ctrl_clrset(clr, set);
evt->name = "armada_370_xp_per_cpu_tick",
evt->features = CLOCK_EVT_FEAT_ONESHOT |
@@ -217,36 +231,21 @@ static struct notifier_block armada_370_xp_timer_cpu_nb = {
.notifier_call = armada_370_xp_timer_cpu_notify,
};
-void __init armada_370_xp_timer_init(void)
+static void __init armada_370_xp_timer_common_init(struct device_node *np)
{
- u32 u;
- struct device_node *np;
+ u32 clr = 0, set = 0;
int res;
- np = of_find_compatible_node(NULL, NULL, "marvell,armada-370-xp-timer");
timer_base = of_iomap(np, 0);
WARN_ON(!timer_base);
local_base = of_iomap(np, 1);
- if (of_find_property(np, "marvell,timer-25Mhz", NULL)) {
- /* The fixed 25MHz timer is available so let's use it */
- u = readl(timer_base + TIMER_CTRL_OFF);
- writel(u | TIMER0_25MHZ,
- timer_base + TIMER_CTRL_OFF);
- timer_clk = 25000000;
- } else {
- unsigned long rate = 0;
- struct clk *clk = of_clk_get(np, 0);
- WARN_ON(IS_ERR(clk));
- rate = clk_get_rate(clk);
-
- u = readl(timer_base + TIMER_CTRL_OFF);
- writel(u & ~(TIMER0_25MHZ),
- timer_base + TIMER_CTRL_OFF);
-
- timer_clk = rate / TIMER_DIVIDER;
- timer25Mhz = false;
- }
+ if (timer25Mhz)
+ set = TIMER0_25MHZ;
+ else
+ clr = TIMER0_25MHZ;
+ timer_ctrl_clrset(clr, set);
+ local_timer_ctrl_clrset(clr, set);
/*
* We use timer 0 as clocksource, and private(local) timer 0
@@ -268,10 +267,8 @@ void __init armada_370_xp_timer_init(void)
writel(0xffffffff, timer_base + TIMER0_VAL_OFF);
writel(0xffffffff, timer_base + TIMER0_RELOAD_OFF);
- u = readl(timer_base + TIMER_CTRL_OFF);
-
- writel((u | TIMER0_EN | TIMER0_RELOAD_EN |
- TIMER0_DIV(TIMER_DIVIDER_SHIFT)), timer_base + TIMER_CTRL_OFF);
+ timer_ctrl_clrset(0, TIMER0_EN | TIMER0_RELOAD_EN |
+ TIMER0_DIV(TIMER_DIVIDER_SHIFT));
clocksource_mmio_init(timer_base + TIMER0_VAL_OFF,
"armada_370_xp_clocksource",
@@ -293,3 +290,29 @@ void __init armada_370_xp_timer_init(void)
if (!res)
armada_370_xp_timer_setup(this_cpu_ptr(armada_370_xp_evt));
}
+
+static void __init armada_xp_timer_init(struct device_node *np)
+{
+ struct clk *clk = of_clk_get_by_name(np, "fixed");
+
+ /* The 25 MHz fixed clock is mandatory and must always be available */
+ BUG_ON(IS_ERR(clk));
+ timer_clk = clk_get_rate(clk);
+
+ armada_370_xp_timer_common_init(np);
+}
+CLOCKSOURCE_OF_DECLARE(armada_xp, "marvell,armada-xp-timer",
+ armada_xp_timer_init);
+
+static void __init armada_370_timer_init(struct device_node *np)
+{
+ struct clk *clk = of_clk_get(np, 0);
+
+ BUG_ON(IS_ERR(clk));
+ timer_clk = clk_get_rate(clk) / TIMER_DIVIDER;
+ timer25Mhz = false;
+
+ armada_370_xp_timer_common_init(np);
+}
+CLOCKSOURCE_OF_DECLARE(armada_370, "marvell,armada-370-timer",
+ armada_370_timer_init);
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 5c75e31..43c24aa 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -280,13 +280,6 @@ static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
switch (state) {
case CPUFREQ_PRECHANGE:
- if (WARN(policy->transition_ongoing ==
- cpumask_weight(policy->cpus),
- "In middle of another frequency transition\n"))
- return;
-
- policy->transition_ongoing++;
-
/* detect if the driver reported a value as "old frequency"
* which is not equal to what the cpufreq core thinks is
* "old frequency".
@@ -306,12 +299,6 @@ static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
break;
case CPUFREQ_POSTCHANGE:
- if (WARN(!policy->transition_ongoing,
- "No frequency transition in progress\n"))
- return;
-
- policy->transition_ongoing--;
-
adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
pr_debug("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new,
(unsigned long)freqs->cpu);
@@ -437,7 +424,7 @@ static int __cpufreq_set_policy(struct cpufreq_policy *policy,
static ssize_t store_##file_name \
(struct cpufreq_policy *policy, const char *buf, size_t count) \
{ \
- unsigned int ret; \
+ int ret; \
struct cpufreq_policy new_policy; \
\
ret = cpufreq_get_policy(&new_policy, policy->cpu); \
@@ -490,7 +477,7 @@ static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
const char *buf, size_t count)
{
- unsigned int ret;
+ int ret;
char str_governor[16];
struct cpufreq_policy new_policy;
@@ -694,8 +681,13 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
struct freq_attr *fattr = to_attr(attr);
ssize_t ret = -EINVAL;
+ get_online_cpus();
+
+ if (!cpu_online(policy->cpu))
+ goto unlock;
+
if (!down_read_trylock(&cpufreq_rwsem))
- goto exit;
+ goto unlock;
if (lock_policy_rwsem_write(policy->cpu) < 0)
goto up_read;
@@ -709,7 +701,9 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
up_read:
up_read(&cpufreq_rwsem);
-exit:
+unlock:
+ put_online_cpus();
+
return ret;
}
@@ -912,11 +906,11 @@ static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
struct cpufreq_policy *policy;
unsigned long flags;
- write_lock_irqsave(&cpufreq_driver_lock, flags);
+ read_lock_irqsave(&cpufreq_driver_lock, flags);
policy = per_cpu(cpufreq_cpu_data_fallback, cpu);
- write_unlock_irqrestore(&cpufreq_driver_lock, flags);
+ read_unlock_irqrestore(&cpufreq_driver_lock, flags);
return policy;
}
@@ -953,6 +947,21 @@ static void cpufreq_policy_free(struct cpufreq_policy *policy)
kfree(policy);
}
+static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
+{
+ if (cpu == policy->cpu)
+ return;
+
+ policy->last_cpu = policy->cpu;
+ policy->cpu = cpu;
+
+#ifdef CONFIG_CPU_FREQ_TABLE
+ cpufreq_frequency_table_update_policy_cpu(policy);
+#endif
+ blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
+ CPUFREQ_UPDATE_POLICY_CPU, policy);
+}
+
static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
bool frozen)
{
@@ -1006,7 +1015,18 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
if (!policy)
goto nomem_out;
- policy->cpu = cpu;
+
+ /*
+ * In the resume path, since we restore a saved policy, the assignment
+ * to policy->cpu is like an update of the existing policy, rather than
+ * the creation of a brand new one. So we need to perform this update
+ * by invoking update_policy_cpu().
+ */
+ if (frozen && cpu != policy->cpu)
+ update_policy_cpu(policy, cpu);
+ else
+ policy->cpu = cpu;
+
policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
cpumask_copy(policy->cpus, cpumask_of(cpu));
@@ -1098,18 +1118,6 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
return __cpufreq_add_dev(dev, sif, false);
}
-static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
-{
- policy->last_cpu = policy->cpu;
- policy->cpu = cpu;
-
-#ifdef CONFIG_CPU_FREQ_TABLE
- cpufreq_frequency_table_update_policy_cpu(policy);
-#endif
- blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
- CPUFREQ_UPDATE_POLICY_CPU, policy);
-}
-
static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
unsigned int old_cpu, bool frozen)
{
@@ -1141,22 +1149,14 @@ static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
return cpu_dev->id;
}
-/**
- * __cpufreq_remove_dev - remove a CPU device
- *
- * Removes the cpufreq interface for a CPU device.
- * Caller should already have policy_rwsem in write mode for this CPU.
- * This routine frees the rwsem before returning.
- */
-static int __cpufreq_remove_dev(struct device *dev,
- struct subsys_interface *sif, bool frozen)
+static int __cpufreq_remove_dev_prepare(struct device *dev,
+ struct subsys_interface *sif,
+ bool frozen)
{
unsigned int cpu = dev->id, cpus;
int new_cpu, ret;
unsigned long flags;
struct cpufreq_policy *policy;
- struct kobject *kobj;
- struct completion *cmp;
pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
@@ -1196,8 +1196,9 @@ static int __cpufreq_remove_dev(struct device *dev,
cpumask_clear_cpu(cpu, policy->cpus);
unlock_policy_rwsem_write(cpu);
- if (cpu != policy->cpu && !frozen) {
- sysfs_remove_link(&dev->kobj, "cpufreq");
+ if (cpu != policy->cpu) {
+ if (!frozen)
+ sysfs_remove_link(&dev->kobj, "cpufreq");
} else if (cpus > 1) {
new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu, frozen);
@@ -1213,6 +1214,33 @@ static int __cpufreq_remove_dev(struct device *dev,
}
}
+ return 0;
+}
+
+static int __cpufreq_remove_dev_finish(struct device *dev,
+ struct subsys_interface *sif,
+ bool frozen)
+{
+ unsigned int cpu = dev->id, cpus;
+ int ret;
+ unsigned long flags;
+ struct cpufreq_policy *policy;
+ struct kobject *kobj;
+ struct completion *cmp;
+
+ read_lock_irqsave(&cpufreq_driver_lock, flags);
+ policy = per_cpu(cpufreq_cpu_data, cpu);
+ read_unlock_irqrestore(&cpufreq_driver_lock, flags);
+
+ if (!policy) {
+ pr_debug("%s: No cpu_data found\n", __func__);
+ return -EINVAL;
+ }
+
+ lock_policy_rwsem_read(cpu);
+ cpus = cpumask_weight(policy->cpus);
+ unlock_policy_rwsem_read(cpu);
+
/* If cpu is last user of policy, free policy */
if (cpus == 1) {
if (cpufreq_driver->target) {
@@ -1272,6 +1300,27 @@ static int __cpufreq_remove_dev(struct device *dev,
return 0;
}
+/**
+ * __cpufreq_remove_dev - remove a CPU device
+ *
+ * Removes the cpufreq interface for a CPU device.
+ * Caller should already have policy_rwsem in write mode for this CPU.
+ * This routine frees the rwsem before returning.
+ */
+static inline int __cpufreq_remove_dev(struct device *dev,
+ struct subsys_interface *sif,
+ bool frozen)
+{
+ int ret;
+
+ ret = __cpufreq_remove_dev_prepare(dev, sif, frozen);
+
+ if (!ret)
+ ret = __cpufreq_remove_dev_finish(dev, sif, frozen);
+
+ return ret;
+}
+
static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
{
unsigned int cpu = dev->id;
@@ -1610,8 +1659,6 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
if (cpufreq_disabled())
return -ENODEV;
- if (policy->transition_ongoing)
- return -EBUSY;
/* Make sure that target_freq is within supported range */
if (target_freq > policy->max)
@@ -1692,8 +1739,9 @@ static int __cpufreq_governor(struct cpufreq_policy *policy,
policy->cpu, event);
mutex_lock(&cpufreq_governor_lock);
- if ((!policy->governor_enabled && (event == CPUFREQ_GOV_STOP)) ||
- (policy->governor_enabled && (event == CPUFREQ_GOV_START))) {
+ if ((policy->governor_enabled && event == CPUFREQ_GOV_START)
+ || (!policy->governor_enabled
+ && (event == CPUFREQ_GOV_LIMITS || event == CPUFREQ_GOV_STOP))) {
mutex_unlock(&cpufreq_governor_lock);
return -EBUSY;
}
@@ -1994,7 +2042,11 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
break;
case CPU_DOWN_PREPARE:
- __cpufreq_remove_dev(dev, NULL, frozen);
+ __cpufreq_remove_dev_prepare(dev, NULL, frozen);
+ break;
+
+ case CPU_POST_DEAD:
+ __cpufreq_remove_dev_finish(dev, NULL, frozen);
break;
case CPU_DOWN_FAILED:
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index 04452f0..4cf0d28 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -74,7 +74,7 @@ static ssize_t show_time_in_state(struct cpufreq_policy *policy, char *buf)
for (i = 0; i < stat->state_num; i++) {
len += sprintf(buf + len, "%u %llu\n", stat->freq_table[i],
(unsigned long long)
- cputime64_to_clock_t(stat->time_in_state[i]));
+ jiffies_64_to_clock_t(stat->time_in_state[i]));
}
return len;
}
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 6efd96c..9733f29 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -522,6 +522,11 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
ICPU(0x2a, default_policy),
ICPU(0x2d, default_policy),
ICPU(0x3a, default_policy),
+ ICPU(0x3c, default_policy),
+ ICPU(0x3e, default_policy),
+ ICPU(0x3f, default_policy),
+ ICPU(0x45, default_policy),
+ ICPU(0x46, default_policy),
{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);
diff --git a/drivers/cpuidle/Kconfig.arm b/drivers/cpuidle/Kconfig.arm
index b330219..8e36603 100644
--- a/drivers/cpuidle/Kconfig.arm
+++ b/drivers/cpuidle/Kconfig.arm
@@ -27,3 +27,13 @@ config ARM_U8500_CPUIDLE
help
Select this to enable cpuidle for ST-E u8500 processors
+config CPU_IDLE_BIG_LITTLE
+ bool "Support for ARM big.LITTLE processors"
+ depends on ARCH_VEXPRESS_TC2_PM
+ select ARM_CPU_SUSPEND
+ select CPU_IDLE_MULTIPLE_DRIVERS
+ help
+ Select this option to enable the CPU idle driver for big.LITTLE based
+ ARM systems. The driver manages CPU coordination through MCPM and
+ defines different C-states for little and big cores through the
+ multiple CPU idle drivers infrastructure.
diff --git a/drivers/cpuidle/Makefile b/drivers/cpuidle/Makefile
index 0b9d200..cea5ef5 100644
--- a/drivers/cpuidle/Makefile
+++ b/drivers/cpuidle/Makefile
@@ -11,3 +11,4 @@ obj-$(CONFIG_ARM_HIGHBANK_CPUIDLE) += cpuidle-calxeda.o
obj-$(CONFIG_ARM_KIRKWOOD_CPUIDLE) += cpuidle-kirkwood.o
obj-$(CONFIG_ARM_ZYNQ_CPUIDLE) += cpuidle-zynq.o
obj-$(CONFIG_ARM_U8500_CPUIDLE) += cpuidle-ux500.o
+obj-$(CONFIG_CPU_IDLE_BIG_LITTLE) += cpuidle-big_little.o
diff --git a/drivers/cpuidle/cpuidle-big_little.c b/drivers/cpuidle/cpuidle-big_little.c
new file mode 100644
index 0000000..b45fc62
--- /dev/null
+++ b/drivers/cpuidle/cpuidle-big_little.c
@@ -0,0 +1,209 @@
+/*
+ * Copyright (c) 2013 ARM/Linaro
+ *
+ * Authors: Daniel Lezcano <daniel.lezcano@linaro.org>
+ * Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+ * Nicolas Pitre <nicolas.pitre@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Maintainer: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+ * Maintainer: Daniel Lezcano <daniel.lezcano@linaro.org>
+ */
+#include <linux/cpuidle.h>
+#include <linux/cpu_pm.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+
+#include <asm/cpu.h>
+#include <asm/cputype.h>
+#include <asm/cpuidle.h>
+#include <asm/mcpm.h>
+#include <asm/smp_plat.h>
+#include <asm/suspend.h>
+
+static int bl_enter_powerdown(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int idx);
+
+/*
+ * NB: Owing to current menu governor behaviour, the big and LITTLE
+ * index 1 states have to define exit_latency and target_residency for
+ * the cluster state since, when all CPUs in a cluster hit it, the
+ * cluster can be shut down. This means that when a single CPU enters
+ * this state the exit_latency and target_residency values are somewhat
+ * overkill. There is no notion of cluster states in the menu governor,
+ * so CPUs have to define CPU states where possibly the cluster will be
+ * shut down depending on the state of other CPUs. Idle state entry and
+ * exit happen at random times; however the cluster state provides
+ * target_residency values as if all CPUs in a cluster enter the state
+ * at once; this is somewhat optimistic and the behaviour should be
+ * fixed either in the governor or in the MCPM back-ends.
+ * To make this driver 100% generic, the number of states and the
+ * exit_latency/target_residency values must be obtained from device
+ * tree bindings.
+ *
+ * exit_latency: refers to the TC2 vexpress test chip and depends on the
+ * current cluster operating point. It is the time it takes to get the CPU
+ * up and running when the CPU is powered up on cluster wake-up from shutdown.
+ * Current values for big and LITTLE clusters are provided for clusters
+ * running at default operating points.
+ *
+ * target_residency: the minimum amount of time the cluster has
+ * to be down to break even in terms of power consumption. Cluster
+ * shutdown has inherent dynamic power costs (L2 writebacks to DRAM
+ * being the main factor) that depend on the current operating points.
+ * The current values for both clusters assume a CPU with half of its
+ * L2 lines dirty and requiring cleaning to DRAM, and take into account
+ * leakage static power values related to the vexpress TC2 testchip.
+ */
+static struct cpuidle_driver bl_idle_little_driver = {
+ .name = "little_idle",
+ .owner = THIS_MODULE,
+ .states[0] = ARM_CPUIDLE_WFI_STATE,
+ .states[1] = {
+ .enter = bl_enter_powerdown,
+ .exit_latency = 700,
+ .target_residency = 2500,
+ .flags = CPUIDLE_FLAG_TIME_VALID |
+ CPUIDLE_FLAG_TIMER_STOP,
+ .name = "C1",
+ .desc = "ARM little-cluster power down",
+ },
+ .state_count = 2,
+};
+
+static struct cpuidle_driver bl_idle_big_driver = {
+ .name = "big_idle",
+ .owner = THIS_MODULE,
+ .states[0] = ARM_CPUIDLE_WFI_STATE,
+ .states[1] = {
+ .enter = bl_enter_powerdown,
+ .exit_latency = 500,
+ .target_residency = 2000,
+ .flags = CPUIDLE_FLAG_TIME_VALID |
+ CPUIDLE_FLAG_TIMER_STOP,
+ .name = "C1",
+ .desc = "ARM big-cluster power down",
+ },
+ .state_count = 2,
+};
+
+/*
+ * notrace prevents trace shims from getting inserted where they
+ * should not. Global jumps and ldrex/strex must not be inserted
+ * in power down sequences where caches and MMU may be turned off.
+ */
+static int notrace bl_powerdown_finisher(unsigned long arg)
+{
+ /* MCPM works with HW CPU identifiers */
+ unsigned int mpidr = read_cpuid_mpidr();
+ unsigned int cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+ unsigned int cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+
+ mcpm_set_entry_vector(cpu, cluster, cpu_resume);
+
+ /*
+ * Residency value passed to mcpm_cpu_suspend back-end
+ * has to be given clear semantics. Set to 0 as a
+ * temporary value.
+ */
+ mcpm_cpu_suspend(0);
+
+ /* return value != 0 means failure */
+ return 1;
+}
+
+/**
+ * bl_enter_powerdown - Programs CPU to enter the specified state
+ * @dev: cpuidle device
+ * @drv: cpuidle driver containing the target state
+ * @idx: state index
+ *
+ * Called from the CPUidle framework to program the device to the
+ * specified target state selected by the governor.
+ */
+static int bl_enter_powerdown(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int idx)
+{
+ cpu_pm_enter();
+
+ cpu_suspend(0, bl_powerdown_finisher);
+
+ /* signals the MCPM core that CPU is out of low power state */
+ mcpm_cpu_powered_up();
+
+ cpu_pm_exit();
+
+ return idx;
+}
+
+static int __init bl_idle_driver_init(struct cpuidle_driver *drv, int cpu_id)
+{
+ struct cpuinfo_arm *cpu_info;
+ struct cpumask *cpumask;
+ unsigned long cpuid;
+ int cpu;
+
+ cpumask = kzalloc(cpumask_size(), GFP_KERNEL);
+ if (!cpumask)
+ return -ENOMEM;
+
+ for_each_possible_cpu(cpu) {
+ cpu_info = &per_cpu(cpu_data, cpu);
+ cpuid = is_smp() ? cpu_info->cpuid : read_cpuid_id();
+
+ /* read cpu id part number */
+ if ((cpuid & 0xFFF0) == cpu_id)
+ cpumask_set_cpu(cpu, cpumask);
+ }
+
+ drv->cpumask = cpumask;
+
+ return 0;
+}
+
+static int __init bl_idle_init(void)
+{
+ int ret;
+
+ /*
+ * Initialize the driver just for a compliant set of machines
+ */
+ if (!of_machine_is_compatible("arm,vexpress,v2p-ca15_a7"))
+ return -ENODEV;
+ /*
+ * For now the differentiation between little and big cores
+ * is based on the part number. A7 cores are considered little
+ * cores, A15 are considered big cores. This distinction may
+ * evolve in the future with a more generic matching approach.
+ */
+ ret = bl_idle_driver_init(&bl_idle_little_driver,
+ ARM_CPU_PART_CORTEX_A7);
+ if (ret)
+ return ret;
+
+ ret = bl_idle_driver_init(&bl_idle_big_driver, ARM_CPU_PART_CORTEX_A15);
+ if (ret)
+ goto out_uninit_little;
+
+ ret = cpuidle_register(&bl_idle_little_driver, NULL);
+ if (ret)
+ goto out_uninit_big;
+
+ ret = cpuidle_register(&bl_idle_big_driver, NULL);
+ if (ret)
+ goto out_unregister_little;
+
+ return 0;
+
+out_unregister_little:
+ cpuidle_unregister(&bl_idle_little_driver);
+out_uninit_big:
+ kfree(bl_idle_big_driver.cpumask);
+out_uninit_little:
+ kfree(bl_idle_little_driver.cpumask);
+
+ return ret;
+}
+device_initcall(bl_idle_init);
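For reference, the little/big split performed by bl_idle_driver_init() reduces to a comparison of the MIDR part-number field; for the CPU executing the code this would be:

	unsigned long cpuid = read_cpuid_id(); /* MIDR of this CPU */
	bool is_little = (cpuid & 0xFFF0) == ARM_CPU_PART_CORTEX_A7;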
diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
index 3ac499d..6e11701 100644
--- a/drivers/cpuidle/driver.c
+++ b/drivers/cpuidle/driver.c
@@ -331,7 +331,8 @@ struct cpuidle_driver *cpuidle_driver_ref(void)
spin_lock(&cpuidle_driver_lock);
drv = cpuidle_get_driver();
- drv->refcnt++;
+ if (drv)
+ drv->refcnt++;
spin_unlock(&cpuidle_driver_lock);
return drv;
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index daa4da2..526ec77 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -308,6 +308,15 @@ config DMA_JZ4740
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
+config K3_DMA
+ tristate "Hisilicon K3 DMA support"
+ depends on ARCH_HI3xxx
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ help
+ Support the DMA engine for Hisilicon K3 platform
+ devices.
+
config DMA_ENGINE
bool
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 6d62ec3..db89035 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -40,3 +40,4 @@ obj-$(CONFIG_DMA_OMAP) += omap-dma.o
obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o
obj-$(CONFIG_DMA_JZ4740) += dma-jz4740.o
obj-$(CONFIG_TI_CPPI41) += cppi41.o
+obj-$(CONFIG_K3_DMA) += k3dma.o
diff --git a/drivers/dma/acpi-dma.c b/drivers/dma/acpi-dma.c
index 5a18f82..e69b03c 100644
--- a/drivers/dma/acpi-dma.c
+++ b/drivers/dma/acpi-dma.c
@@ -43,7 +43,6 @@ static int acpi_dma_parse_resource_group(const struct acpi_csrt_group *grp,
struct list_head resource_list;
struct resource_list_entry *rentry;
resource_size_t mem = 0, irq = 0;
- u32 vendor_id;
int ret;
if (grp->shared_info_length != sizeof(struct acpi_csrt_shared_info))
@@ -73,9 +72,8 @@ static int acpi_dma_parse_resource_group(const struct acpi_csrt_group *grp,
if (si->mmio_base_low != mem || si->gsi_interrupt != irq)
return 0;
- vendor_id = le32_to_cpu(grp->vendor_id);
dev_dbg(&adev->dev, "matches with %.4s%04X (rev %u)\n",
- (char *)&vendor_id, grp->device_id, grp->revision);
+ (char *)&grp->vendor_id, grp->device_id, grp->revision);
/* Check if the request line range is available */
if (si->base_request_line == 0 && si->num_handshake_signals == 0)
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 06fe45c..fce46c5 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -24,6 +24,7 @@
*
* Documentation: ARM DDI 0196G == PL080
* Documentation: ARM DDI 0218E == PL081
+ * Documentation: S3C6410 User's Manual == PL080S
*
* PL080 & PL081 both have 16 sets of DMA signals that can be routed to any
* channel.
@@ -36,6 +37,14 @@
*
* The PL080 has a dual bus master, PL081 has a single master.
*
+ * PL080S is a version modified by Samsung and used in S3C64xx SoCs.
+ * It differs in following aspects:
+ * - CH_CONFIG register at different offset,
+ * - separate CH_CONTROL2 register for transfer size,
+ * - bigger maximum transfer size,
+ * - 8-word aligned LLI, instead of 4-word, due to extra CCTL2 word,
+ * - no support for peripheral flow control.
+ *
* Memory to peripheral transfer may be visualized as
* Get data from memory to DMAC
* Until no data left
@@ -64,10 +73,7 @@
* - Peripheral flow control: the transfer size is ignored (and should be
* zero). The data is transferred from the current LLI entry, until
* after the final transfer signalled by LBREQ or LSREQ. The DMAC
- * will then move to the next LLI entry.
- *
- * Global TODO:
- * - Break out common code from arch/arm/mach-s3c64xx and share
+ * will then move to the next LLI entry. Unsupported by PL080S.
*/
#include <linux/amba/bus.h>
#include <linux/amba/pl08x.h>
@@ -100,24 +106,16 @@ struct pl08x_driver_data;
* @nomadik: whether the channels have Nomadik security extension bits
* that need to be checked for permission before use and some registers are
* missing
+ * @pl080s: whether this version is a PL080S, which has separate register and
+ * LLI word for transfer size.
*/
struct vendor_data {
+ u8 config_offset;
u8 channels;
bool dualmaster;
bool nomadik;
-};
-
-/*
- * PL08X private data structures
- * An LLI struct - see PL08x TRM. Note that next uses bit[0] as a bus bit,
- * start & end do not - their bus bit info is in cctl. Also note that these
- * are fixed 32-bit quantities.
- */
-struct pl08x_lli {
- u32 src;
- u32 dst;
- u32 lli;
- u32 cctl;
+ bool pl080s;
+ u32 max_transfer_size;
};
/**
@@ -133,6 +131,8 @@ struct pl08x_bus_data {
u8 buswidth;
};
+#define IS_BUS_ALIGNED(bus) IS_ALIGNED((bus)->addr, (bus)->buswidth)
+
/**
* struct pl08x_phy_chan - holder for the physical channels
* @id: physical index to this channel
@@ -145,6 +145,7 @@ struct pl08x_bus_data {
struct pl08x_phy_chan {
unsigned int id;
void __iomem *base;
+ void __iomem *reg_config;
spinlock_t lock;
struct pl08x_dma_chan *serving;
bool locked;
@@ -174,12 +175,13 @@ struct pl08x_sg {
* @ccfg: config reg values for current txd
* @done: this marks completed descriptors, which should not have their
* mux released.
+ * @cyclic: indicates a cyclic transfer
*/
struct pl08x_txd {
struct virt_dma_desc vd;
struct list_head dsg_list;
dma_addr_t llis_bus;
- struct pl08x_lli *llis_va;
+ u32 *llis_va;
/* Default cctl value for LLIs */
u32 cctl;
/*
@@ -188,6 +190,7 @@ struct pl08x_txd {
*/
u32 ccfg;
bool done;
+ bool cyclic;
};
/**
@@ -263,17 +266,29 @@ struct pl08x_driver_data {
struct dma_pool *pool;
u8 lli_buses;
u8 mem_buses;
+ u8 lli_words;
};
/*
* PL08X specific defines
*/
-/* Size (bytes) of each LLI buffer allocated for one transfer */
-# define PL08X_LLI_TSFR_SIZE 0x2000
+/* The order of words in an LLI. */
+#define PL080_LLI_SRC 0
+#define PL080_LLI_DST 1
+#define PL080_LLI_LLI 2
+#define PL080_LLI_CCTL 3
+#define PL080S_LLI_CCTL2 4
+
+/* Total words in an LLI. */
+#define PL080_LLI_WORDS 4
+#define PL080S_LLI_WORDS 8
-/* Maximum times we call dma_pool_alloc on this pool without freeing */
-#define MAX_NUM_TSFR_LLIS (PL08X_LLI_TSFR_SIZE/sizeof(struct pl08x_lli))
+/*
+ * Number of LLIs in each LLI buffer allocated for one transfer
+ * (maximum times we call dma_pool_alloc on this pool without freeing)
+ */
+#define MAX_NUM_TSFR_LLIS 512
#define PL08X_ALIGN 8
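With LLIs now kept as bare u32 arrays instead of a struct, slot n of a transfer's LLI buffer is found by plain index arithmetic; a sketch of the pattern pl08x_fill_lli_for_desc() uses below (on PL080S the stride doubles to make room for CCTL2 plus padding):

	/* n is the LLI index within the transfer */
	u32 lli_words = pl08x->vd->pl080s ? PL080S_LLI_WORDS : PL080_LLI_WORDS;
	u32 *lli = txd->llis_va + n * lli_words;

	lli[PL080_LLI_SRC]  = src_addr;
	lli[PL080_LLI_DST]  = dst_addr;
	lli[PL080_LLI_CCTL] = cctl;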
static inline struct pl08x_dma_chan *to_pl08x_chan(struct dma_chan *chan)
@@ -334,10 +349,39 @@ static int pl08x_phy_channel_busy(struct pl08x_phy_chan *ch)
{
unsigned int val;
- val = readl(ch->base + PL080_CH_CONFIG);
+ val = readl(ch->reg_config);
return val & PL080_CONFIG_ACTIVE;
}
+static void pl08x_write_lli(struct pl08x_driver_data *pl08x,
+ struct pl08x_phy_chan *phychan, const u32 *lli, u32 ccfg)
+{
+ if (pl08x->vd->pl080s)
+ dev_vdbg(&pl08x->adev->dev,
+ "WRITE channel %d: csrc=0x%08x, cdst=0x%08x, "
+ "clli=0x%08x, cctl=0x%08x, cctl2=0x%08x, ccfg=0x%08x\n",
+ phychan->id, lli[PL080_LLI_SRC], lli[PL080_LLI_DST],
+ lli[PL080_LLI_LLI], lli[PL080_LLI_CCTL],
+ lli[PL080S_LLI_CCTL2], ccfg);
+ else
+ dev_vdbg(&pl08x->adev->dev,
+ "WRITE channel %d: csrc=0x%08x, cdst=0x%08x, "
+ "clli=0x%08x, cctl=0x%08x, ccfg=0x%08x\n",
+ phychan->id, lli[PL080_LLI_SRC], lli[PL080_LLI_DST],
+ lli[PL080_LLI_LLI], lli[PL080_LLI_CCTL], ccfg);
+
+ writel_relaxed(lli[PL080_LLI_SRC], phychan->base + PL080_CH_SRC_ADDR);
+ writel_relaxed(lli[PL080_LLI_DST], phychan->base + PL080_CH_DST_ADDR);
+ writel_relaxed(lli[PL080_LLI_LLI], phychan->base + PL080_CH_LLI);
+ writel_relaxed(lli[PL080_LLI_CCTL], phychan->base + PL080_CH_CONTROL);
+
+ if (pl08x->vd->pl080s)
+ writel_relaxed(lli[PL080S_LLI_CCTL2],
+ phychan->base + PL080S_CH_CONTROL2);
+
+ writel(ccfg, phychan->reg_config);
+}
+
/*
* Set the initial DMA register values i.e. those for the first LLI
* The next LLI pointer and the configuration interrupt bit have
@@ -350,7 +394,6 @@ static void pl08x_start_next_txd(struct pl08x_dma_chan *plchan)
struct pl08x_phy_chan *phychan = plchan->phychan;
struct virt_dma_desc *vd = vchan_next_desc(&plchan->vc);
struct pl08x_txd *txd = to_pl08x_txd(&vd->tx);
- struct pl08x_lli *lli;
u32 val;
list_del(&txd->vd.node);
@@ -361,19 +404,7 @@ static void pl08x_start_next_txd(struct pl08x_dma_chan *plchan)
while (pl08x_phy_channel_busy(phychan))
cpu_relax();
- lli = &txd->llis_va[0];
-
- dev_vdbg(&pl08x->adev->dev,
- "WRITE channel %d: csrc=0x%08x, cdst=0x%08x, "
- "clli=0x%08x, cctl=0x%08x, ccfg=0x%08x\n",
- phychan->id, lli->src, lli->dst, lli->lli, lli->cctl,
- txd->ccfg);
-
- writel(lli->src, phychan->base + PL080_CH_SRC_ADDR);
- writel(lli->dst, phychan->base + PL080_CH_DST_ADDR);
- writel(lli->lli, phychan->base + PL080_CH_LLI);
- writel(lli->cctl, phychan->base + PL080_CH_CONTROL);
- writel(txd->ccfg, phychan->base + PL080_CH_CONFIG);
+ pl08x_write_lli(pl08x, phychan, &txd->llis_va[0], txd->ccfg);
/* Enable the DMA channel */
/* Do not access config register until channel shows as disabled */
@@ -381,11 +412,11 @@ static void pl08x_start_next_txd(struct pl08x_dma_chan *plchan)
cpu_relax();
/* Do not access config register until channel shows as inactive */
- val = readl(phychan->base + PL080_CH_CONFIG);
+ val = readl(phychan->reg_config);
while ((val & PL080_CONFIG_ACTIVE) || (val & PL080_CONFIG_ENABLE))
- val = readl(phychan->base + PL080_CH_CONFIG);
+ val = readl(phychan->reg_config);
- writel(val | PL080_CONFIG_ENABLE, phychan->base + PL080_CH_CONFIG);
+ writel(val | PL080_CONFIG_ENABLE, phychan->reg_config);
}
/*
@@ -404,9 +435,9 @@ static void pl08x_pause_phy_chan(struct pl08x_phy_chan *ch)
int timeout;
/* Set the HALT bit and wait for the FIFO to drain */
- val = readl(ch->base + PL080_CH_CONFIG);
+ val = readl(ch->reg_config);
val |= PL080_CONFIG_HALT;
- writel(val, ch->base + PL080_CH_CONFIG);
+ writel(val, ch->reg_config);
/* Wait for channel inactive */
for (timeout = 1000; timeout; timeout--) {
@@ -423,9 +454,9 @@ static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch)
u32 val;
/* Clear the HALT bit */
- val = readl(ch->base + PL080_CH_CONFIG);
+ val = readl(ch->reg_config);
val &= ~PL080_CONFIG_HALT;
- writel(val, ch->base + PL080_CH_CONFIG);
+ writel(val, ch->reg_config);
}
/*
@@ -437,12 +468,12 @@ static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch)
static void pl08x_terminate_phy_chan(struct pl08x_driver_data *pl08x,
struct pl08x_phy_chan *ch)
{
- u32 val = readl(ch->base + PL080_CH_CONFIG);
+ u32 val = readl(ch->reg_config);
val &= ~(PL080_CONFIG_ENABLE | PL080_CONFIG_ERR_IRQ_MASK |
PL080_CONFIG_TC_IRQ_MASK);
- writel(val, ch->base + PL080_CH_CONFIG);
+ writel(val, ch->reg_config);
writel(1 << ch->id, pl08x->base + PL080_ERR_CLEAR);
writel(1 << ch->id, pl08x->base + PL080_TC_CLEAR);
@@ -453,6 +484,28 @@ static inline u32 get_bytes_in_cctl(u32 cctl)
/* The source width defines the number of bytes */
u32 bytes = cctl & PL080_CONTROL_TRANSFER_SIZE_MASK;
+ cctl &= PL080_CONTROL_SWIDTH_MASK;
+
+ switch (cctl >> PL080_CONTROL_SWIDTH_SHIFT) {
+ case PL080_WIDTH_8BIT:
+ break;
+ case PL080_WIDTH_16BIT:
+ bytes *= 2;
+ break;
+ case PL080_WIDTH_32BIT:
+ bytes *= 4;
+ break;
+ }
+ return bytes;
+}
+
+static inline u32 get_bytes_in_cctl_pl080s(u32 cctl, u32 cctl1)
+{
+ /* The source width defines the number of bytes */
+ u32 bytes = cctl1 & PL080S_CONTROL_TRANSFER_SIZE_MASK;
+
+ cctl &= PL080_CONTROL_SWIDTH_MASK;
+
switch (cctl >> PL080_CONTROL_SWIDTH_SHIFT) {
case PL080_WIDTH_8BIT:
break;
@@ -469,47 +522,66 @@ static inline u32 get_bytes_in_cctl(u32 cctl)
/* The channel should be paused when calling this */
static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan)
{
+ struct pl08x_driver_data *pl08x = plchan->host;
+ const u32 *llis_va, *llis_va_limit;
struct pl08x_phy_chan *ch;
+ dma_addr_t llis_bus;
struct pl08x_txd *txd;
- size_t bytes = 0;
+ u32 llis_max_words;
+ size_t bytes;
+ u32 clli;
ch = plchan->phychan;
txd = plchan->at;
+ if (!ch || !txd)
+ return 0;
+
/*
* Follow the LLIs to get the number of remaining
* bytes in the currently active transaction.
*/
- if (ch && txd) {
- u32 clli = readl(ch->base + PL080_CH_LLI) & ~PL080_LLI_LM_AHB2;
+ clli = readl(ch->base + PL080_CH_LLI) & ~PL080_LLI_LM_AHB2;
- /* First get the remaining bytes in the active transfer */
+ /* First get the remaining bytes in the active transfer */
+ if (pl08x->vd->pl080s)
+ bytes = get_bytes_in_cctl_pl080s(
+ readl(ch->base + PL080_CH_CONTROL),
+ readl(ch->base + PL080S_CH_CONTROL2));
+ else
bytes = get_bytes_in_cctl(readl(ch->base + PL080_CH_CONTROL));
- if (clli) {
- struct pl08x_lli *llis_va = txd->llis_va;
- dma_addr_t llis_bus = txd->llis_bus;
- int index;
+ if (!clli)
+ return bytes;
- BUG_ON(clli < llis_bus || clli >= llis_bus +
- sizeof(struct pl08x_lli) * MAX_NUM_TSFR_LLIS);
+ llis_va = txd->llis_va;
+ llis_bus = txd->llis_bus;
- /*
- * Locate the next LLI - as this is an array,
- * it's simple maths to find.
- */
- index = (clli - llis_bus) / sizeof(struct pl08x_lli);
+ llis_max_words = pl08x->lli_words * MAX_NUM_TSFR_LLIS;
+ BUG_ON(clli < llis_bus || clli >= llis_bus +
+ sizeof(u32) * llis_max_words);
- for (; index < MAX_NUM_TSFR_LLIS; index++) {
- bytes += get_bytes_in_cctl(llis_va[index].cctl);
+ /*
+ * Locate the next LLI - as this is an array,
+ * it's simple maths to find.
+ */
+ llis_va += (clli - llis_bus) / sizeof(u32);
- /*
- * A LLI pointer of 0 terminates the LLI list
- */
- if (!llis_va[index].lli)
- break;
- }
- }
+ llis_va_limit = llis_va + llis_max_words;
+
+ for (; llis_va < llis_va_limit; llis_va += pl08x->lli_words) {
+ if (pl08x->vd->pl080s)
+ bytes += get_bytes_in_cctl_pl080s(
+ llis_va[PL080_LLI_CCTL],
+ llis_va[PL080S_LLI_CCTL2]);
+ else
+ bytes += get_bytes_in_cctl(llis_va[PL080_LLI_CCTL]);
+
+ /*
+ * An LLI pointer going backward terminates the LLI list
+ */
+ if (llis_va[PL080_LLI_LLI] <= clli)
+ break;
}
return bytes;
@@ -720,6 +792,7 @@ static inline u32 pl08x_cctl_bits(u32 cctl, u8 srcwidth, u8 dstwidth,
break;
}
+ tsize &= PL080_CONTROL_TRANSFER_SIZE_MASK;
retbits |= tsize << PL080_CONTROL_TRANSFER_SIZE_SHIFT;
return retbits;
}
@@ -764,20 +837,26 @@ static void pl08x_choose_master_bus(struct pl08x_lli_build_data *bd,
/*
* Fills in one LLI for a certain transfer descriptor and advance the counter
*/
-static void pl08x_fill_lli_for_desc(struct pl08x_lli_build_data *bd,
- int num_llis, int len, u32 cctl)
+static void pl08x_fill_lli_for_desc(struct pl08x_driver_data *pl08x,
+ struct pl08x_lli_build_data *bd,
+ int num_llis, int len, u32 cctl, u32 cctl2)
{
- struct pl08x_lli *llis_va = bd->txd->llis_va;
+ u32 offset = num_llis * pl08x->lli_words;
+ u32 *llis_va = bd->txd->llis_va + offset;
dma_addr_t llis_bus = bd->txd->llis_bus;
BUG_ON(num_llis >= MAX_NUM_TSFR_LLIS);
- llis_va[num_llis].cctl = cctl;
- llis_va[num_llis].src = bd->srcbus.addr;
- llis_va[num_llis].dst = bd->dstbus.addr;
- llis_va[num_llis].lli = llis_bus + (num_llis + 1) *
- sizeof(struct pl08x_lli);
- llis_va[num_llis].lli |= bd->lli_bus;
+ /* Advance the offset to next LLI. */
+ offset += pl08x->lli_words;
+
+ llis_va[PL080_LLI_SRC] = bd->srcbus.addr;
+ llis_va[PL080_LLI_DST] = bd->dstbus.addr;
+ llis_va[PL080_LLI_LLI] = (llis_bus + sizeof(u32) * offset);
+ llis_va[PL080_LLI_LLI] |= bd->lli_bus;
+ llis_va[PL080_LLI_CCTL] = cctl;
+ if (pl08x->vd->pl080s)
+ llis_va[PL080S_LLI_CCTL2] = cctl2;
if (cctl & PL080_CONTROL_SRC_INCR)
bd->srcbus.addr += len;
@@ -789,14 +868,53 @@ static void pl08x_fill_lli_for_desc(struct pl08x_lli_build_data *bd,
bd->remainder -= len;
}
-static inline void prep_byte_width_lli(struct pl08x_lli_build_data *bd,
- u32 *cctl, u32 len, int num_llis, size_t *total_bytes)
+static inline void prep_byte_width_lli(struct pl08x_driver_data *pl08x,
+ struct pl08x_lli_build_data *bd, u32 *cctl, u32 len,
+ int num_llis, size_t *total_bytes)
{
*cctl = pl08x_cctl_bits(*cctl, 1, 1, len);
- pl08x_fill_lli_for_desc(bd, num_llis, len, *cctl);
+ pl08x_fill_lli_for_desc(pl08x, bd, num_llis, len, *cctl, len);
(*total_bytes) += len;
}
+#ifdef VERBOSE_DEBUG
+static void pl08x_dump_lli(struct pl08x_driver_data *pl08x,
+ const u32 *llis_va, int num_llis)
+{
+ int i;
+
+ if (pl08x->vd->pl080s) {
+ dev_vdbg(&pl08x->adev->dev,
+ "%-3s %-9s %-10s %-10s %-10s %-10s %s\n",
+ "lli", "", "csrc", "cdst", "clli", "cctl", "cctl2");
+ for (i = 0; i < num_llis; i++) {
+ dev_vdbg(&pl08x->adev->dev,
+ "%3d @%p: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ i, llis_va, llis_va[PL080_LLI_SRC],
+ llis_va[PL080_LLI_DST], llis_va[PL080_LLI_LLI],
+ llis_va[PL080_LLI_CCTL],
+ llis_va[PL080S_LLI_CCTL2]);
+ llis_va += pl08x->lli_words;
+ }
+ } else {
+ dev_vdbg(&pl08x->adev->dev,
+ "%-3s %-9s %-10s %-10s %-10s %s\n",
+ "lli", "", "csrc", "cdst", "clli", "cctl");
+ for (i = 0; i < num_llis; i++) {
+ dev_vdbg(&pl08x->adev->dev,
+ "%3d @%p: 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ i, llis_va, llis_va[PL080_LLI_SRC],
+ llis_va[PL080_LLI_DST], llis_va[PL080_LLI_LLI],
+ llis_va[PL080_LLI_CCTL]);
+ llis_va += pl08x->lli_words;
+ }
+ }
+}
+#else
+static inline void pl08x_dump_lli(struct pl08x_driver_data *pl08x,
+ const u32 *llis_va, int num_llis) {}
+#endif
+
/*
* This fills in the table of LLIs for the transfer descriptor
* Note that we assume we never have to change the burst sizes
@@ -810,7 +928,7 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
int num_llis = 0;
u32 cctl, early_bytes = 0;
size_t max_bytes_per_lli, total_bytes;
- struct pl08x_lli *llis_va;
+ u32 *llis_va, *last_lli;
struct pl08x_sg *dsg;
txd->llis_va = dma_pool_alloc(pl08x->pool, GFP_NOWAIT, &txd->llis_bus);
@@ -845,10 +963,13 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
pl08x_choose_master_bus(&bd, &mbus, &sbus, cctl);
- dev_vdbg(&pl08x->adev->dev, "src=0x%08x%s/%u dst=0x%08x%s/%u len=%zu\n",
- bd.srcbus.addr, cctl & PL080_CONTROL_SRC_INCR ? "+" : "",
+ dev_vdbg(&pl08x->adev->dev,
+ "src=0x%08llx%s/%u dst=0x%08llx%s/%u len=%zu\n",
+ (u64)bd.srcbus.addr,
+ cctl & PL080_CONTROL_SRC_INCR ? "+" : "",
bd.srcbus.buswidth,
- bd.dstbus.addr, cctl & PL080_CONTROL_DST_INCR ? "+" : "",
+ (u64)bd.dstbus.addr,
+ cctl & PL080_CONTROL_DST_INCR ? "+" : "",
bd.dstbus.buswidth,
bd.remainder);
dev_vdbg(&pl08x->adev->dev, "mbus=%s sbus=%s\n",
@@ -886,8 +1007,8 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
return 0;
}
- if ((bd.srcbus.addr % bd.srcbus.buswidth) ||
- (bd.dstbus.addr % bd.dstbus.buswidth)) {
+ if (!IS_BUS_ALIGNED(&bd.srcbus) ||
+ !IS_BUS_ALIGNED(&bd.dstbus)) {
dev_err(&pl08x->adev->dev,
"%s src & dst address must be aligned to src"
" & dst width if peripheral is flow controller",
@@ -897,7 +1018,8 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
cctl = pl08x_cctl_bits(cctl, bd.srcbus.buswidth,
bd.dstbus.buswidth, 0);
- pl08x_fill_lli_for_desc(&bd, num_llis++, 0, cctl);
+ pl08x_fill_lli_for_desc(pl08x, &bd, num_llis++,
+ 0, cctl, 0);
break;
}
@@ -908,9 +1030,9 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
*/
if (bd.remainder < mbus->buswidth)
early_bytes = bd.remainder;
- else if ((mbus->addr) % (mbus->buswidth)) {
- early_bytes = mbus->buswidth - (mbus->addr) %
- (mbus->buswidth);
+ else if (!IS_BUS_ALIGNED(mbus)) {
+ early_bytes = mbus->buswidth -
+ (mbus->addr & (mbus->buswidth - 1));
if ((bd.remainder - early_bytes) < mbus->buswidth)
early_bytes = bd.remainder;
}
@@ -919,8 +1041,8 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
dev_vdbg(&pl08x->adev->dev,
"%s byte width LLIs (remain 0x%08x)\n",
__func__, bd.remainder);
- prep_byte_width_lli(&bd, &cctl, early_bytes, num_llis++,
- &total_bytes);
+ prep_byte_width_lli(pl08x, &bd, &cctl, early_bytes,
+ num_llis++, &total_bytes);
}
if (bd.remainder) {
@@ -928,7 +1050,7 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
* Master now aligned
* - if slave is not then we must set its width down
*/
- if (sbus->addr % sbus->buswidth) {
+ if (!IS_BUS_ALIGNED(sbus)) {
dev_dbg(&pl08x->adev->dev,
"%s set down bus width to one byte\n",
__func__);
@@ -941,7 +1063,7 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
* MIN(buswidths)
*/
max_bytes_per_lli = bd.srcbus.buswidth *
- PL080_CONTROL_TRANSFER_SIZE_MASK;
+ pl08x->vd->max_transfer_size;
dev_vdbg(&pl08x->adev->dev,
"%s max bytes per lli = %zu\n",
__func__, max_bytes_per_lli);
@@ -976,8 +1098,8 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
cctl = pl08x_cctl_bits(cctl, bd.srcbus.buswidth,
bd.dstbus.buswidth, tsize);
- pl08x_fill_lli_for_desc(&bd, num_llis++,
- lli_len, cctl);
+ pl08x_fill_lli_for_desc(pl08x, &bd, num_llis++,
+ lli_len, cctl, tsize);
total_bytes += lli_len;
}
@@ -988,8 +1110,8 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
dev_vdbg(&pl08x->adev->dev,
"%s align with boundary, send odd bytes (remain %zu)\n",
__func__, bd.remainder);
- prep_byte_width_lli(&bd, &cctl, bd.remainder,
- num_llis++, &total_bytes);
+ prep_byte_width_lli(pl08x, &bd, &cctl,
+ bd.remainder, num_llis++, &total_bytes);
}
}
@@ -1003,33 +1125,25 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
if (num_llis >= MAX_NUM_TSFR_LLIS) {
dev_err(&pl08x->adev->dev,
"%s need to increase MAX_NUM_TSFR_LLIS from 0x%08x\n",
- __func__, (u32) MAX_NUM_TSFR_LLIS);
+ __func__, MAX_NUM_TSFR_LLIS);
return 0;
}
}
llis_va = txd->llis_va;
- /* The final LLI terminates the LLI. */
- llis_va[num_llis - 1].lli = 0;
- /* The final LLI element shall also fire an interrupt. */
- llis_va[num_llis - 1].cctl |= PL080_CONTROL_TC_IRQ_EN;
+ last_lli = llis_va + (num_llis - 1) * pl08x->lli_words;
-#ifdef VERBOSE_DEBUG
- {
- int i;
-
- dev_vdbg(&pl08x->adev->dev,
- "%-3s %-9s %-10s %-10s %-10s %s\n",
- "lli", "", "csrc", "cdst", "clli", "cctl");
- for (i = 0; i < num_llis; i++) {
- dev_vdbg(&pl08x->adev->dev,
- "%3d @%p: 0x%08x 0x%08x 0x%08x 0x%08x\n",
- i, &llis_va[i], llis_va[i].src,
- llis_va[i].dst, llis_va[i].lli, llis_va[i].cctl
- );
- }
+ if (txd->cyclic) {
+ /* Link back to the first LLI. */
+ last_lli[PL080_LLI_LLI] = txd->llis_bus | bd.lli_bus;
+ } else {
+ /* The final LLI terminates the LLI list. */
+ last_lli[PL080_LLI_LLI] = 0;
+ /* The final LLI element shall also fire an interrupt. */
+ last_lli[PL080_LLI_CCTL] |= PL080_CONTROL_TC_IRQ_EN;
}
-#endif
+
+ pl08x_dump_lli(pl08x, llis_va, num_llis);
return num_llis;
}
@@ -1305,6 +1419,7 @@ static int dma_set_runtime_config(struct dma_chan *chan,
struct dma_slave_config *config)
{
struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
+ struct pl08x_driver_data *pl08x = plchan->host;
if (!plchan->slave)
return -EINVAL;
@@ -1314,6 +1429,13 @@ static int dma_set_runtime_config(struct dma_chan *chan,
config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
return -EINVAL;
+ if (config->device_fc && pl08x->vd->pl080s) {
+ dev_err(&pl08x->adev->dev,
+ "%s: PL080S does not support peripheral flow control\n",
+ __func__);
+ return -EINVAL;
+ }
+
plchan->cfg = *config;
return 0;
@@ -1404,25 +1526,19 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
return vchan_tx_prep(&plchan->vc, &txd->vd, flags);
}
-static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
- struct dma_chan *chan, struct scatterlist *sgl,
- unsigned int sg_len, enum dma_transfer_direction direction,
- unsigned long flags, void *context)
+static struct pl08x_txd *pl08x_init_txd(
+ struct dma_chan *chan,
+ enum dma_transfer_direction direction,
+ dma_addr_t *slave_addr)
{
struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
struct pl08x_driver_data *pl08x = plchan->host;
struct pl08x_txd *txd;
- struct pl08x_sg *dsg;
- struct scatterlist *sg;
enum dma_slave_buswidth addr_width;
- dma_addr_t slave_addr;
int ret, tmp;
u8 src_buses, dst_buses;
u32 maxburst, cctl;
- dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n",
- __func__, sg_dma_len(sgl), plchan->name);
-
txd = pl08x_get_txd(plchan);
if (!txd) {
dev_err(&pl08x->adev->dev, "%s no txd\n", __func__);
@@ -1436,14 +1552,14 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
*/
if (direction == DMA_MEM_TO_DEV) {
cctl = PL080_CONTROL_SRC_INCR;
- slave_addr = plchan->cfg.dst_addr;
+ *slave_addr = plchan->cfg.dst_addr;
addr_width = plchan->cfg.dst_addr_width;
maxburst = plchan->cfg.dst_maxburst;
src_buses = pl08x->mem_buses;
dst_buses = plchan->cd->periph_buses;
} else if (direction == DMA_DEV_TO_MEM) {
cctl = PL080_CONTROL_DST_INCR;
- slave_addr = plchan->cfg.src_addr;
+ *slave_addr = plchan->cfg.src_addr;
addr_width = plchan->cfg.src_addr_width;
maxburst = plchan->cfg.src_maxburst;
src_buses = plchan->cd->periph_buses;
@@ -1492,24 +1608,107 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
else
txd->ccfg |= plchan->signal << PL080_CONFIG_SRC_SEL_SHIFT;
+ return txd;
+}
+
+static int pl08x_tx_add_sg(struct pl08x_txd *txd,
+ enum dma_transfer_direction direction,
+ dma_addr_t slave_addr,
+ dma_addr_t buf_addr,
+ unsigned int len)
+{
+ struct pl08x_sg *dsg;
+
+ dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT);
+ if (!dsg)
+ return -ENOMEM;
+
+ list_add_tail(&dsg->node, &txd->dsg_list);
+
+ dsg->len = len;
+ if (direction == DMA_MEM_TO_DEV) {
+ dsg->src_addr = buf_addr;
+ dsg->dst_addr = slave_addr;
+ } else {
+ dsg->src_addr = slave_addr;
+ dsg->dst_addr = buf_addr;
+ }
+
+ return 0;
+}
+
+static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
+ struct dma_chan *chan, struct scatterlist *sgl,
+ unsigned int sg_len, enum dma_transfer_direction direction,
+ unsigned long flags, void *context)
+{
+ struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
+ struct pl08x_driver_data *pl08x = plchan->host;
+ struct pl08x_txd *txd;
+ struct scatterlist *sg;
+ int ret, tmp;
+ dma_addr_t slave_addr;
+
+ dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n",
+ __func__, sg_dma_len(sgl), plchan->name);
+
+ txd = pl08x_init_txd(chan, direction, &slave_addr);
+ if (!txd)
+ return NULL;
+
for_each_sg(sgl, sg, sg_len, tmp) {
- dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT);
- if (!dsg) {
+ ret = pl08x_tx_add_sg(txd, direction, slave_addr,
+ sg_dma_address(sg),
+ sg_dma_len(sg));
+ if (ret) {
pl08x_release_mux(plchan);
pl08x_free_txd(pl08x, txd);
dev_err(&pl08x->adev->dev, "%s no mem for pl080 sg\n",
__func__);
return NULL;
}
- list_add_tail(&dsg->node, &txd->dsg_list);
+ }
- dsg->len = sg_dma_len(sg);
- if (direction == DMA_MEM_TO_DEV) {
- dsg->src_addr = sg_dma_address(sg);
- dsg->dst_addr = slave_addr;
- } else {
- dsg->src_addr = slave_addr;
- dsg->dst_addr = sg_dma_address(sg);
+ ret = pl08x_fill_llis_for_desc(plchan->host, txd);
+ if (!ret) {
+ pl08x_release_mux(plchan);
+ pl08x_free_txd(pl08x, txd);
+ return NULL;
+ }
+
+ return vchan_tx_prep(&plchan->vc, &txd->vd, flags);
+}
+
+static struct dma_async_tx_descriptor *pl08x_prep_dma_cyclic(
+ struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
+ size_t period_len, enum dma_transfer_direction direction,
+ unsigned long flags, void *context)
+{
+ struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
+ struct pl08x_driver_data *pl08x = plchan->host;
+ struct pl08x_txd *txd;
+ int ret, tmp;
+ dma_addr_t slave_addr;
+
+ dev_dbg(&pl08x->adev->dev,
+ "%s prepare cyclic transaction of %d/%d bytes %s %s\n",
+ __func__, period_len, buf_len,
+ direction == DMA_MEM_TO_DEV ? "to" : "from",
+ plchan->name);
+
+ txd = pl08x_init_txd(chan, direction, &slave_addr);
+ if (!txd)
+ return NULL;
+
+ txd->cyclic = true;
+ txd->cctl |= PL080_CONTROL_TC_IRQ_EN;
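+ /* one dsg entry per period; the LLI filler links the last LLI back to the first */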
+ for (tmp = 0; tmp < buf_len; tmp += period_len) {
+ ret = pl08x_tx_add_sg(txd, direction, slave_addr,
+ buf_addr + tmp, period_len);
+ if (ret) {
+ pl08x_release_mux(plchan);
+ pl08x_free_txd(pl08x, txd);
+ return NULL;
}
}
@@ -1652,7 +1851,9 @@ static irqreturn_t pl08x_irq(int irq, void *dev)
spin_lock(&plchan->vc.lock);
tx = plchan->at;
- if (tx) {
+ if (tx && tx->cyclic) {
+ vchan_cyclic_callback(&tx->vd);
+ } else if (tx) {
plchan->at = NULL;
/*
* This descriptor is done, release its mux
@@ -1846,6 +2047,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
{
struct pl08x_driver_data *pl08x;
const struct vendor_data *vd = id->data;
+ u32 tsfr_size;
int ret = 0;
int i;
@@ -1873,6 +2075,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
/* Initialize slave engine */
dma_cap_set(DMA_SLAVE, pl08x->slave.cap_mask);
+ dma_cap_set(DMA_CYCLIC, pl08x->slave.cap_mask);
pl08x->slave.dev = &adev->dev;
pl08x->slave.device_alloc_chan_resources = pl08x_alloc_chan_resources;
pl08x->slave.device_free_chan_resources = pl08x_free_chan_resources;
@@ -1880,6 +2083,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
pl08x->slave.device_tx_status = pl08x_dma_tx_status;
pl08x->slave.device_issue_pending = pl08x_issue_pending;
pl08x->slave.device_prep_slave_sg = pl08x_prep_slave_sg;
+ pl08x->slave.device_prep_dma_cyclic = pl08x_prep_dma_cyclic;
pl08x->slave.device_control = pl08x_control;
/* Get the platform data */
@@ -1902,9 +2106,15 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
pl08x->mem_buses = pl08x->pd->mem_buses;
}
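+ /* PL080S LLIs carry an extra control word, so size the pool for the variant in use */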
+ if (vd->pl080s)
+ pl08x->lli_words = PL080S_LLI_WORDS;
+ else
+ pl08x->lli_words = PL080_LLI_WORDS;
+ tsfr_size = MAX_NUM_TSFR_LLIS * pl08x->lli_words * sizeof(u32);
+
/* A DMA memory pool for LLIs, align on 1-byte boundary */
pl08x->pool = dma_pool_create(DRIVER_NAME, &pl08x->adev->dev,
- PL08X_LLI_TSFR_SIZE, PL08X_ALIGN, 0);
+ tsfr_size, PL08X_ALIGN, 0);
if (!pl08x->pool) {
ret = -ENOMEM;
goto out_no_lli_pool;
@@ -1947,6 +2157,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
ch->id = i;
ch->base = pl08x->base + PL080_Cx_BASE(i);
+ ch->reg_config = ch->base + vd->config_offset;
spin_lock_init(&ch->lock);
/*
@@ -1957,7 +2168,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
if (vd->nomadik) {
u32 val;
- val = readl(ch->base + PL080_CH_CONFIG);
+ val = readl(ch->reg_config);
if (val & (PL080N_CONFIG_ITPROT | PL080N_CONFIG_SECPROT)) {
dev_info(&adev->dev, "physical channel %d reserved for secure access only\n", i);
ch->locked = true;
@@ -2008,8 +2219,8 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
amba_set_drvdata(adev, pl08x);
init_pl08x_debugfs(pl08x);
- dev_info(&pl08x->adev->dev, "DMA: PL%03x rev%u at 0x%08llx irq %d\n",
- amba_part(adev), amba_rev(adev),
+ dev_info(&pl08x->adev->dev, "DMA: PL%03x%s rev%u at 0x%08llx irq %d\n",
+ amba_part(adev), pl08x->vd->pl080s ? "s" : "", amba_rev(adev),
(unsigned long long)adev->res.start, adev->irq[0]);
return 0;
@@ -2038,22 +2249,41 @@ out_no_pl08x:
/* PL080 has 8 channels and the PL081 has just 2 */
static struct vendor_data vendor_pl080 = {
+ .config_offset = PL080_CH_CONFIG,
.channels = 8,
.dualmaster = true,
+ .max_transfer_size = PL080_CONTROL_TRANSFER_SIZE_MASK,
};
static struct vendor_data vendor_nomadik = {
+ .config_offset = PL080_CH_CONFIG,
.channels = 8,
.dualmaster = true,
.nomadik = true,
+ .max_transfer_size = PL080_CONTROL_TRANSFER_SIZE_MASK,
+};
+
+static struct vendor_data vendor_pl080s = {
+ .config_offset = PL080S_CH_CONFIG,
+ .channels = 8,
+ .pl080s = true,
+ .max_transfer_size = PL080S_CONTROL_TRANSFER_SIZE_MASK,
};
static struct vendor_data vendor_pl081 = {
+ .config_offset = PL080_CH_CONFIG,
.channels = 2,
.dualmaster = false,
+ .max_transfer_size = PL080_CONTROL_TRANSFER_SIZE_MASK,
};
static struct amba_id pl08x_ids[] = {
+ /* Samsung PL080S variant */
+ {
+ .id = 0x0a141080,
+ .mask = 0xffffffff,
+ .data = &vendor_pl080s,
+ },
/* PL080 */
{
.id = 0x00041080,
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 99af4db..9162ac8 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -382,20 +382,30 @@ void dma_issue_pending_all(void)
EXPORT_SYMBOL(dma_issue_pending_all);
/**
- * nth_chan - returns the nth channel of the given capability
+ * dma_chan_is_local - returns true if the channel is in the same numa-node as the cpu
+ * @chan: channel to check
+ * @cpu: cpu index to test for locality
+ */
+static bool dma_chan_is_local(struct dma_chan *chan, int cpu)
+{
+ int node = dev_to_node(chan->device->dev);
+ return node == -1 || cpumask_test_cpu(cpu, cpumask_of_node(node));
+}
+
+/**
+ * min_chan - returns the channel with the lowest reference count in the same numa-node as the cpu
* @cap: capability to match
- * @n: nth channel desired
+ * @cpu: cpu index which the channel should be close to
*
- * Defaults to returning the channel with the desired capability and the
- * lowest reference count when 'n' cannot be satisfied. Must be called
- * under dma_list_mutex.
+ * If some channels are close to the given cpu, the one with the lowest
+ * reference count is returned. Otherwise, cpu is ignored and only the
+ * reference count is taken into account.
+ * Must be called under dma_list_mutex.
*/
-static struct dma_chan *nth_chan(enum dma_transaction_type cap, int n)
+static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu)
{
struct dma_device *device;
struct dma_chan *chan;
- struct dma_chan *ret = NULL;
struct dma_chan *min = NULL;
+ struct dma_chan *localmin = NULL;
list_for_each_entry(device, &dma_device_list, global_node) {
if (!dma_has_cap(cap, device->cap_mask) ||
@@ -404,27 +414,22 @@ static struct dma_chan *nth_chan(enum dma_transaction_type cap, int n)
list_for_each_entry(chan, &device->channels, device_node) {
if (!chan->client_count)
continue;
- if (!min)
- min = chan;
- else if (chan->table_count < min->table_count)
+ if (!min || chan->table_count < min->table_count)
min = chan;
- if (n-- == 0) {
- ret = chan;
- break; /* done */
- }
+ if (dma_chan_is_local(chan, cpu))
+ if (!localmin ||
+ chan->table_count < localmin->table_count)
+ localmin = chan;
}
- if (ret)
- break; /* done */
}
- if (!ret)
- ret = min;
+ chan = localmin ? localmin : min;
- if (ret)
- ret->table_count++;
+ if (chan)
+ chan->table_count++;
- return ret;
+ return chan;
}
/**
@@ -441,7 +446,6 @@ static void dma_channel_rebalance(void)
struct dma_device *device;
int cpu;
int cap;
- int n;
/* undo the last distribution */
for_each_dma_cap_mask(cap, dma_cap_mask_all)
@@ -460,14 +464,9 @@ static void dma_channel_rebalance(void)
return;
/* redistribute available channels */
- n = 0;
for_each_dma_cap_mask(cap, dma_cap_mask_all)
for_each_online_cpu(cpu) {
- if (num_possible_cpus() > 1)
- chan = nth_chan(cap, n++);
- else
- chan = nth_chan(cap, -1);
-
+ chan = min_chan(cap, cpu);
per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
}
}
@@ -510,7 +509,33 @@ static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
}
/**
- * dma_request_channel - try to allocate an exclusive channel
+ * dma_get_slave_channel - try to get the specified channel exclusively
+ * @chan: target channel
+ */
+struct dma_chan *dma_get_slave_channel(struct dma_chan *chan)
+{
+ int err = -EBUSY;
+
+ /* lock against __dma_request_channel */
+ mutex_lock(&dma_list_mutex);
+
+ if (chan->client_count == 0) {
+ err = dma_chan_get(chan);
+ if (err)
+ pr_debug("%s: failed to get %s: (%d)\n",
+ __func__, dma_chan_name(chan), err);
+ } else
+ chan = NULL;
+
+ mutex_unlock(&dma_list_mutex);
+
+ return chan;
+}
+EXPORT_SYMBOL_GPL(dma_get_slave_channel);
+
+/**
+ * __dma_request_channel - try to allocate an exclusive channel
* @mask: capabilities that the channel must satisfy
* @fn: optional callback to disposition available channels
* @fn_param: opaque parameter to pass to dma_filter_fn
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index e88ded2..92f796c 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -25,44 +25,46 @@
#include <linux/seq_file.h>
static unsigned int test_buf_size = 16384;
-module_param(test_buf_size, uint, S_IRUGO);
+module_param(test_buf_size, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(test_buf_size, "Size of the memcpy test buffer");
static char test_channel[20];
-module_param_string(channel, test_channel, sizeof(test_channel), S_IRUGO);
+module_param_string(channel, test_channel, sizeof(test_channel),
+ S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(channel, "Bus ID of the channel to test (default: any)");
static char test_device[20];
-module_param_string(device, test_device, sizeof(test_device), S_IRUGO);
+module_param_string(device, test_device, sizeof(test_device),
+ S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(device, "Bus ID of the DMA Engine to test (default: any)");
static unsigned int threads_per_chan = 1;
-module_param(threads_per_chan, uint, S_IRUGO);
+module_param(threads_per_chan, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(threads_per_chan,
"Number of threads to start per channel (default: 1)");
static unsigned int max_channels;
-module_param(max_channels, uint, S_IRUGO);
+module_param(max_channels, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_channels,
"Maximum number of channels to use (default: all)");
static unsigned int iterations;
-module_param(iterations, uint, S_IRUGO);
+module_param(iterations, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(iterations,
"Iterations before stopping test (default: infinite)");
static unsigned int xor_sources = 3;
-module_param(xor_sources, uint, S_IRUGO);
+module_param(xor_sources, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(xor_sources,
"Number of xor source buffers (default: 3)");
static unsigned int pq_sources = 3;
-module_param(pq_sources, uint, S_IRUGO);
+module_param(pq_sources, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(pq_sources,
"Number of p+q source buffers (default: 3)");
static int timeout = 3000;
-module_param(timeout, uint, S_IRUGO);
+module_param(timeout, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(timeout, "Transfer Timeout in msec (default: 3000), "
"Pass -1 for infinite timeout");
@@ -193,7 +195,6 @@ struct dmatest_info {
/* debugfs related stuff */
struct dentry *root;
- struct dmatest_params dbgfs_params;
/* Test results */
struct list_head results;
@@ -406,7 +407,11 @@ static int thread_result_add(struct dmatest_info *info,
list_add_tail(&tr->node, &r->results);
mutex_unlock(&info->results_lock);
- pr_warn("%s\n", thread_result_get(r->name, tr));
+ if (tr->type == DMATEST_ET_OK)
+ pr_debug("%s\n", thread_result_get(r->name, tr));
+ else
+ pr_warn("%s\n", thread_result_get(r->name, tr));
+
return 0;
}
@@ -1007,7 +1012,15 @@ static int __restart_threaded_test(struct dmatest_info *info, bool run)
result_free(info, NULL);
/* Copy test parameters */
- memcpy(params, &info->dbgfs_params, sizeof(*params));
+ params->buf_size = test_buf_size;
+ strlcpy(params->channel, strim(test_channel), sizeof(params->channel));
+ strlcpy(params->device, strim(test_device), sizeof(params->device));
+ params->threads_per_chan = threads_per_chan;
+ params->max_channels = max_channels;
+ params->iterations = iterations;
+ params->xor_sources = xor_sources;
+ params->pq_sources = pq_sources;
+ params->timeout = timeout;
/* Run test with new parameters */
return __run_threaded_test(info);
@@ -1029,71 +1042,6 @@ static bool __is_threaded_test_run(struct dmatest_info *info)
return false;
}
-static ssize_t dtf_write_string(void *to, size_t available, loff_t *ppos,
- const void __user *from, size_t count)
-{
- char tmp[20];
- ssize_t len;
-
- len = simple_write_to_buffer(tmp, sizeof(tmp) - 1, ppos, from, count);
- if (len >= 0) {
- tmp[len] = '\0';
- strlcpy(to, strim(tmp), available);
- }
-
- return len;
-}
-
-static ssize_t dtf_read_channel(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct dmatest_info *info = file->private_data;
- return simple_read_from_buffer(buf, count, ppos,
- info->dbgfs_params.channel,
- strlen(info->dbgfs_params.channel));
-}
-
-static ssize_t dtf_write_channel(struct file *file, const char __user *buf,
- size_t size, loff_t *ppos)
-{
- struct dmatest_info *info = file->private_data;
- return dtf_write_string(info->dbgfs_params.channel,
- sizeof(info->dbgfs_params.channel),
- ppos, buf, size);
-}
-
-static const struct file_operations dtf_channel_fops = {
- .read = dtf_read_channel,
- .write = dtf_write_channel,
- .open = simple_open,
- .llseek = default_llseek,
-};
-
-static ssize_t dtf_read_device(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct dmatest_info *info = file->private_data;
- return simple_read_from_buffer(buf, count, ppos,
- info->dbgfs_params.device,
- strlen(info->dbgfs_params.device));
-}
-
-static ssize_t dtf_write_device(struct file *file, const char __user *buf,
- size_t size, loff_t *ppos)
-{
- struct dmatest_info *info = file->private_data;
- return dtf_write_string(info->dbgfs_params.device,
- sizeof(info->dbgfs_params.device),
- ppos, buf, size);
-}
-
-static const struct file_operations dtf_device_fops = {
- .read = dtf_read_device,
- .write = dtf_write_device,
- .open = simple_open,
- .llseek = default_llseek,
-};
-
static ssize_t dtf_read_run(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
@@ -1187,8 +1135,6 @@ static const struct file_operations dtf_results_fops = {
static int dmatest_register_dbgfs(struct dmatest_info *info)
{
struct dentry *d;
- struct dmatest_params *params = &info->dbgfs_params;
- int ret = -ENOMEM;
d = debugfs_create_dir("dmatest", NULL);
if (IS_ERR(d))
@@ -1198,81 +1144,24 @@ static int dmatest_register_dbgfs(struct dmatest_info *info)
info->root = d;
- /* Copy initial values */
- memcpy(params, &info->params, sizeof(*params));
-
- /* Test parameters */
-
- d = debugfs_create_u32("test_buf_size", S_IWUSR | S_IRUGO, info->root,
- (u32 *)&params->buf_size);
- if (IS_ERR_OR_NULL(d))
- goto err_node;
-
- d = debugfs_create_file("channel", S_IRUGO | S_IWUSR, info->root,
- info, &dtf_channel_fops);
- if (IS_ERR_OR_NULL(d))
- goto err_node;
-
- d = debugfs_create_file("device", S_IRUGO | S_IWUSR, info->root,
- info, &dtf_device_fops);
- if (IS_ERR_OR_NULL(d))
- goto err_node;
-
- d = debugfs_create_u32("threads_per_chan", S_IWUSR | S_IRUGO, info->root,
- (u32 *)&params->threads_per_chan);
- if (IS_ERR_OR_NULL(d))
- goto err_node;
-
- d = debugfs_create_u32("max_channels", S_IWUSR | S_IRUGO, info->root,
- (u32 *)&params->max_channels);
- if (IS_ERR_OR_NULL(d))
- goto err_node;
-
- d = debugfs_create_u32("iterations", S_IWUSR | S_IRUGO, info->root,
- (u32 *)&params->iterations);
- if (IS_ERR_OR_NULL(d))
- goto err_node;
-
- d = debugfs_create_u32("xor_sources", S_IWUSR | S_IRUGO, info->root,
- (u32 *)&params->xor_sources);
- if (IS_ERR_OR_NULL(d))
- goto err_node;
-
- d = debugfs_create_u32("pq_sources", S_IWUSR | S_IRUGO, info->root,
- (u32 *)&params->pq_sources);
- if (IS_ERR_OR_NULL(d))
- goto err_node;
-
- d = debugfs_create_u32("timeout", S_IWUSR | S_IRUGO, info->root,
- (u32 *)&params->timeout);
- if (IS_ERR_OR_NULL(d))
- goto err_node;
-
/* Run or stop threaded test */
- d = debugfs_create_file("run", S_IWUSR | S_IRUGO, info->root,
- info, &dtf_run_fops);
- if (IS_ERR_OR_NULL(d))
- goto err_node;
+ debugfs_create_file("run", S_IWUSR | S_IRUGO, info->root, info,
+ &dtf_run_fops);
/* Results of test in progress */
- d = debugfs_create_file("results", S_IRUGO, info->root, info,
- &dtf_results_fops);
- if (IS_ERR_OR_NULL(d))
- goto err_node;
+ debugfs_create_file("results", S_IRUGO, info->root, info,
+ &dtf_results_fops);
return 0;
-err_node:
- debugfs_remove_recursive(info->root);
err_root:
pr_err("dmatest: Failed to initialize debugfs\n");
- return ret;
+ return -ENOMEM;
}
static int __init dmatest_init(void)
{
struct dmatest_info *info = &test_info;
- struct dmatest_params *params = &info->params;
int ret;
memset(info, 0, sizeof(*info));
@@ -1283,17 +1172,6 @@ static int __init dmatest_init(void)
mutex_init(&info->results_lock);
INIT_LIST_HEAD(&info->results);
- /* Set default parameters */
- params->buf_size = test_buf_size;
- strlcpy(params->channel, test_channel, sizeof(params->channel));
- strlcpy(params->device, test_device, sizeof(params->device));
- params->threads_per_chan = threads_per_chan;
- params->max_channels = max_channels;
- params->iterations = iterations;
- params->xor_sources = xor_sources;
- params->pq_sources = pq_sources;
- params->timeout = timeout;
-
ret = dmatest_register_dbgfs(info);
if (ret)
return ret;
diff --git a/drivers/dma/dw/Kconfig b/drivers/dma/dw/Kconfig
index dde1324..dcfe964 100644
--- a/drivers/dma/dw/Kconfig
+++ b/drivers/dma/dw/Kconfig
@@ -4,7 +4,6 @@
config DW_DMAC_CORE
tristate "Synopsys DesignWare AHB DMA support"
- depends on GENERIC_HARDIRQS
select DMA_ENGINE
config DW_DMAC
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index eea479c..89eb89f 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -37,16 +37,22 @@
* which does not support descriptor writeback.
*/
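+/*
+ * The request line is initialized to ~0 and only assigned later (e.g. from
+ * slave_id in the slave config), so ~0 means "not yet set".
+ */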
+static inline bool is_request_line_unset(struct dw_dma_chan *dwc)
+{
+ return dwc->request_line == (typeof(dwc->request_line))~0;
+}
+
static inline void dwc_set_masters(struct dw_dma_chan *dwc)
{
struct dw_dma *dw = to_dw_dma(dwc->chan.device);
struct dw_dma_slave *dws = dwc->chan.private;
unsigned char mmax = dw->nr_masters - 1;
- if (dwc->request_line == ~0) {
- dwc->src_master = min_t(unsigned char, mmax, dwc_get_sms(dws));
- dwc->dst_master = min_t(unsigned char, mmax, dwc_get_dms(dws));
- }
+ if (!is_request_line_unset(dwc))
+ return;
+
+ dwc->src_master = min_t(unsigned char, mmax, dwc_get_sms(dws));
+ dwc->dst_master = min_t(unsigned char, mmax, dwc_get_dms(dws));
}
#define DWC_DEFAULT_CTLLO(_chan) ({ \
@@ -644,10 +650,13 @@ static void dw_dma_tasklet(unsigned long data)
static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
{
struct dw_dma *dw = dev_id;
- u32 status;
+ u32 status = dma_readl(dw, STATUS_INT);
+
+ dev_vdbg(dw->dma.dev, "%s: status=0x%x\n", __func__, status);
- dev_vdbg(dw->dma.dev, "%s: status=0x%x\n", __func__,
- dma_readl(dw, STATUS_INT));
+ /* Check if we have any interrupt from the DMAC */
+ if (!status)
+ return IRQ_NONE;
/*
* Just disable the interrupts. We'll turn them back on in the
@@ -984,7 +993,7 @@ set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
dwc->direction = sconfig->direction;
/* Take the request line from slave_id member */
- if (dwc->request_line == ~0)
+ if (is_request_line_unset(dwc))
dwc->request_line = sconfig->slave_id;
convert_burst(&dwc->dma_sconfig.src_maxburst);
@@ -1089,16 +1098,16 @@ dwc_tx_status(struct dma_chan *chan,
enum dma_status ret;
ret = dma_cookie_status(chan, cookie, txstate);
- if (ret != DMA_SUCCESS) {
- dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
+ if (ret == DMA_SUCCESS)
+ return ret;
- ret = dma_cookie_status(chan, cookie, txstate);
- }
+ dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
+ ret = dma_cookie_status(chan, cookie, txstate);
if (ret != DMA_SUCCESS)
dma_set_residue(txstate, dwc_get_residue(dwc));
- if (dwc->paused)
+ if (dwc->paused && ret == DMA_IN_PROGRESS)
return DMA_PAUSED;
return ret;
@@ -1560,8 +1569,8 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
/* Disable BLOCK interrupts as well */
channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
- err = devm_request_irq(chip->dev, chip->irq, dw_dma_interrupt, 0,
- "dw_dmac", dw);
+ err = devm_request_irq(chip->dev, chip->irq, dw_dma_interrupt,
+ IRQF_SHARED, "dw_dmac", dw);
if (err)
return err;
diff --git a/drivers/dma/dw/platform.c b/drivers/dma/dw/platform.c
index 6c9449c..e35d975 100644
--- a/drivers/dma/dw/platform.c
+++ b/drivers/dma/dw/platform.c
@@ -253,6 +253,7 @@ static const struct acpi_device_id dw_dma_acpi_id_table[] = {
{ "INTL9C60", 0 },
{ }
};
+MODULE_DEVICE_TABLE(acpi, dw_dma_acpi_id_table);
#endif
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index 5f3e532..ff50ff4 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -56,6 +56,7 @@ struct edma_desc {
struct list_head node;
int absync;
int pset_nr;
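+ /* number of psets already programmed to hardware */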
+ int processed;
struct edmacc_param pset[0];
};
@@ -69,6 +70,7 @@ struct edma_chan {
int ch_num;
bool alloced;
int slot[EDMA_MAX_SLOTS];
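+ /* a missed-event error was seen; recovered in edma_execute */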
+ int missed;
struct dma_slave_config cfg;
};
@@ -104,22 +106,34 @@ static void edma_desc_free(struct virt_dma_desc *vdesc)
/* Dispatch a queued descriptor to the controller (caller holds lock) */
static void edma_execute(struct edma_chan *echan)
{
- struct virt_dma_desc *vdesc = vchan_next_desc(&echan->vchan);
+ struct virt_dma_desc *vdesc;
struct edma_desc *edesc;
- int i;
-
- if (!vdesc) {
- echan->edesc = NULL;
- return;
+ struct device *dev = echan->vchan.chan.device->dev;
+ int i, j, left, nslots;
+
+ /* If we have processed all psets, or have not yet started */
+ if (!echan->edesc ||
+ echan->edesc->pset_nr == echan->edesc->processed) {
+ /* Get next vdesc */
+ vdesc = vchan_next_desc(&echan->vchan);
+ if (!vdesc) {
+ echan->edesc = NULL;
+ return;
+ }
+ list_del(&vdesc->node);
+ echan->edesc = to_edma_desc(&vdesc->tx);
}
- list_del(&vdesc->node);
+ edesc = echan->edesc;
- echan->edesc = edesc = to_edma_desc(&vdesc->tx);
+ /* Find out how many psets are left to process */
+ left = edesc->pset_nr - edesc->processed;
+ nslots = min(MAX_NR_SG, left);
/* Write descriptor PaRAM set(s) */
- for (i = 0; i < edesc->pset_nr; i++) {
- edma_write_slot(echan->slot[i], &edesc->pset[i]);
+ for (i = 0; i < nslots; i++) {
+ j = i + edesc->processed;
+ edma_write_slot(echan->slot[i], &edesc->pset[j]);
dev_dbg(echan->vchan.chan.device->dev,
"\n pset[%d]:\n"
" chnum\t%d\n"
@@ -132,24 +146,50 @@ static void edma_execute(struct edma_chan *echan)
" bidx\t%08x\n"
" cidx\t%08x\n"
" lkrld\t%08x\n",
- i, echan->ch_num, echan->slot[i],
- edesc->pset[i].opt,
- edesc->pset[i].src,
- edesc->pset[i].dst,
- edesc->pset[i].a_b_cnt,
- edesc->pset[i].ccnt,
- edesc->pset[i].src_dst_bidx,
- edesc->pset[i].src_dst_cidx,
- edesc->pset[i].link_bcntrld);
+ j, echan->ch_num, echan->slot[i],
+ edesc->pset[j].opt,
+ edesc->pset[j].src,
+ edesc->pset[j].dst,
+ edesc->pset[j].a_b_cnt,
+ edesc->pset[j].ccnt,
+ edesc->pset[j].src_dst_bidx,
+ edesc->pset[j].src_dst_cidx,
+ edesc->pset[j].link_bcntrld);
/* Link to the previous slot if not the last set */
- if (i != (edesc->pset_nr - 1))
+ if (i != (nslots - 1))
edma_link(echan->slot[i], echan->slot[i+1]);
- /* Final pset links to the dummy pset */
- else
- edma_link(echan->slot[i], echan->ecc->dummy_slot);
}
- edma_start(echan->ch_num);
+ edesc->processed += nslots;
+
+ /*
+ * If this is the last set in the SG list of transactions, set up a
+ * link to the dummy slot; all future events are then absorbed,
+ * which is fine because we're done.
+ */
+ if (edesc->processed == edesc->pset_nr)
+ edma_link(echan->slot[nslots-1], echan->ecc->dummy_slot);
+
+ edma_resume(echan->ch_num);
+
+ if (edesc->processed <= MAX_NR_SG) {
+ dev_dbg(dev, "first transfer starting %d\n", echan->ch_num);
+ edma_start(echan->ch_num);
+ }
+
+ /*
+ * Events can be missed during the setup time between intermediate
+ * transfers, since long SG lists have to be broken up into chunks
+ * of MAX_NR_SG.
+ */
+ if (echan->missed) {
+ dev_dbg(dev, "missed event in execute detected\n");
+ edma_clean_channel(echan->ch_num);
+ edma_stop(echan->ch_num);
+ edma_start(echan->ch_num);
+ edma_trigger_channel(echan->ch_num);
+ echan->missed = 0;
+ }
}
static int edma_terminate_all(struct edma_chan *echan)
@@ -222,9 +262,9 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
enum dma_slave_buswidth dev_width;
u32 burst;
struct scatterlist *sg;
- int i;
int acnt, bcnt, ccnt, src, dst, cidx;
int src_bidx, dst_bidx, src_cidx, dst_cidx;
+ int i, nslots;
if (unlikely(!echan || !sgl || !sg_len))
return NULL;
@@ -247,12 +287,6 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
return NULL;
}
- if (sg_len > MAX_NR_SG) {
- dev_err(dev, "Exceeded max SG segments %d > %d\n",
- sg_len, MAX_NR_SG);
- return NULL;
- }
-
edesc = kzalloc(sizeof(*edesc) + sg_len *
sizeof(edesc->pset[0]), GFP_ATOMIC);
if (!edesc) {
@@ -262,8 +296,10 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
edesc->pset_nr = sg_len;
- for_each_sg(sgl, sg, sg_len, i) {
- /* Allocate a PaRAM slot, if needed */
+ /* Allocate a PaRAM slot, if needed */
+ nslots = min_t(unsigned, MAX_NR_SG, sg_len);
+
+ for (i = 0; i < nslots; i++) {
if (echan->slot[i] < 0) {
echan->slot[i] =
edma_alloc_slot(EDMA_CTLR(echan->ch_num),
@@ -273,6 +309,10 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
return NULL;
}
}
+ }
+
+ /* Configure PaRAM sets for each SG */
+ for_each_sg(sgl, sg, sg_len, i) {
acnt = dev_width;
@@ -330,6 +370,12 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
/* Configure A or AB synchronized transfers */
if (edesc->absync)
edesc->pset[i].opt |= SYNCDIM;
+
+ /* If this is the last pset in the current chunk of MAX_NR_SG
+ transactions, enable interrupts so the next chunk is processed */
+ if (!((i+1) % MAX_NR_SG))
+ edesc->pset[i].opt |= TCINTEN;
+
/* If this is the last set, enable completion interrupt flag */
if (i == sg_len - 1)
edesc->pset[i].opt |= TCINTEN;
@@ -355,27 +401,65 @@ static void edma_callback(unsigned ch_num, u16 ch_status, void *data)
struct device *dev = echan->vchan.chan.device->dev;
struct edma_desc *edesc;
unsigned long flags;
+ struct edmacc_param p;
- /* Stop the channel */
- edma_stop(echan->ch_num);
+ /* Pause the channel */
+ edma_pause(echan->ch_num);
switch (ch_status) {
case DMA_COMPLETE:
- dev_dbg(dev, "transfer complete on channel %d\n", ch_num);
-
spin_lock_irqsave(&echan->vchan.lock, flags);
edesc = echan->edesc;
if (edesc) {
+ if (edesc->processed == edesc->pset_nr) {
+ dev_dbg(dev, "Transfer complete, stopping channel %d\n", ch_num);
+ edma_stop(echan->ch_num);
+ vchan_cookie_complete(&edesc->vdesc);
+ } else {
+ dev_dbg(dev, "Intermediate transfer complete on channel %d\n", ch_num);
+ }
+
edma_execute(echan);
- vchan_cookie_complete(&edesc->vdesc);
}
spin_unlock_irqrestore(&echan->vchan.lock, flags);
break;
case DMA_CC_ERROR:
- dev_dbg(dev, "transfer error on channel %d\n", ch_num);
+ spin_lock_irqsave(&echan->vchan.lock, flags);
+
+ edma_read_slot(EDMA_CHAN_SLOT(echan->slot[0]), &p);
+
+ /*
+ * Defer the reissue via the missed flag; it is guaranteed
+ * to be acted upon, because either:
+ * (1) we finished transmitting an intermediate slot and
+ * edma_execute is coming up, or
+ * (2) we finished the current transfer and issue_pending
+ * will call edma_execute.
+ *
+ * Important note: issuing here can be dangerous and lead
+ * to nasty recursion when we are in a NULL slot, so we
+ * avoid doing so and only set the missed flag.
+ */
+ if (p.a_b_cnt == 0 && p.ccnt == 0) {
+ dev_dbg(dev, "Error occurred, looks like slot is null, just setting miss\n");
+ echan->missed = 1;
+ } else {
+ /*
+ * The slot is already programmed but the event got
+ * missed, so it's safe to issue it here.
+ */
+ dev_dbg(dev, "Error occurred but slot is non-null, TRIGGERING\n");
+ edma_clean_channel(echan->ch_num);
+ edma_stop(echan->ch_num);
+ edma_start(echan->ch_num);
+ edma_trigger_channel(echan->ch_num);
+ }
+
+ spin_unlock_irqrestore(&echan->vchan.lock, flags);
+
break;
default:
break;
@@ -502,8 +586,6 @@ static enum dma_status edma_tx_status(struct dma_chan *chan,
} else if (echan->edesc && echan->edesc->vdesc.tx.cookie == cookie) {
struct edma_desc *edesc = echan->edesc;
txstate->residue = edma_desc_size(edesc);
- } else {
- txstate->residue = 0;
}
spin_unlock_irqrestore(&echan->vchan.lock, flags);
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c
index f2bf8c0..591cd8c 100644
--- a/drivers/dma/ep93xx_dma.c
+++ b/drivers/dma/ep93xx_dma.c
@@ -1313,15 +1313,7 @@ static enum dma_status ep93xx_dma_tx_status(struct dma_chan *chan,
dma_cookie_t cookie,
struct dma_tx_state *state)
{
- struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
- enum dma_status ret;
- unsigned long flags;
-
- spin_lock_irqsave(&edmac->lock, flags);
- ret = dma_cookie_status(chan, cookie, state);
- spin_unlock_irqrestore(&edmac->lock, flags);
-
- return ret;
+ return dma_cookie_status(chan, cookie, state);
}
/**
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 49e8fbd..b3f3e90 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -979,15 +979,7 @@ static enum dma_status fsl_tx_status(struct dma_chan *dchan,
dma_cookie_t cookie,
struct dma_tx_state *txstate)
{
- struct fsldma_chan *chan = to_fsl_chan(dchan);
- enum dma_status ret;
- unsigned long flags;
-
- spin_lock_irqsave(&chan->desc_lock, flags);
- ret = dma_cookie_status(dchan, cookie, txstate);
- spin_unlock_irqrestore(&chan->desc_lock, flags);
-
- return ret;
+ return dma_cookie_status(dchan, cookie, txstate);
}
/*----------------------------------------------------------------------------*/
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index ff2aab9..78f8ca5 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -805,10 +805,8 @@ static void imxdma_free_chan_resources(struct dma_chan *chan)
}
INIT_LIST_HEAD(&imxdmac->ld_free);
- if (imxdmac->sg_list) {
- kfree(imxdmac->sg_list);
- imxdmac->sg_list = NULL;
- }
+ kfree(imxdmac->sg_list);
+ imxdmac->sg_list = NULL;
}
static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 1e44b8c..fc43603 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -243,7 +243,6 @@ struct sdma_engine;
* @event_id1 for channels that use 2 events
* @word_size peripheral access size
* @buf_tail ID of the buffer that was processed
- * @done channel completion
* @num_bd max NUM_BD. number of descriptors currently handling
*/
struct sdma_channel {
@@ -255,7 +254,6 @@ struct sdma_channel {
unsigned int event_id1;
enum dma_slave_buswidth word_size;
unsigned int buf_tail;
- struct completion done;
unsigned int num_bd;
struct sdma_buffer_descriptor *bd;
dma_addr_t bd_phys;
@@ -307,9 +305,10 @@ struct sdma_firmware_header {
u32 ram_code_size;
};
-enum sdma_devtype {
- IMX31_SDMA, /* runs on i.mx31 */
- IMX35_SDMA, /* runs on i.mx35 and later */
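+/*
+ * Per-SoC SDMA description: channel-enable register base, number of DMA
+ * events and default ROM script addresses.
+ */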
+struct sdma_driver_data {
+ int chnenbl0;
+ int num_events;
+ struct sdma_script_start_addrs *script_addrs;
};
struct sdma_engine {
@@ -318,8 +317,6 @@ struct sdma_engine {
struct sdma_channel channel[MAX_DMA_CHANNELS];
struct sdma_channel_control *channel_control;
void __iomem *regs;
- enum sdma_devtype devtype;
- unsigned int num_events;
struct sdma_context_data *context;
dma_addr_t context_phys;
struct dma_device dma_device;
@@ -327,15 +324,118 @@ struct sdma_engine {
struct clk *clk_ahb;
spinlock_t channel_0_lock;
struct sdma_script_start_addrs *script_addrs;
+ const struct sdma_driver_data *drvdata;
+};
+
+static struct sdma_driver_data sdma_imx31 = {
+ .chnenbl0 = SDMA_CHNENBL0_IMX31,
+ .num_events = 32,
+};
+
+static struct sdma_script_start_addrs sdma_script_imx25 = {
+ .ap_2_ap_addr = 729,
+ .uart_2_mcu_addr = 904,
+ .per_2_app_addr = 1255,
+ .mcu_2_app_addr = 834,
+ .uartsh_2_mcu_addr = 1120,
+ .per_2_shp_addr = 1329,
+ .mcu_2_shp_addr = 1048,
+ .ata_2_mcu_addr = 1560,
+ .mcu_2_ata_addr = 1479,
+ .app_2_per_addr = 1189,
+ .app_2_mcu_addr = 770,
+ .shp_2_per_addr = 1407,
+ .shp_2_mcu_addr = 979,
+};
+
+static struct sdma_driver_data sdma_imx25 = {
+ .chnenbl0 = SDMA_CHNENBL0_IMX35,
+ .num_events = 48,
+ .script_addrs = &sdma_script_imx25,
+};
+
+static struct sdma_driver_data sdma_imx35 = {
+ .chnenbl0 = SDMA_CHNENBL0_IMX35,
+ .num_events = 48,
+};
+
+static struct sdma_script_start_addrs sdma_script_imx51 = {
+ .ap_2_ap_addr = 642,
+ .uart_2_mcu_addr = 817,
+ .mcu_2_app_addr = 747,
+ .mcu_2_shp_addr = 961,
+ .ata_2_mcu_addr = 1473,
+ .mcu_2_ata_addr = 1392,
+ .app_2_per_addr = 1033,
+ .app_2_mcu_addr = 683,
+ .shp_2_per_addr = 1251,
+ .shp_2_mcu_addr = 892,
+};
+
+static struct sdma_driver_data sdma_imx51 = {
+ .chnenbl0 = SDMA_CHNENBL0_IMX35,
+ .num_events = 48,
+ .script_addrs = &sdma_script_imx51,
+};
+
+static struct sdma_script_start_addrs sdma_script_imx53 = {
+ .ap_2_ap_addr = 642,
+ .app_2_mcu_addr = 683,
+ .mcu_2_app_addr = 747,
+ .uart_2_mcu_addr = 817,
+ .shp_2_mcu_addr = 891,
+ .mcu_2_shp_addr = 960,
+ .uartsh_2_mcu_addr = 1032,
+ .spdif_2_mcu_addr = 1100,
+ .mcu_2_spdif_addr = 1134,
+ .firi_2_mcu_addr = 1193,
+ .mcu_2_firi_addr = 1290,
+};
+
+static struct sdma_driver_data sdma_imx53 = {
+ .chnenbl0 = SDMA_CHNENBL0_IMX35,
+ .num_events = 48,
+ .script_addrs = &sdma_script_imx53,
+};
+
+static struct sdma_script_start_addrs sdma_script_imx6q = {
+ .ap_2_ap_addr = 642,
+ .uart_2_mcu_addr = 817,
+ .mcu_2_app_addr = 747,
+ .per_2_per_addr = 6331,
+ .uartsh_2_mcu_addr = 1032,
+ .mcu_2_shp_addr = 960,
+ .app_2_mcu_addr = 683,
+ .shp_2_mcu_addr = 891,
+ .spdif_2_mcu_addr = 1100,
+ .mcu_2_spdif_addr = 1134,
+};
+
+static struct sdma_driver_data sdma_imx6q = {
+ .chnenbl0 = SDMA_CHNENBL0_IMX35,
+ .num_events = 48,
+ .script_addrs = &sdma_script_imx6q,
};
static struct platform_device_id sdma_devtypes[] = {
{
+ .name = "imx25-sdma",
+ .driver_data = (unsigned long)&sdma_imx25,
+ }, {
.name = "imx31-sdma",
- .driver_data = IMX31_SDMA,
+ .driver_data = (unsigned long)&sdma_imx31,
}, {
.name = "imx35-sdma",
- .driver_data = IMX35_SDMA,
+ .driver_data = (unsigned long)&sdma_imx35,
+ }, {
+ .name = "imx51-sdma",
+ .driver_data = (unsigned long)&sdma_imx51,
+ }, {
+ .name = "imx53-sdma",
+ .driver_data = (unsigned long)&sdma_imx53,
+ }, {
+ .name = "imx6q-sdma",
+ .driver_data = (unsigned long)&sdma_imx6q,
}, {
/* sentinel */
}
@@ -343,8 +443,11 @@ static struct platform_device_id sdma_devtypes[] = {
MODULE_DEVICE_TABLE(platform, sdma_devtypes);
static const struct of_device_id sdma_dt_ids[] = {
- { .compatible = "fsl,imx31-sdma", .data = &sdma_devtypes[IMX31_SDMA], },
- { .compatible = "fsl,imx35-sdma", .data = &sdma_devtypes[IMX35_SDMA], },
+ { .compatible = "fsl,imx6q-sdma", .data = &sdma_imx6q, },
+ { .compatible = "fsl,imx53-sdma", .data = &sdma_imx53, },
+ { .compatible = "fsl,imx51-sdma", .data = &sdma_imx51, },
+ { .compatible = "fsl,imx35-sdma", .data = &sdma_imx35, },
+ { .compatible = "fsl,imx31-sdma", .data = &sdma_imx31, },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sdma_dt_ids);
@@ -356,8 +459,7 @@ MODULE_DEVICE_TABLE(of, sdma_dt_ids);
static inline u32 chnenbl_ofs(struct sdma_engine *sdma, unsigned int event)
{
- u32 chnenbl0 = (sdma->devtype == IMX31_SDMA ? SDMA_CHNENBL0_IMX31 :
- SDMA_CHNENBL0_IMX35);
+ u32 chnenbl0 = sdma->drvdata->chnenbl0;
return chnenbl0 + event * 4;
}
@@ -547,8 +649,6 @@ static void sdma_tasklet(unsigned long data)
{
struct sdma_channel *sdmac = (struct sdma_channel *) data;
- complete(&sdmac->done);
-
if (sdmac->flags & IMX_DMA_SG_LOOP)
sdma_handle_channel_loop(sdmac);
else
@@ -733,7 +833,7 @@ static int sdma_config_channel(struct sdma_channel *sdmac)
sdmac->per_addr = 0;
if (sdmac->event_id0) {
- if (sdmac->event_id0 >= sdmac->sdma->num_events)
+ if (sdmac->event_id0 >= sdmac->sdma->drvdata->num_events)
return -EINVAL;
sdma_event_enable(sdmac, sdmac->event_id0);
}
@@ -812,9 +912,6 @@ static int sdma_request_channel(struct sdma_channel *sdmac)
sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys;
sdma_set_channel_priority(sdmac, MXC_SDMA_DEFAULT_PRIORITY);
-
- init_completion(&sdmac->done);
-
return 0;
out:
@@ -1120,15 +1217,12 @@ static int sdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
}
static enum dma_status sdma_tx_status(struct dma_chan *chan,
- dma_cookie_t cookie,
- struct dma_tx_state *txstate)
+ dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
{
struct sdma_channel *sdmac = to_sdma_chan(chan);
- dma_cookie_t last_used;
-
- last_used = chan->cookie;
- dma_set_tx_state(txstate, chan->completed_cookie, last_used,
+ dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie,
sdmac->chn_count - sdmac->chn_real_count);
return sdmac->status;
@@ -1218,19 +1312,6 @@ static int __init sdma_init(struct sdma_engine *sdma)
int i, ret;
dma_addr_t ccb_phys;
- switch (sdma->devtype) {
- case IMX31_SDMA:
- sdma->num_events = 32;
- break;
- case IMX35_SDMA:
- sdma->num_events = 48;
- break;
- default:
- dev_err(sdma->dev, "Unknown sdma type %d. aborting\n",
- sdma->devtype);
- return -ENODEV;
- }
-
clk_enable(sdma->clk_ipg);
clk_enable(sdma->clk_ahb);
@@ -1257,7 +1338,7 @@ static int __init sdma_init(struct sdma_engine *sdma)
MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control));
/* disable all channels */
- for (i = 0; i < sdma->num_events; i++)
+ for (i = 0; i < sdma->drvdata->num_events; i++)
writel_relaxed(0, sdma->regs + chnenbl_ofs(sdma, i));
/* All channels have priority 0 */
@@ -1335,10 +1416,21 @@ static int __init sdma_probe(struct platform_device *pdev)
int ret;
int irq;
struct resource *iores;
- struct sdma_platform_data *pdata = pdev->dev.platform_data;
+ struct sdma_platform_data *pdata = dev_get_platdata(&pdev->dev);
int i;
struct sdma_engine *sdma;
s32 *saddr_arr;
+ const struct sdma_driver_data *drvdata = NULL;
+
+ if (of_id)
+ drvdata = of_id->data;
+ else if (pdev->id_entry)
+ drvdata = (void *)pdev->id_entry->driver_data;
+
+ if (!drvdata) {
+ dev_err(&pdev->dev, "unable to find driver data\n");
+ return -EINVAL;
+ }
sdma = kzalloc(sizeof(*sdma), GFP_KERNEL);
if (!sdma)
@@ -1347,6 +1439,7 @@ static int __init sdma_probe(struct platform_device *pdev)
spin_lock_init(&sdma->channel_0_lock);
sdma->dev = &pdev->dev;
+ sdma->drvdata = drvdata;
iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq = platform_get_irq(pdev, 0);
@@ -1396,10 +1489,6 @@ static int __init sdma_probe(struct platform_device *pdev)
for (i = 0; i < SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1; i++)
saddr_arr[i] = -EINVAL;
- if (of_id)
- pdev->id_entry = of_id->data;
- sdma->devtype = pdev->id_entry->driver_data;
-
dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask);
dma_cap_set(DMA_CYCLIC, sdma->dma_device.cap_mask);
@@ -1431,6 +1520,8 @@ static int __init sdma_probe(struct platform_device *pdev)
if (ret)
goto err_init;
+ if (sdma->drvdata->script_addrs)
+ sdma_add_scripts(sdma, sdma->drvdata->script_addrs);
if (pdata && pdata->script_addrs)
sdma_add_scripts(sdma, pdata->script_addrs);
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index b642e03..d8ececa 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -251,7 +251,7 @@ static bool is_bwd_noraid(struct pci_dev *pdev)
}
static void pq16_set_src(struct ioat_raw_descriptor *desc[3],
- dma_addr_t addr, u32 offset, u8 coef, int idx)
+ dma_addr_t addr, u32 offset, u8 coef, unsigned idx)
{
struct ioat_pq_descriptor *pq = (struct ioat_pq_descriptor *)desc[0];
struct ioat_pq16a_descriptor *pq16 =
@@ -1775,15 +1775,12 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca)
dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
dma->device_free_chan_resources = ioat2_free_chan_resources;
- if (is_xeon_cb32(pdev))
- dma->copy_align = 6;
-
dma_cap_set(DMA_INTERRUPT, dma->cap_mask);
dma->device_prep_dma_interrupt = ioat3_prep_interrupt_lock;
device->cap = readl(device->reg_base + IOAT_DMA_CAP_OFFSET);
- if (is_bwd_noraid(pdev))
+ if (is_xeon_cb32(pdev) || is_bwd_noraid(pdev))
device->cap &= ~(IOAT_CAP_XOR | IOAT_CAP_PQ | IOAT_CAP_RAID16SS);
/* dca is incompatible with raid operations */
@@ -1793,7 +1790,6 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca)
if (device->cap & IOAT_CAP_XOR) {
is_raid_device = true;
dma->max_xor = 8;
- dma->xor_align = 6;
dma_cap_set(DMA_XOR, dma->cap_mask);
dma->device_prep_dma_xor = ioat3_prep_xor;
@@ -1812,13 +1808,8 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca)
if (device->cap & IOAT_CAP_RAID16SS) {
dma_set_maxpq(dma, 16, 0);
- dma->pq_align = 0;
} else {
dma_set_maxpq(dma, 8, 0);
- if (is_xeon_cb32(pdev))
- dma->pq_align = 6;
- else
- dma->pq_align = 0;
}
if (!(device->cap & IOAT_CAP_XOR)) {
@@ -1829,13 +1820,8 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca)
if (device->cap & IOAT_CAP_RAID16SS) {
dma->max_xor = 16;
- dma->xor_align = 0;
} else {
dma->max_xor = 8;
- if (is_xeon_cb32(pdev))
- dma->xor_align = 6;
- else
- dma->xor_align = 0;
}
}
}
@@ -1844,14 +1830,6 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca)
device->cleanup_fn = ioat3_cleanup_event;
device->timer_fn = ioat3_timer_event;
- if (is_xeon_cb32(pdev)) {
- dma_cap_clear(DMA_XOR_VAL, dma->cap_mask);
- dma->device_prep_dma_xor_val = NULL;
-
- dma_cap_clear(DMA_PQ_VAL, dma->cap_mask);
- dma->device_prep_dma_pq_val = NULL;
- }
-
/* starting with CB3.3 super extended descriptors are supported */
if (device->cap & IOAT_CAP_RAID16SS) {
char pool_name[14];
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index cc727ec..dd8b44a 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -518,7 +518,7 @@ static int iop_adma_alloc_chan_resources(struct dma_chan *chan)
struct iop_adma_desc_slot *slot = NULL;
int init = iop_chan->slots_allocated ? 0 : 1;
struct iop_adma_platform_data *plat_data =
- iop_chan->device->pdev->dev.platform_data;
+ dev_get_platdata(&iop_chan->device->pdev->dev);
int num_descs_in_pool = plat_data->pool_size/IOP_ADMA_SLOT_SIZE;
/* Allocate descriptor slots */
@@ -1351,7 +1351,7 @@ static int iop_adma_remove(struct platform_device *dev)
struct iop_adma_device *device = platform_get_drvdata(dev);
struct dma_chan *chan, *_chan;
struct iop_adma_chan *iop_chan;
- struct iop_adma_platform_data *plat_data = dev->dev.platform_data;
+ struct iop_adma_platform_data *plat_data = dev_get_platdata(&dev->dev);
dma_async_device_unregister(&device->common);
@@ -1376,7 +1376,7 @@ static int iop_adma_probe(struct platform_device *pdev)
struct iop_adma_device *adev;
struct iop_adma_chan *iop_chan;
struct dma_device *dma_dev;
- struct iop_adma_platform_data *plat_data = pdev->dev.platform_data;
+ struct iop_adma_platform_data *plat_data = dev_get_platdata(&pdev->dev);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c
index d39c2cd..cb9c0bc 100644
--- a/drivers/dma/ipu/ipu_idmac.c
+++ b/drivers/dma/ipu/ipu_idmac.c
@@ -1593,10 +1593,7 @@ static void idmac_free_chan_resources(struct dma_chan *chan)
static enum dma_status idmac_tx_status(struct dma_chan *chan,
dma_cookie_t cookie, struct dma_tx_state *txstate)
{
- dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie, 0);
- if (cookie != chan->cookie)
- return DMA_ERROR;
- return DMA_SUCCESS;
+ return dma_cookie_status(chan, cookie, txstate);
}
static int __init ipu_idmac_init(struct ipu *ipu)
@@ -1767,7 +1764,6 @@ static int ipu_remove(struct platform_device *pdev)
iounmap(ipu->reg_ic);
iounmap(ipu->reg_ipu);
tasklet_kill(&ipu->tasklet);
- platform_set_drvdata(pdev, NULL);
return 0;
}
diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c
new file mode 100644
index 0000000..a2c330f
--- /dev/null
+++ b/drivers/dma/k3dma.c
@@ -0,0 +1,837 @@
+/*
+ * Copyright (c) 2013 Linaro Ltd.
+ * Copyright (c) 2013 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/sched.h>
+#include <linux/device.h>
+#include <linux/dmaengine.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/clk.h>
+#include <linux/of_dma.h>
+
+#include "virt-dma.h"
+
+#define DRIVER_NAME "k3-dma"
+#define DMA_ALIGN 3
+#define DMA_MAX_SIZE 0x1ffc
+
+#define INT_STAT 0x00
+#define INT_TC1 0x04
+#define INT_ERR1 0x0c
+#define INT_ERR2 0x10
+#define INT_TC1_MASK 0x18
+#define INT_ERR1_MASK 0x20
+#define INT_ERR2_MASK 0x24
+#define INT_TC1_RAW 0x600
+#define INT_ERR1_RAW 0x608
+#define INT_ERR2_RAW 0x610
+#define CH_PRI 0x688
+#define CH_STAT 0x690
+#define CX_CUR_CNT 0x704
+#define CX_LLI 0x800
+#define CX_CNT 0x810
+#define CX_SRC 0x814
+#define CX_DST 0x818
+#define CX_CFG 0x81c
+#define AXI_CFG 0x820
+#define AXI_CFG_DEFAULT 0x201201
+
+#define CX_LLI_CHAIN_EN 0x2
+#define CX_CFG_EN 0x1
+#define CX_CFG_MEM2PER (0x1 << 2)
+#define CX_CFG_PER2MEM (0x2 << 2)
+#define CX_CFG_SRCINCR (0x1 << 31)
+#define CX_CFG_DSTINCR (0x1 << 30)
+
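+/*
+ * One hardware link-list item (LLI); the layout matches what the
+ * controller fetches, hence the 32-byte alignment.
+ */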
+struct k3_desc_hw {
+ u32 lli;
+ u32 reserved[3];
+ u32 count;
+ u32 saddr;
+ u32 daddr;
+ u32 config;
+} __aligned(32);
+
+struct k3_dma_desc_sw {
+ struct virt_dma_desc vd;
+ dma_addr_t desc_hw_lli;
+ size_t desc_num;
+ size_t size;
+ struct k3_desc_hw desc_hw[0];
+};
+
+struct k3_dma_phy;
+
+struct k3_dma_chan {
+ u32 ccfg;
+ struct virt_dma_chan vc;
+ struct k3_dma_phy *phy;
+ struct list_head node;
+ enum dma_transfer_direction dir;
+ dma_addr_t dev_addr;
+ enum dma_status status;
+};
+
+struct k3_dma_phy {
+ u32 idx;
+ void __iomem *base;
+ struct k3_dma_chan *vchan;
+ struct k3_dma_desc_sw *ds_run;
+ struct k3_dma_desc_sw *ds_done;
+};
+
+struct k3_dma_dev {
+ struct dma_device slave;
+ void __iomem *base;
+ struct tasklet_struct task;
+ spinlock_t lock;
+ struct list_head chan_pending;
+ struct k3_dma_phy *phy;
+ struct k3_dma_chan *chans;
+ struct clk *clk;
+ u32 dma_channels;
+ u32 dma_requests;
+};
+
+#define to_k3_dma(dmadev) container_of(dmadev, struct k3_dma_dev, slave)
+
+static struct k3_dma_chan *to_k3_chan(struct dma_chan *chan)
+{
+ return container_of(chan, struct k3_dma_chan, vc.chan);
+}
+
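+/* Set or clear the enable bit in CX_CFG: true resumes the channel, false pauses it. */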
+static void k3_dma_pause_dma(struct k3_dma_phy *phy, bool on)
+{
+ u32 val = 0;
+
+ if (on) {
+ val = readl_relaxed(phy->base + CX_CFG);
+ val |= CX_CFG_EN;
+ writel_relaxed(val, phy->base + CX_CFG);
+ } else {
+ val = readl_relaxed(phy->base + CX_CFG);
+ val &= ~CX_CFG_EN;
+ writel_relaxed(val, phy->base + CX_CFG);
+ }
+}
+
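+/* Disable the channel and clear its raw TC and error interrupt status. */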
+static void k3_dma_terminate_chan(struct k3_dma_phy *phy, struct k3_dma_dev *d)
+{
+ u32 val = 0;
+
+ k3_dma_pause_dma(phy, false);
+
+ val = 0x1 << phy->idx;
+ writel_relaxed(val, d->base + INT_TC1_RAW);
+ writel_relaxed(val, d->base + INT_ERR1_RAW);
+ writel_relaxed(val, d->base + INT_ERR2_RAW);
+}
+
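+/* Program one hardware descriptor into the channel registers; writing CX_CFG last kicks it off. */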
+static void k3_dma_set_desc(struct k3_dma_phy *phy, struct k3_desc_hw *hw)
+{
+ writel_relaxed(hw->lli, phy->base + CX_LLI);
+ writel_relaxed(hw->count, phy->base + CX_CNT);
+ writel_relaxed(hw->saddr, phy->base + CX_SRC);
+ writel_relaxed(hw->daddr, phy->base + CX_DST);
+ writel_relaxed(AXI_CFG_DEFAULT, phy->base + AXI_CFG);
+ writel_relaxed(hw->config, phy->base + CX_CFG);
+}
+
+static u32 k3_dma_get_curr_cnt(struct k3_dma_dev *d, struct k3_dma_phy *phy)
+{
+ u32 cnt = 0;
+
+ cnt = readl_relaxed(d->base + CX_CUR_CNT + phy->idx * 0x10);
+ cnt &= 0xffff;
+ return cnt;
+}
+
+static u32 k3_dma_get_curr_lli(struct k3_dma_phy *phy)
+{
+ return readl_relaxed(phy->base + CX_LLI);
+}
+
+static u32 k3_dma_get_chan_stat(struct k3_dma_dev *d)
+{
+ return readl_relaxed(d->base + CH_STAT);
+}
+
+static void k3_dma_enable_dma(struct k3_dma_dev *d, bool on)
+{
+ if (on) {
+ /* set same priority */
+ writel_relaxed(0x0, d->base + CH_PRI);
+
+ /* unmask irq */
+ writel_relaxed(0xffff, d->base + INT_TC1_MASK);
+ writel_relaxed(0xffff, d->base + INT_ERR1_MASK);
+ writel_relaxed(0xffff, d->base + INT_ERR2_MASK);
+ } else {
+ /* mask irq */
+ writel_relaxed(0x0, d->base + INT_TC1_MASK);
+ writel_relaxed(0x0, d->base + INT_ERR1_MASK);
+ writel_relaxed(0x0, d->base + INT_ERR2_MASK);
+ }
+}
+
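+/*
+ * A single interrupt line covers all channels: walk the summary status,
+ * complete the running descriptor on each channel that hit terminal
+ * count, then let the tasklet hand out work to idle channels.
+ */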
+static irqreturn_t k3_dma_int_handler(int irq, void *dev_id)
+{
+ struct k3_dma_dev *d = (struct k3_dma_dev *)dev_id;
+ struct k3_dma_phy *p;
+ struct k3_dma_chan *c;
+ u32 stat = readl_relaxed(d->base + INT_STAT);
+ u32 tc1 = readl_relaxed(d->base + INT_TC1);
+ u32 err1 = readl_relaxed(d->base + INT_ERR1);
+ u32 err2 = readl_relaxed(d->base + INT_ERR2);
+ u32 i, irq_chan = 0;
+
+ while (stat) {
+ i = __ffs(stat);
+ stat &= (stat - 1);
+ if (likely(tc1 & BIT(i))) {
+ p = &d->phy[i];
+ c = p->vchan;
+ if (c) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&c->vc.lock, flags);
+ vchan_cookie_complete(&p->ds_run->vd);
+ p->ds_done = p->ds_run;
+ spin_unlock_irqrestore(&c->vc.lock, flags);
+ }
+ irq_chan |= BIT(i);
+ }
+ if (unlikely((err1 & BIT(i)) || (err2 & BIT(i))))
+ dev_warn(d->slave.dev, "DMA ERR\n");
+ }
+
+ writel_relaxed(irq_chan, d->base + INT_TC1_RAW);
+ writel_relaxed(err1, d->base + INT_ERR1_RAW);
+ writel_relaxed(err2, d->base + INT_ERR2_RAW);
+
+ if (irq_chan) {
+ tasklet_schedule(&d->task);
+ return IRQ_HANDLED;
+ } else
+ return IRQ_NONE;
+}
+
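+/* Hand the next issued descriptor to the channel's pchan; -EAGAIN if the pchan is missing or busy. */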
+static int k3_dma_start_txd(struct k3_dma_chan *c)
+{
+ struct k3_dma_dev *d = to_k3_dma(c->vc.chan.device);
+ struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
+
+ if (!c->phy)
+ return -EAGAIN;
+
+ if (BIT(c->phy->idx) & k3_dma_get_chan_stat(d))
+ return -EAGAIN;
+
+ if (vd) {
+ struct k3_dma_desc_sw *ds =
+ container_of(vd, struct k3_dma_desc_sw, vd);
+ /*
+ * fetch and remove request from vc->desc_issued
+ * so vc->desc_issued only contains desc pending
+ */
+ list_del(&ds->vd.node);
+ c->phy->ds_run = ds;
+ c->phy->ds_done = NULL;
+ /* start dma */
+ k3_dma_set_desc(c->phy, &ds->desc_hw[0]);
+ return 0;
+ }
+ c->phy->ds_done = NULL;
+ c->phy->ds_run = NULL;
+ return -EAGAIN;
+}
+
+static void k3_dma_tasklet(unsigned long arg)
+{
+ struct k3_dma_dev *d = (struct k3_dma_dev *)arg;
+ struct k3_dma_phy *p;
+ struct k3_dma_chan *c, *cn;
+ unsigned pch, pch_alloc = 0;
+
+ /* check for new dma requests on running channels in vc->desc_issued */
+ list_for_each_entry_safe(c, cn, &d->slave.channels, vc.chan.device_node) {
+ spin_lock_irq(&c->vc.lock);
+ p = c->phy;
+ if (p && p->ds_done) {
+ if (k3_dma_start_txd(c)) {
+ /* No current txd associated with this channel */
+ dev_dbg(d->slave.dev, "pchan %u: free\n", p->idx);
+ /* Mark this channel free */
+ c->phy = NULL;
+ p->vchan = NULL;
+ }
+ }
+ spin_unlock_irq(&c->vc.lock);
+ }
+
+ /* check new channel request in d->chan_pending */
+ spin_lock_irq(&d->lock);
+ for (pch = 0; pch < d->dma_channels; pch++) {
+ p = &d->phy[pch];
+
+ if (p->vchan == NULL && !list_empty(&d->chan_pending)) {
+ c = list_first_entry(&d->chan_pending,
+ struct k3_dma_chan, node);
+ /* remove from d->chan_pending */
+ list_del_init(&c->node);
+ pch_alloc |= 1 << pch;
+ /* Mark this channel allocated */
+ p->vchan = c;
+ c->phy = p;
+ dev_dbg(d->slave.dev, "pchan %u: alloc vchan %p\n", pch, &c->vc);
+ }
+ }
+ spin_unlock_irq(&d->lock);
+
+ for (pch = 0; pch < d->dma_channels; pch++) {
+ if (pch_alloc & (1 << pch)) {
+ p = &d->phy[pch];
+ c = p->vchan;
+ if (c) {
+ spin_lock_irq(&c->vc.lock);
+ k3_dma_start_txd(c);
+ spin_unlock_irq(&c->vc.lock);
+ }
+ }
+ }
+}
+
+static int k3_dma_alloc_chan_resources(struct dma_chan *chan)
+{
+ return 0;
+}
+
+static void k3_dma_free_chan_resources(struct dma_chan *chan)
+{
+ struct k3_dma_chan *c = to_k3_chan(chan);
+ struct k3_dma_dev *d = to_k3_dma(chan->device);
+ unsigned long flags;
+
+ spin_lock_irqsave(&d->lock, flags);
+ list_del_init(&c->node);
+ spin_unlock_irqrestore(&d->lock, flags);
+
+ vchan_free_chan_resources(&c->vc);
+ c->ccfg = 0;
+}
+
+static enum dma_status k3_dma_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie, struct dma_tx_state *state)
+{
+ struct k3_dma_chan *c = to_k3_chan(chan);
+ struct k3_dma_dev *d = to_k3_dma(chan->device);
+ struct k3_dma_phy *p;
+ struct virt_dma_desc *vd;
+ unsigned long flags;
+ enum dma_status ret;
+ size_t bytes = 0;
+
+ ret = dma_cookie_status(&c->vc.chan, cookie, state);
+ if (ret == DMA_SUCCESS)
+ return ret;
+
+ spin_lock_irqsave(&c->vc.lock, flags);
+ p = c->phy;
+ ret = c->status;
+
+ /*
+ * If the cookie is on our issue queue, then the residue is
+ * its total size.
+ */
+ vd = vchan_find_desc(&c->vc, cookie);
+ if (vd) {
+ bytes = container_of(vd, struct k3_dma_desc_sw, vd)->size;
+ } else if ((!p) || (!p->ds_run)) {
+ bytes = 0;
+ } else {
+ struct k3_dma_desc_sw *ds = p->ds_run;
+ u32 clli = 0, index = 0;
+
+ bytes = k3_dma_get_curr_cnt(d, p);
+ clli = k3_dma_get_curr_lli(p);
+ index = (clli - ds->desc_hw_lli) / sizeof(struct k3_desc_hw);
+ for (; index < ds->desc_num; index++) {
+ bytes += ds->desc_hw[index].count;
+ /* end of lli */
+ if (!ds->desc_hw[index].lli)
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&c->vc.lock, flags);
+ dma_set_residue(state, bytes);
+ return ret;
+}
+
+static void k3_dma_issue_pending(struct dma_chan *chan)
+{
+ struct k3_dma_chan *c = to_k3_chan(chan);
+ struct k3_dma_dev *d = to_k3_dma(chan->device);
+ unsigned long flags;
+
+ spin_lock_irqsave(&c->vc.lock, flags);
+ /* add request to vc->desc_issued */
+ if (vchan_issue_pending(&c->vc)) {
+ spin_lock(&d->lock);
+ if (!c->phy) {
+ if (list_empty(&c->node)) {
+ /* if new channel, add chan_pending */
+ list_add_tail(&c->node, &d->chan_pending);
+ /* check in tasklet */
+ tasklet_schedule(&d->task);
+ dev_dbg(d->slave.dev, "vchan %p: issued\n", &c->vc);
+ }
+ }
+ spin_unlock(&d->lock);
+ } else
+ dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", &c->vc);
+ spin_unlock_irqrestore(&c->vc.lock, flags);
+}
+
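+/*
+ * Fill entry @num of the descriptor array and chain it to entry @num + 1;
+ * callers clear .lli on the final entry to terminate the list.
+ */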
+static void k3_dma_fill_desc(struct k3_dma_desc_sw *ds, dma_addr_t dst,
+ dma_addr_t src, size_t len, u32 num, u32 ccfg)
+{
+ if ((num + 1) < ds->desc_num)
+ ds->desc_hw[num].lli = ds->desc_hw_lli + (num + 1) *
+ sizeof(struct k3_desc_hw);
+ ds->desc_hw[num].lli |= CX_LLI_CHAIN_EN;
+ ds->desc_hw[num].count = len;
+ ds->desc_hw[num].saddr = src;
+ ds->desc_hw[num].daddr = dst;
+ ds->desc_hw[num].config = ccfg;
+}
+
+static struct dma_async_tx_descriptor *k3_dma_prep_memcpy(
+ struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
+ size_t len, unsigned long flags)
+{
+ struct k3_dma_chan *c = to_k3_chan(chan);
+ struct k3_dma_desc_sw *ds;
+ size_t copy = 0;
+ int num = 0;
+
+ if (!len)
+ return NULL;
+
+ num = DIV_ROUND_UP(len, DMA_MAX_SIZE);
+ ds = kzalloc(sizeof(*ds) + num * sizeof(ds->desc_hw[0]), GFP_ATOMIC);
+ if (!ds) {
+ dev_dbg(chan->device->dev, "vchan %p: kzalloc fail\n", &c->vc);
+ return NULL;
+ }
+ ds->desc_hw_lli = __virt_to_phys((unsigned long)&ds->desc_hw[0]);
+ ds->size = len;
+ ds->desc_num = num;
+ num = 0;
+
+ if (!c->ccfg) {
+ /* default is memtomem, without calling device_control */
+ c->ccfg = CX_CFG_SRCINCR | CX_CFG_DSTINCR | CX_CFG_EN;
+ c->ccfg |= (0xf << 20) | (0xf << 24); /* burst = 16 */
+ c->ccfg |= (0x3 << 12) | (0x3 << 16); /* width = 64 bit */
+ }
+
+ do {
+ copy = min_t(size_t, len, DMA_MAX_SIZE);
+ k3_dma_fill_desc(ds, dst, src, copy, num++, c->ccfg);
+
+ if (c->dir == DMA_MEM_TO_DEV) {
+ src += copy;
+ } else if (c->dir == DMA_DEV_TO_MEM) {
+ dst += copy;
+ } else {
+ src += copy;
+ dst += copy;
+ }
+ len -= copy;
+ } while (len);
+
+ ds->desc_hw[num-1].lli = 0; /* end of link */
+ return vchan_tx_prep(&c->vc, &ds->vd, flags);
+}
+
+static struct dma_async_tx_descriptor *k3_dma_prep_slave_sg(
+ struct dma_chan *chan, struct scatterlist *sgl, unsigned int sglen,
+ enum dma_transfer_direction dir, unsigned long flags, void *context)
+{
+ struct k3_dma_chan *c = to_k3_chan(chan);
+ struct k3_dma_desc_sw *ds;
+ size_t len, avail, total = 0;
+ struct scatterlist *sg;
+ dma_addr_t addr, src = 0, dst = 0;
+ int num = sglen, i;
+
+ if (!sgl)
+ return NULL;
+
+ for_each_sg(sgl, sg, sglen, i) {
+ avail = sg_dma_len(sg);
+ if (avail > DMA_MAX_SIZE)
+ num += DIV_ROUND_UP(avail, DMA_MAX_SIZE) - 1;
+ }
+
+ ds = kzalloc(sizeof(*ds) + num * sizeof(ds->desc_hw[0]), GFP_ATOMIC);
+ if (!ds) {
+ dev_dbg(chan->device->dev, "vchan %p: kzalloc fail\n", &c->vc);
+ return NULL;
+ }
+ ds->desc_hw_lli = __virt_to_phys((unsigned long)&ds->desc_hw[0]);
+ ds->desc_num = num;
+ num = 0;
+
+ for_each_sg(sgl, sg, sglen, i) {
+ addr = sg_dma_address(sg);
+ avail = sg_dma_len(sg);
+ total += avail;
+
+ do {
+ len = min_t(size_t, avail, DMA_MAX_SIZE);
+
+ if (dir == DMA_MEM_TO_DEV) {
+ src = addr;
+ dst = c->dev_addr;
+ } else if (dir == DMA_DEV_TO_MEM) {
+ src = c->dev_addr;
+ dst = addr;
+ }
+
+ k3_dma_fill_desc(ds, dst, src, len, num++, c->ccfg);
+
+ addr += len;
+ avail -= len;
+ } while (avail);
+ }
+
+ ds->desc_hw[num-1].lli = 0; /* end of link */
+ ds->size = total;
+ return vchan_tx_prep(&c->vc, &ds->vd, flags);
+}
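
The slave-sg prep over-allocates its descriptor array: it starts with one
descriptor per scatterlist entry, then adds DIV_ROUND_UP(avail,
DMA_MAX_SIZE) - 1 extras for every entry larger than the hardware limit.
A compact model of that count (the sizes and the DMA_MAX_SIZE value are
invented):

#include <stdio.h>

#define DMA_MAX_SIZE 0x1ffc	/* assumed value for illustration */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long sg_len[] = { 0x1000, 0x5000, 0x800 };
	int i, num = 3;			/* one per sg entry */

	for (i = 0; i < 3; i++)
		if (sg_len[i] > DMA_MAX_SIZE)
			num += DIV_ROUND_UP(sg_len[i], DMA_MAX_SIZE) - 1;
	printf("need %d descriptors\n", num);
	return 0;
}
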
+
+static int k3_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+ unsigned long arg)
+{
+ struct k3_dma_chan *c = to_k3_chan(chan);
+ struct k3_dma_dev *d = to_k3_dma(chan->device);
+ struct dma_slave_config *cfg = (void *)arg;
+ struct k3_dma_phy *p = c->phy;
+ unsigned long flags;
+ u32 maxburst = 0, val = 0;
+ enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
+ LIST_HEAD(head);
+
+ switch (cmd) {
+ case DMA_SLAVE_CONFIG:
+ if (cfg == NULL)
+ return -EINVAL;
+ c->dir = cfg->direction;
+ if (c->dir == DMA_DEV_TO_MEM) {
+ c->ccfg = CX_CFG_DSTINCR;
+ c->dev_addr = cfg->src_addr;
+ maxburst = cfg->src_maxburst;
+ width = cfg->src_addr_width;
+ } else if (c->dir == DMA_MEM_TO_DEV) {
+ c->ccfg = CX_CFG_SRCINCR;
+ c->dev_addr = cfg->dst_addr;
+ maxburst = cfg->dst_maxburst;
+ width = cfg->dst_addr_width;
+ }
+ switch (width) {
+ case DMA_SLAVE_BUSWIDTH_1_BYTE:
+ case DMA_SLAVE_BUSWIDTH_2_BYTES:
+ case DMA_SLAVE_BUSWIDTH_4_BYTES:
+ case DMA_SLAVE_BUSWIDTH_8_BYTES:
+ val = __ffs(width);
+ break;
+ default:
+ val = 3;
+ break;
+ }
+ c->ccfg |= (val << 12) | (val << 16);
+
+ if ((maxburst == 0) || (maxburst > 16))
+ val = 16;
+ else
+ val = maxburst - 1;
+ c->ccfg |= (val << 20) | (val << 24);
+ c->ccfg |= CX_CFG_MEM2PER | CX_CFG_EN;
+
+ /* specific request line */
+ c->ccfg |= c->vc.chan.chan_id << 4;
+ break;
+
+ case DMA_TERMINATE_ALL:
+ dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc);
+
+ /* Prevent this channel being scheduled */
+ spin_lock(&d->lock);
+ list_del_init(&c->node);
+ spin_unlock(&d->lock);
+
+ /* Clear the tx descriptor lists */
+ spin_lock_irqsave(&c->vc.lock, flags);
+ vchan_get_all_descriptors(&c->vc, &head);
+ if (p) {
+ /* vchan is assigned to a pchan - stop the channel */
+ k3_dma_terminate_chan(p, d);
+ c->phy = NULL;
+ p->vchan = NULL;
+ p->ds_run = p->ds_done = NULL;
+ }
+ spin_unlock_irqrestore(&c->vc.lock, flags);
+ vchan_dma_desc_free_list(&c->vc, &head);
+ break;
+
+ case DMA_PAUSE:
+ dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc);
+ if (c->status == DMA_IN_PROGRESS) {
+ c->status = DMA_PAUSED;
+ if (p) {
+ k3_dma_pause_dma(p, false);
+ } else {
+ spin_lock(&d->lock);
+ list_del_init(&c->node);
+ spin_unlock(&d->lock);
+ }
+ }
+ break;
+
+ case DMA_RESUME:
+ dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc);
+ spin_lock_irqsave(&c->vc.lock, flags);
+ if (c->status == DMA_PAUSED) {
+ c->status = DMA_IN_PROGRESS;
+ if (p) {
+ k3_dma_pause_dma(p, true);
+ } else if (!list_empty(&c->vc.desc_issued)) {
+ spin_lock(&d->lock);
+ list_add_tail(&c->node, &d->chan_pending);
+ spin_unlock(&d->lock);
+ }
+ }
+ spin_unlock_irqrestore(&c->vc.lock, flags);
+ break;
+ default:
+ return -ENXIO;
+ }
+ return 0;
+}
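
DMA_SLAVE_CONFIG packs two fields into ccfg: the bus-width code,
__ffs(width) (log2 of the byte width, since the widths are powers of
two), into bits 12 and 16, and the burst code, maxburst - 1 capped at 16,
into bits 20 and 24. A sketch of that packing with a portable stand-in
for __ffs(); the field positions are copied from the hunk:

#include <stdint.h>
#include <stdio.h>

static unsigned int ffs_log2(unsigned int v)	/* v: nonzero power of two */
{
	unsigned int n = 0;

	while (!(v & 1)) {
		v >>= 1;
		n++;
	}
	return n;
}

static uint32_t pack_ccfg(unsigned int width, unsigned int maxburst)
{
	uint32_t val, ccfg = 0;

	val = ffs_log2(width);			/* 1->0, 2->1, 4->2, 8->3 */
	ccfg |= (val << 12) | (val << 16);	/* src/dst width fields */

	val = (!maxburst || maxburst > 16) ? 16 : maxburst - 1;
	ccfg |= (val << 20) | (val << 24);	/* src/dst burst fields */
	return ccfg;
}

int main(void)
{
	printf("ccfg=0x%08x\n", pack_ccfg(4, 8));	/* 32-bit, burst 8 */
	return 0;
}
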
+
+static void k3_dma_free_desc(struct virt_dma_desc *vd)
+{
+ struct k3_dma_desc_sw *ds =
+ container_of(vd, struct k3_dma_desc_sw, vd);
+
+ kfree(ds);
+}
+
+static struct of_device_id k3_pdma_dt_ids[] = {
+ { .compatible = "hisilicon,k3-dma-1.0", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, k3_pdma_dt_ids);
+
+static struct dma_chan *k3_of_dma_simple_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct k3_dma_dev *d = ofdma->of_dma_data;
+ unsigned int request = dma_spec->args[0];
+
+ if (request > d->dma_requests)
+ return NULL;
+
+ return dma_get_slave_channel(&(d->chans[request].vc.chan));
+}
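
The OF translation above is a direct map: the first cell of the DT dma
specifier indexes the driver's virtual-channel array, bounds-checked
against dma-requests. A user-space model of the mapping (struct chan and
xlate() are invented; the comparison mirrors the hunk's check):

#include <stdio.h>

struct chan { int id; };

static struct chan *xlate(struct chan *chans, unsigned int nr_requests,
			  unsigned int request)
{
	if (request > nr_requests)	/* bound check as in the hunk */
		return NULL;
	return &chans[request];
}

int main(void)
{
	struct chan chans[4] = { {0}, {1}, {2}, {3} };
	struct chan *c = xlate(chans, 4, 2);

	printf("request 2 -> chan %d\n", c ? c->id : -1);
	return 0;
}
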
+
+static int k3_dma_probe(struct platform_device *op)
+{
+ struct k3_dma_dev *d;
+ const struct of_device_id *of_id;
+ struct resource *iores;
+ int i, ret, irq = 0;
+
+ iores = platform_get_resource(op, IORESOURCE_MEM, 0);
+ if (!iores)
+ return -EINVAL;
+
+ d = devm_kzalloc(&op->dev, sizeof(*d), GFP_KERNEL);
+ if (!d)
+ return -ENOMEM;
+
+ d->base = devm_ioremap_resource(&op->dev, iores);
+ if (IS_ERR(d->base))
+ return PTR_ERR(d->base);
+
+ of_id = of_match_device(k3_pdma_dt_ids, &op->dev);
+ if (of_id) {
+ of_property_read_u32((&op->dev)->of_node,
+ "dma-channels", &d->dma_channels);
+ of_property_read_u32((&op->dev)->of_node,
+ "dma-requests", &d->dma_requests);
+ }
+
+ d->clk = devm_clk_get(&op->dev, NULL);
+ if (IS_ERR(d->clk)) {
+ dev_err(&op->dev, "no dma clk\n");
+ return PTR_ERR(d->clk);
+ }
+
+ irq = platform_get_irq(op, 0);
+ ret = devm_request_irq(&op->dev, irq,
+ k3_dma_int_handler, IRQF_DISABLED, DRIVER_NAME, d);
+ if (ret)
+ return ret;
+
+ /* init phy channel */
+ d->phy = devm_kzalloc(&op->dev,
+ d->dma_channels * sizeof(struct k3_dma_phy), GFP_KERNEL);
+ if (d->phy == NULL)
+ return -ENOMEM;
+
+ for (i = 0; i < d->dma_channels; i++) {
+ struct k3_dma_phy *p = &d->phy[i];
+
+ p->idx = i;
+ p->base = d->base + i * 0x40;
+ }
+
+ INIT_LIST_HEAD(&d->slave.channels);
+ dma_cap_set(DMA_SLAVE, d->slave.cap_mask);
+ dma_cap_set(DMA_MEMCPY, d->slave.cap_mask);
+ d->slave.dev = &op->dev;
+ d->slave.device_alloc_chan_resources = k3_dma_alloc_chan_resources;
+ d->slave.device_free_chan_resources = k3_dma_free_chan_resources;
+ d->slave.device_tx_status = k3_dma_tx_status;
+ d->slave.device_prep_dma_memcpy = k3_dma_prep_memcpy;
+ d->slave.device_prep_slave_sg = k3_dma_prep_slave_sg;
+ d->slave.device_issue_pending = k3_dma_issue_pending;
+ d->slave.device_control = k3_dma_control;
+ d->slave.copy_align = DMA_ALIGN;
+ d->slave.chancnt = d->dma_requests;
+
+ /* init virtual channel */
+ d->chans = devm_kzalloc(&op->dev,
+ d->dma_requests * sizeof(struct k3_dma_chan), GFP_KERNEL);
+ if (d->chans == NULL)
+ return -ENOMEM;
+
+ for (i = 0; i < d->dma_requests; i++) {
+ struct k3_dma_chan *c = &d->chans[i];
+
+ c->status = DMA_IN_PROGRESS;
+ INIT_LIST_HEAD(&c->node);
+ c->vc.desc_free = k3_dma_free_desc;
+ vchan_init(&c->vc, &d->slave);
+ }
+
+ /* Enable clock before accessing registers */
+ ret = clk_prepare_enable(d->clk);
+ if (ret < 0) {
+ dev_err(&op->dev, "clk_prepare_enable failed: %d\n", ret);
+ return ret;
+ }
+
+ k3_dma_enable_dma(d, true);
+
+ ret = dma_async_device_register(&d->slave);
+ if (ret)
+ return ret;
+
+ ret = of_dma_controller_register((&op->dev)->of_node,
+ k3_of_dma_simple_xlate, d);
+ if (ret)
+ goto of_dma_register_fail;
+
+ spin_lock_init(&d->lock);
+ INIT_LIST_HEAD(&d->chan_pending);
+ tasklet_init(&d->task, k3_dma_tasklet, (unsigned long)d);
+ platform_set_drvdata(op, d);
+ dev_info(&op->dev, "initialized\n");
+
+ return 0;
+
+of_dma_register_fail:
+ dma_async_device_unregister(&d->slave);
+ return ret;
+}
+
+static int k3_dma_remove(struct platform_device *op)
+{
+ struct k3_dma_chan *c, *cn;
+ struct k3_dma_dev *d = platform_get_drvdata(op);
+
+ dma_async_device_unregister(&d->slave);
+ of_dma_controller_free((&op->dev)->of_node);
+
+ list_for_each_entry_safe(c, cn, &d->slave.channels, vc.chan.device_node) {
+ list_del(&c->vc.chan.device_node);
+ tasklet_kill(&c->vc.task);
+ }
+ tasklet_kill(&d->task);
+ clk_disable_unprepare(d->clk);
+ return 0;
+}
+
+static int k3_dma_suspend(struct device *dev)
+{
+ struct k3_dma_dev *d = dev_get_drvdata(dev);
+ u32 stat = 0;
+
+ stat = k3_dma_get_chan_stat(d);
+ if (stat) {
+ dev_warn(d->slave.dev,
+ "chan %d is running fail to suspend\n", stat);
+ return -1;
+ }
+ k3_dma_enable_dma(d, false);
+ clk_disable_unprepare(d->clk);
+ return 0;
+}
+
+static int k3_dma_resume(struct device *dev)
+{
+ struct k3_dma_dev *d = dev_get_drvdata(dev);
+ int ret = 0;
+
+ ret = clk_prepare_enable(d->clk);
+ if (ret < 0) {
+ dev_err(d->slave.dev, "clk_prepare_enable failed: %d\n", ret);
+ return ret;
+ }
+ k3_dma_enable_dma(d, true);
+ return 0;
+}
+
+SIMPLE_DEV_PM_OPS(k3_dma_pmops, k3_dma_suspend, k3_dma_resume);
+
+static struct platform_driver k3_pdma_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .pm = &k3_dma_pmops,
+ .of_match_table = k3_pdma_dt_ids,
+ },
+ .probe = k3_dma_probe,
+ .remove = k3_dma_remove,
+};
+
+module_platform_driver(k3_pdma_driver);
+
+MODULE_DESCRIPTION("Hisilicon k3 DMA Driver");
+MODULE_ALIAS("platform:k3dma");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c
index c26699f..ff8d7827 100644
--- a/drivers/dma/mmp_pdma.c
+++ b/drivers/dma/mmp_pdma.c
@@ -18,7 +18,9 @@
#include <linux/platform_data/mmp_dma.h>
#include <linux/dmapool.h>
#include <linux/of_device.h>
+#include <linux/of_dma.h>
#include <linux/of.h>
+#include <linux/dma/mmp-pdma.h>
#include "dmaengine.h"
@@ -47,6 +49,8 @@
#define DCSR_CMPST (1 << 10) /* The Descriptor Compare Status */
#define DCSR_EORINTR (1 << 9) /* The end of Receive */
+#define DRCMR(n) ((((n) < 64) ? 0x0100 : 0x1100) + \
+ (((n) & 0x3f) << 2))
#define DRCMR_MAPVLD (1 << 7) /* Map Valid (read / write) */
#define DRCMR_CHLNUM 0x1f /* mask for Channel Number (read / write) */
@@ -69,7 +73,7 @@
#define DCMD_LENGTH 0x01fff /* length mask (max = 8K - 1) */
#define PDMA_ALIGNMENT 3
-#define PDMA_MAX_DESC_BYTES 0x1000
+#define PDMA_MAX_DESC_BYTES DCMD_LENGTH
struct mmp_pdma_desc_hw {
u32 ddadr; /* Points to the next descriptor + flags */
@@ -94,6 +98,9 @@ struct mmp_pdma_chan {
struct mmp_pdma_phy *phy;
enum dma_transfer_direction dir;
+ struct mmp_pdma_desc_sw *cyclic_first; /* first desc_sw if channel
+ * is in cyclic mode */
+
/* channel's basic info */
struct tasklet_struct tasklet;
u32 dcmd;
@@ -105,6 +112,7 @@ struct mmp_pdma_chan {
struct list_head chain_pending; /* Link descriptors queue for pending */
struct list_head chain_running; /* Link descriptors queue for running */
bool idle; /* channel state machine */
+ bool byte_align;
struct dma_pool *desc_pool; /* Descriptors pool */
};
@@ -121,6 +129,7 @@ struct mmp_pdma_device {
struct device *dev;
struct dma_device device;
struct mmp_pdma_phy *phy;
+ spinlock_t phy_lock; /* protect alloc/free phy channels */
};
#define tx_to_mmp_pdma_desc(tx) container_of(tx, struct mmp_pdma_desc_sw, async_tx)
@@ -137,15 +146,21 @@ static void set_desc(struct mmp_pdma_phy *phy, dma_addr_t addr)
static void enable_chan(struct mmp_pdma_phy *phy)
{
- u32 reg;
+ u32 reg, dalgn;
if (!phy->vchan)
return;
- reg = phy->vchan->drcmr;
- reg = (((reg) < 64) ? 0x0100 : 0x1100) + (((reg) & 0x3f) << 2);
+ reg = DRCMR(phy->vchan->drcmr);
writel(DRCMR_MAPVLD | phy->idx, phy->base + reg);
+ dalgn = readl(phy->base + DALGN);
+ if (phy->vchan->byte_align)
+ dalgn |= 1 << phy->idx;
+ else
+ dalgn &= ~(1 << phy->idx);
+ writel(dalgn, phy->base + DALGN);
+
reg = (phy->idx << 2) + DCSR;
writel(readl(phy->base + reg) | DCSR_RUN,
phy->base + reg);
@@ -218,7 +233,8 @@ static struct mmp_pdma_phy *lookup_phy(struct mmp_pdma_chan *pchan)
{
int prio, i;
struct mmp_pdma_device *pdev = to_mmp_pdma_dev(pchan->chan.device);
- struct mmp_pdma_phy *phy;
+ struct mmp_pdma_phy *phy, *found = NULL;
+ unsigned long flags;
/*
* dma channel priorities
@@ -227,6 +243,8 @@ static struct mmp_pdma_phy *lookup_phy(struct mmp_pdma_chan *pchan)
* ch 8 - 11, 24 - 27 <--> (2)
* ch 12 - 15, 28 - 31 <--> (3)
*/
+
+ spin_lock_irqsave(&pdev->phy_lock, flags);
for (prio = 0; prio <= (((pdev->dma_channels - 1) & 0xf) >> 2); prio++) {
for (i = 0; i < pdev->dma_channels; i++) {
if (prio != ((i & 0xf) >> 2))
@@ -234,31 +252,34 @@ static struct mmp_pdma_phy *lookup_phy(struct mmp_pdma_chan *pchan)
phy = &pdev->phy[i];
if (!phy->vchan) {
phy->vchan = pchan;
- return phy;
+ found = phy;
+ goto out_unlock;
}
}
}
- return NULL;
+out_unlock:
+ spin_unlock_irqrestore(&pdev->phy_lock, flags);
+ return found;
}
-/* desc->tx_list ==> pending list */
-static void append_pending_queue(struct mmp_pdma_chan *chan,
- struct mmp_pdma_desc_sw *desc)
+static void mmp_pdma_free_phy(struct mmp_pdma_chan *pchan)
{
- struct mmp_pdma_desc_sw *tail =
- to_mmp_pdma_desc(chan->chain_pending.prev);
+ struct mmp_pdma_device *pdev = to_mmp_pdma_dev(pchan->chan.device);
+ unsigned long flags;
+ u32 reg;
- if (list_empty(&chan->chain_pending))
- goto out_splice;
+ if (!pchan->phy)
+ return;
- /* one irq per queue, even appended */
- tail->desc.ddadr = desc->async_tx.phys;
- tail->desc.dcmd &= ~DCMD_ENDIRQEN;
+ /* clear the channel mapping in DRCMR */
+ reg = DRCMR(pchan->phy->vchan->drcmr);
+ writel(0, pchan->phy->base + reg);
- /* softly link to pending list */
-out_splice:
- list_splice_tail_init(&desc->tx_list, &chan->chain_pending);
+ spin_lock_irqsave(&pdev->phy_lock, flags);
+ pchan->phy->vchan = NULL;
+ pchan->phy = NULL;
+ spin_unlock_irqrestore(&pdev->phy_lock, flags);
}
/**
@@ -277,10 +298,7 @@ static void start_pending_queue(struct mmp_pdma_chan *chan)
if (list_empty(&chan->chain_pending)) {
/* chance to re-fetch phy channel with higher prio */
- if (chan->phy) {
- chan->phy->vchan = NULL;
- chan->phy = NULL;
- }
+ mmp_pdma_free_phy(chan);
dev_dbg(chan->dev, "no pending list\n");
return;
}
@@ -326,14 +344,16 @@ static dma_cookie_t mmp_pdma_tx_submit(struct dma_async_tx_descriptor *tx)
cookie = dma_cookie_assign(&child->async_tx);
}
- append_pending_queue(chan, desc);
+ /* softly link to pending list - desc->tx_list ==> pending list */
+ list_splice_tail_init(&desc->tx_list, &chan->chain_pending);
spin_unlock_irqrestore(&chan->desc_lock, flags);
return cookie;
}
-struct mmp_pdma_desc_sw *mmp_pdma_alloc_descriptor(struct mmp_pdma_chan *chan)
+static struct mmp_pdma_desc_sw *
+mmp_pdma_alloc_descriptor(struct mmp_pdma_chan *chan)
{
struct mmp_pdma_desc_sw *desc;
dma_addr_t pdesc;
@@ -377,10 +397,7 @@ static int mmp_pdma_alloc_chan_resources(struct dma_chan *dchan)
dev_err(chan->dev, "unable to allocate descriptor pool\n");
return -ENOMEM;
}
- if (chan->phy) {
- chan->phy->vchan = NULL;
- chan->phy = NULL;
- }
+ mmp_pdma_free_phy(chan);
chan->idle = true;
chan->dev_addr = 0;
return 1;
@@ -411,10 +428,7 @@ static void mmp_pdma_free_chan_resources(struct dma_chan *dchan)
chan->desc_pool = NULL;
chan->idle = true;
chan->dev_addr = 0;
- if (chan->phy) {
- chan->phy->vchan = NULL;
- chan->phy = NULL;
- }
+ mmp_pdma_free_phy(chan);
return;
}
@@ -434,6 +448,7 @@ mmp_pdma_prep_memcpy(struct dma_chan *dchan,
return NULL;
chan = to_mmp_pdma_chan(dchan);
+ chan->byte_align = false;
if (!chan->dir) {
chan->dir = DMA_MEM_TO_MEM;
@@ -450,6 +465,8 @@ mmp_pdma_prep_memcpy(struct dma_chan *dchan,
}
copy = min_t(size_t, len, PDMA_MAX_DESC_BYTES);
+ if (dma_src & 0x7 || dma_dst & 0x7)
+ chan->byte_align = true;
new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & copy);
new->desc.dsadr = dma_src;
@@ -486,6 +503,8 @@ mmp_pdma_prep_memcpy(struct dma_chan *dchan,
new->desc.ddadr = DDADR_STOP;
new->desc.dcmd |= DCMD_ENDIRQEN;
+ chan->cyclic_first = NULL;
+
return &first->async_tx;
fail:
@@ -509,12 +528,16 @@ mmp_pdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
if ((sgl == NULL) || (sg_len == 0))
return NULL;
+ chan->byte_align = false;
+
for_each_sg(sgl, sg, sg_len, i) {
addr = sg_dma_address(sg);
avail = sg_dma_len(sgl);
do {
len = min_t(size_t, avail, PDMA_MAX_DESC_BYTES);
+ if (addr & 0x7)
+ chan->byte_align = true;
/* allocate and populate the descriptor */
new = mmp_pdma_alloc_descriptor(chan);
@@ -557,6 +580,94 @@ mmp_pdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
new->desc.ddadr = DDADR_STOP;
new->desc.dcmd |= DCMD_ENDIRQEN;
+ chan->dir = dir;
+ chan->cyclic_first = NULL;
+
+ return &first->async_tx;
+
+fail:
+ if (first)
+ mmp_pdma_free_desc_list(chan, &first->tx_list);
+ return NULL;
+}
+
+static struct dma_async_tx_descriptor *mmp_pdma_prep_dma_cyclic(
+ struct dma_chan *dchan, dma_addr_t buf_addr, size_t len,
+ size_t period_len, enum dma_transfer_direction direction,
+ unsigned long flags, void *context)
+{
+ struct mmp_pdma_chan *chan;
+ struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new;
+ dma_addr_t dma_src, dma_dst;
+
+ if (!dchan || !len || !period_len)
+ return NULL;
+
+ /* the buffer length must be a multiple of period_len */
+ if (len % period_len != 0)
+ return NULL;
+
+ if (period_len > PDMA_MAX_DESC_BYTES)
+ return NULL;
+
+ chan = to_mmp_pdma_chan(dchan);
+
+ switch (direction) {
+ case DMA_MEM_TO_DEV:
+ dma_src = buf_addr;
+ dma_dst = chan->dev_addr;
+ break;
+ case DMA_DEV_TO_MEM:
+ dma_dst = buf_addr;
+ dma_src = chan->dev_addr;
+ break;
+ default:
+ dev_err(chan->dev, "Unsupported direction for cyclic DMA\n");
+ return NULL;
+ }
+
+ chan->dir = direction;
+
+ do {
+ /* Allocate the link descriptor from DMA pool */
+ new = mmp_pdma_alloc_descriptor(chan);
+ if (!new) {
+ dev_err(chan->dev, "no memory for desc\n");
+ goto fail;
+ }
+
+ new->desc.dcmd = chan->dcmd | DCMD_ENDIRQEN |
+ (DCMD_LENGTH & period_len);
+ new->desc.dsadr = dma_src;
+ new->desc.dtadr = dma_dst;
+
+ if (!first)
+ first = new;
+ else
+ prev->desc.ddadr = new->async_tx.phys;
+
+ new->async_tx.cookie = 0;
+ async_tx_ack(&new->async_tx);
+
+ prev = new;
+ len -= period_len;
+
+ if (chan->dir == DMA_MEM_TO_DEV)
+ dma_src += period_len;
+ else
+ dma_dst += period_len;
+
+ /* Insert the link descriptor to the LD ring */
+ list_add_tail(&new->node, &first->tx_list);
+ } while (len);
+
+ first->async_tx.flags = flags; /* client is in control of this ack */
+ first->async_tx.cookie = -EBUSY;
+
+ /* make the cyclic link */
+ new->desc.ddadr = first->async_tx.phys;
+ chan->cyclic_first = first;
+
return &first->async_tx;
fail:
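
The cyclic prep builds a closed descriptor ring: len/period_len
descriptors, each flagged DCMD_ENDIRQEN so an interrupt fires per period,
with the tail's ddadr pointed back at the head so the engine loops until
terminated. A compact user-space model, with invented field names (next,
irq_en) standing in for ddadr and the DCMD bit:

#include <stdio.h>
#include <stdlib.h>

struct desc {
	struct desc *next;	/* stands in for desc.ddadr */
	size_t len;
	int irq_en;		/* stands in for DCMD_ENDIRQEN */
};

int main(void)
{
	size_t len = 4096, period = 1024, i, n = len / period;
	struct desc *ring = calloc(n, sizeof(*ring));

	if (!ring)
		return 1;
	for (i = 0; i < n; i++) {
		ring[i].len = period;
		ring[i].irq_en = 1;			/* irq per period */
		ring[i].next = &ring[(i + 1) % n];	/* tail wraps to head */
	}
	printf("%zu periods, tail->next == head: %d\n",
	       n, ring[n - 1].next == &ring[0]);
	free(ring);
	return 0;
}
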
@@ -581,10 +692,7 @@ static int mmp_pdma_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd,
switch (cmd) {
case DMA_TERMINATE_ALL:
disable_chan(chan->phy);
- if (chan->phy) {
- chan->phy->vchan = NULL;
- chan->phy = NULL;
- }
+ mmp_pdma_free_phy(chan);
spin_lock_irqsave(&chan->desc_lock, flags);
mmp_pdma_free_desc_list(chan, &chan->chain_pending);
mmp_pdma_free_desc_list(chan, &chan->chain_running);
@@ -619,8 +727,13 @@ static int mmp_pdma_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd,
chan->dcmd |= DCMD_BURST32;
chan->dir = cfg->direction;
- chan->drcmr = cfg->slave_id;
chan->dev_addr = addr;
+ /* FIXME: drivers should be ported over to use the filter
+ * function. Once that's done, the following two lines can
+ * be removed.
+ */
+ if (cfg->slave_id)
+ chan->drcmr = cfg->slave_id;
break;
default:
return -ENOSYS;
@@ -632,15 +745,7 @@ static int mmp_pdma_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd,
static enum dma_status mmp_pdma_tx_status(struct dma_chan *dchan,
dma_cookie_t cookie, struct dma_tx_state *txstate)
{
- struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
- enum dma_status ret;
- unsigned long flags;
-
- spin_lock_irqsave(&chan->desc_lock, flags);
- ret = dma_cookie_status(dchan, cookie, txstate);
- spin_unlock_irqrestore(&chan->desc_lock, flags);
-
- return ret;
+ return dma_cookie_status(dchan, cookie, txstate);
}
/**
@@ -669,29 +774,51 @@ static void dma_do_tasklet(unsigned long data)
LIST_HEAD(chain_cleanup);
unsigned long flags;
- /* submit pending list; callback for each desc; free desc */
+ if (chan->cyclic_first) {
+ dma_async_tx_callback cb = NULL;
+ void *cb_data = NULL;
- spin_lock_irqsave(&chan->desc_lock, flags);
+ spin_lock_irqsave(&chan->desc_lock, flags);
+ desc = chan->cyclic_first;
+ cb = desc->async_tx.callback;
+ cb_data = desc->async_tx.callback_param;
+ spin_unlock_irqrestore(&chan->desc_lock, flags);
+
+ if (cb)
+ cb(cb_data);
- /* update the cookie if we have some descriptors to cleanup */
- if (!list_empty(&chan->chain_running)) {
- dma_cookie_t cookie;
+ return;
+ }
- desc = to_mmp_pdma_desc(chan->chain_running.prev);
- cookie = desc->async_tx.cookie;
- dma_cookie_complete(&desc->async_tx);
+ /* submit pending list; callback for each desc; free desc */
+ spin_lock_irqsave(&chan->desc_lock, flags);
- dev_dbg(chan->dev, "completed_cookie=%d\n", cookie);
+ list_for_each_entry_safe(desc, _desc, &chan->chain_running, node) {
+ /*
+ * move the descriptors to a temporary list so we can drop
+ * the lock during the entire cleanup operation
+ */
+ list_del(&desc->node);
+ list_add(&desc->node, &chain_cleanup);
+
+ /*
+ * Look for the first list entry which has the ENDIRQEN flag
+ * set. That is the descriptor we got an interrupt for, so
+ * complete that transaction and its cookie.
+ */
+ if (desc->desc.dcmd & DCMD_ENDIRQEN) {
+ dma_cookie_t cookie = desc->async_tx.cookie;
+ dma_cookie_complete(&desc->async_tx);
+ dev_dbg(chan->dev, "completed_cookie=%d\n", cookie);
+ break;
+ }
}
/*
- * move the descriptors to a temporary list so we can drop the lock
- * during the entire cleanup operation
+ * The hardware is idle and ready for more when the
+ * chain_running list is empty.
*/
- list_splice_tail_init(&chan->chain_running, &chain_cleanup);
-
- /* the hardware is now idle and ready for more */
- chan->idle = true;
+ chan->idle = list_empty(&chan->chain_running);
/* Start any pending transactions automatically */
start_pending_queue(chan);
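
The reworked tasklet drains chan->chain_running into a cleanup list and
completes the cookie of the first descriptor it meets with DCMD_ENDIRQEN
set, the one whose interrupt actually fired, leaving anything after it
queued. A sketch of that scan, with plain arrays standing in for the
kernel list machinery:

#include <stdio.h>

struct d { int endirq; int cookie; };

int main(void)
{
	struct d running[] = { {0, 1}, {0, 2}, {1, 3}, {0, 4} };
	int i, n = 4, completed = -1;

	for (i = 0; i < n; i++) {
		printf("cleanup cookie %d\n", running[i].cookie);
		if (running[i].endirq) {	/* interrupt boundary */
			completed = running[i].cookie;
			break;
		}
	}
	printf("completed_cookie=%d, %d left running\n",
	       completed, n - i - 1);
	return 0;
}
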
@@ -763,6 +890,39 @@ static struct of_device_id mmp_pdma_dt_ids[] = {
};
MODULE_DEVICE_TABLE(of, mmp_pdma_dt_ids);
+static struct dma_chan *mmp_pdma_dma_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct mmp_pdma_device *d = ofdma->of_dma_data;
+ struct dma_chan *chan, *candidate;
+
+retry:
+ candidate = NULL;
+
+ /* walk the list of channels registered with the current instance and
+ * find one that is currently unused */
+ list_for_each_entry(chan, &d->device.channels, device_node)
+ if (chan->client_count == 0) {
+ candidate = chan;
+ break;
+ }
+
+ if (!candidate)
+ return NULL;
+
+ /* dma_get_slave_channel will return NULL if we lost a race between
+ * the lookup and the reservation */
+ chan = dma_get_slave_channel(candidate);
+
+ if (chan) {
+ struct mmp_pdma_chan *c = to_mmp_pdma_chan(chan);
+ c->drcmr = dma_spec->args[0];
+ return chan;
+ }
+
+ goto retry;
+}
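
mmp_pdma_dma_xlate() retries because the unused-channel lookup and the
dma_get_slave_channel() reservation are two separate steps, so another
requester can win the channel in between. A user-space model of the same
lookup/reserve/retry pattern, with a C11 compare-and-swap standing in for
the kernel reservation (all names invented):

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct chan { atomic_int client_count; };

static bool try_reserve(struct chan *c)	/* models dma_get_slave_channel */
{
	int expected = 0;

	return atomic_compare_exchange_strong(&c->client_count,
					      &expected, 1);
}

static struct chan *xlate(struct chan *chans, int n)
{
	int i;

retry:
	for (i = 0; i < n; i++)
		if (atomic_load(&chans[i].client_count) == 0)
			break;			/* candidate found */
	if (i == n)
		return NULL;			/* all busy */
	if (try_reserve(&chans[i]))
		return &chans[i];
	goto retry;				/* lost the race, rescan */
}

int main(void)
{
	struct chan chans[2] = { { 1 }, { 0 } };
	struct chan *c = xlate(chans, 2);

	printf("got channel %td\n", c ? c - chans : (ptrdiff_t)-1);
	return 0;
}
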
+
static int mmp_pdma_probe(struct platform_device *op)
{
struct mmp_pdma_device *pdev;
@@ -777,10 +937,9 @@ static int mmp_pdma_probe(struct platform_device *op)
return -ENOMEM;
pdev->dev = &op->dev;
- iores = platform_get_resource(op, IORESOURCE_MEM, 0);
- if (!iores)
- return -EINVAL;
+ spin_lock_init(&pdev->phy_lock);
+ iores = platform_get_resource(op, IORESOURCE_MEM, 0);
pdev->base = devm_ioremap_resource(pdev->dev, iores);
if (IS_ERR(pdev->base))
return PTR_ERR(pdev->base);
@@ -825,13 +984,15 @@ static int mmp_pdma_probe(struct platform_device *op)
dma_cap_set(DMA_SLAVE, pdev->device.cap_mask);
dma_cap_set(DMA_MEMCPY, pdev->device.cap_mask);
- dma_cap_set(DMA_SLAVE, pdev->device.cap_mask);
+ dma_cap_set(DMA_CYCLIC, pdev->device.cap_mask);
+ dma_cap_set(DMA_PRIVATE, pdev->device.cap_mask);
pdev->device.dev = &op->dev;
pdev->device.device_alloc_chan_resources = mmp_pdma_alloc_chan_resources;
pdev->device.device_free_chan_resources = mmp_pdma_free_chan_resources;
pdev->device.device_tx_status = mmp_pdma_tx_status;
pdev->device.device_prep_dma_memcpy = mmp_pdma_prep_memcpy;
pdev->device.device_prep_slave_sg = mmp_pdma_prep_slave_sg;
+ pdev->device.device_prep_dma_cyclic = mmp_pdma_prep_dma_cyclic;
pdev->device.device_issue_pending = mmp_pdma_issue_pending;
pdev->device.device_control = mmp_pdma_control;
pdev->device.copy_align = PDMA_ALIGNMENT;
@@ -847,7 +1008,17 @@ static int mmp_pdma_probe(struct platform_device *op)
return ret;
}
- dev_info(pdev->device.dev, "initialized\n");
+ if (op->dev.of_node) {
+ /* Device-tree DMA controller registration */
+ ret = of_dma_controller_register(op->dev.of_node,
+ mmp_pdma_dma_xlate, pdev);
+ if (ret < 0) {
+ dev_err(&op->dev, "of_dma_controller_register failed\n");
+ return ret;
+ }
+ }
+
+ dev_info(pdev->device.dev, "initialized %d channels\n", dma_channels);
return 0;
}
@@ -867,6 +1038,19 @@ static struct platform_driver mmp_pdma_driver = {
.remove = mmp_pdma_remove,
};
+bool mmp_pdma_filter_fn(struct dma_chan *chan, void *param)
+{
+ struct mmp_pdma_chan *c = to_mmp_pdma_chan(chan);
+
+ if (chan->device->dev->driver != &mmp_pdma_driver.driver)
+ return false;
+
+ c->drcmr = *(unsigned int *) param;
+
+ return true;
+}
+EXPORT_SYMBOL_GPL(mmp_pdma_filter_fn);
+
module_platform_driver(mmp_pdma_driver);
MODULE_DESCRIPTION("MARVELL MMP Periphera DMA Driver");
diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c
index 9b93665..38cb517 100644
--- a/drivers/dma/mmp_tdma.c
+++ b/drivers/dma/mmp_tdma.c
@@ -460,7 +460,8 @@ static enum dma_status mmp_tdma_tx_status(struct dma_chan *chan,
{
struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
- dma_set_residue(txstate, tdmac->buf_len - tdmac->pos);
+ dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie,
+ tdmac->buf_len - tdmac->pos);
return tdmac->status;
}
@@ -549,9 +550,6 @@ static int mmp_tdma_probe(struct platform_device *pdev)
}
iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!iores)
- return -EINVAL;
-
tdev->base = devm_ioremap_resource(&pdev->dev, iores);
if (IS_ERR(tdev->base))
return PTR_ERR(tdev->base);
diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c
index 2d95673..2fe4353 100644
--- a/drivers/dma/mpc512x_dma.c
+++ b/drivers/dma/mpc512x_dma.c
@@ -556,15 +556,7 @@ static enum dma_status
mpc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
struct dma_tx_state *txstate)
{
- struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
- enum dma_status ret;
- unsigned long flags;
-
- spin_lock_irqsave(&mchan->lock, flags);
- ret = dma_cookie_status(chan, cookie, txstate);
- spin_unlock_irqrestore(&mchan->lock, flags);
-
- return ret;
+ return dma_cookie_status(chan, cookie, txstate);
}
/* Prepare descriptor for memory to memory copy */
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 200f1a3..536dcb8 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -64,7 +64,7 @@ static u32 mv_desc_get_src_addr(struct mv_xor_desc_slot *desc,
int src_idx)
{
struct mv_xor_desc *hw_desc = desc->hw_desc;
- return hw_desc->phy_src_addr[src_idx];
+ return hw_desc->phy_src_addr[mv_phy_src_idx(src_idx)];
}
@@ -107,32 +107,32 @@ static void mv_desc_set_src_addr(struct mv_xor_desc_slot *desc,
int index, dma_addr_t addr)
{
struct mv_xor_desc *hw_desc = desc->hw_desc;
- hw_desc->phy_src_addr[index] = addr;
+ hw_desc->phy_src_addr[mv_phy_src_idx(index)] = addr;
if (desc->type == DMA_XOR)
hw_desc->desc_command |= (1 << index);
}
static u32 mv_chan_get_current_desc(struct mv_xor_chan *chan)
{
- return __raw_readl(XOR_CURR_DESC(chan));
+ return readl_relaxed(XOR_CURR_DESC(chan));
}
static void mv_chan_set_next_descriptor(struct mv_xor_chan *chan,
u32 next_desc_addr)
{
- __raw_writel(next_desc_addr, XOR_NEXT_DESC(chan));
+ writel_relaxed(next_desc_addr, XOR_NEXT_DESC(chan));
}
static void mv_chan_unmask_interrupts(struct mv_xor_chan *chan)
{
- u32 val = __raw_readl(XOR_INTR_MASK(chan));
+ u32 val = readl_relaxed(XOR_INTR_MASK(chan));
val |= XOR_INTR_MASK_VALUE << (chan->idx * 16);
- __raw_writel(val, XOR_INTR_MASK(chan));
+ writel_relaxed(val, XOR_INTR_MASK(chan));
}
static u32 mv_chan_get_intr_cause(struct mv_xor_chan *chan)
{
- u32 intr_cause = __raw_readl(XOR_INTR_CAUSE(chan));
+ u32 intr_cause = readl_relaxed(XOR_INTR_CAUSE(chan));
intr_cause = (intr_cause >> (chan->idx * 16)) & 0xFFFF;
return intr_cause;
}
@@ -149,13 +149,13 @@ static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan)
{
u32 val = ~(1 << (chan->idx * 16));
dev_dbg(mv_chan_to_devp(chan), "%s, val 0x%08x\n", __func__, val);
- __raw_writel(val, XOR_INTR_CAUSE(chan));
+ writel_relaxed(val, XOR_INTR_CAUSE(chan));
}
static void mv_xor_device_clear_err_status(struct mv_xor_chan *chan)
{
u32 val = 0xFFFF0000 >> (chan->idx * 16);
- __raw_writel(val, XOR_INTR_CAUSE(chan));
+ writel_relaxed(val, XOR_INTR_CAUSE(chan));
}
static int mv_can_chain(struct mv_xor_desc_slot *desc)
@@ -173,7 +173,7 @@ static void mv_set_mode(struct mv_xor_chan *chan,
enum dma_transaction_type type)
{
u32 op_mode;
- u32 config = __raw_readl(XOR_CONFIG(chan));
+ u32 config = readl_relaxed(XOR_CONFIG(chan));
switch (type) {
case DMA_XOR:
@@ -192,7 +192,14 @@ static void mv_set_mode(struct mv_xor_chan *chan,
config &= ~0x7;
config |= op_mode;
- __raw_writel(config, XOR_CONFIG(chan));
+
+#if defined(__BIG_ENDIAN)
+ config |= XOR_DESCRIPTOR_SWAP;
+#else
+ config &= ~XOR_DESCRIPTOR_SWAP;
+#endif
+
+ writel_relaxed(config, XOR_CONFIG(chan));
chan->current_type = type;
}
@@ -201,14 +208,14 @@ static void mv_chan_activate(struct mv_xor_chan *chan)
u32 activation;
dev_dbg(mv_chan_to_devp(chan), " activate chan.\n");
- activation = __raw_readl(XOR_ACTIVATION(chan));
+ activation = readl_relaxed(XOR_ACTIVATION(chan));
activation |= 0x1;
- __raw_writel(activation, XOR_ACTIVATION(chan));
+ writel_relaxed(activation, XOR_ACTIVATION(chan));
}
static char mv_chan_is_busy(struct mv_xor_chan *chan)
{
- u32 state = __raw_readl(XOR_ACTIVATION(chan));
+ u32 state = readl_relaxed(XOR_ACTIVATION(chan));
state = (state >> 4) & 0x3;
@@ -647,7 +654,7 @@ mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
dev_dbg(mv_chan_to_devp(mv_chan),
"%s sw_desc %p async_tx %p\n",
- __func__, sw_desc, sw_desc ? &sw_desc->async_tx : 0);
+ __func__, sw_desc, sw_desc ? &sw_desc->async_tx : NULL);
return sw_desc ? &sw_desc->async_tx : NULL;
}
@@ -755,22 +762,22 @@ static void mv_dump_xor_regs(struct mv_xor_chan *chan)
{
u32 val;
- val = __raw_readl(XOR_CONFIG(chan));
+ val = readl_relaxed(XOR_CONFIG(chan));
dev_err(mv_chan_to_devp(chan), "config 0x%08x\n", val);
- val = __raw_readl(XOR_ACTIVATION(chan));
+ val = readl_relaxed(XOR_ACTIVATION(chan));
dev_err(mv_chan_to_devp(chan), "activation 0x%08x\n", val);
- val = __raw_readl(XOR_INTR_CAUSE(chan));
+ val = readl_relaxed(XOR_INTR_CAUSE(chan));
dev_err(mv_chan_to_devp(chan), "intr cause 0x%08x\n", val);
- val = __raw_readl(XOR_INTR_MASK(chan));
+ val = readl_relaxed(XOR_INTR_MASK(chan));
dev_err(mv_chan_to_devp(chan), "intr mask 0x%08x\n", val);
- val = __raw_readl(XOR_ERROR_CAUSE(chan));
+ val = readl_relaxed(XOR_ERROR_CAUSE(chan));
dev_err(mv_chan_to_devp(chan), "error cause 0x%08x\n", val);
- val = __raw_readl(XOR_ERROR_ADDR(chan));
+ val = readl_relaxed(XOR_ERROR_ADDR(chan));
dev_err(mv_chan_to_devp(chan), "error addr 0x%08x\n", val);
}
@@ -1029,10 +1036,8 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
struct dma_device *dma_dev;
mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL);
- if (!mv_chan) {
- ret = -ENOMEM;
- goto err_free_dma;
- }
+ if (!mv_chan)
+ return ERR_PTR(-ENOMEM);
mv_chan->idx = idx;
mv_chan->irq = irq;
@@ -1166,7 +1171,7 @@ static int mv_xor_probe(struct platform_device *pdev)
{
const struct mbus_dram_target_info *dram;
struct mv_xor_device *xordev;
- struct mv_xor_platform_data *pdata = pdev->dev.platform_data;
+ struct mv_xor_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct resource *res;
int i, ret;
diff --git a/drivers/dma/mv_xor.h b/drivers/dma/mv_xor.h
index c619359..06b067f 100644
--- a/drivers/dma/mv_xor.h
+++ b/drivers/dma/mv_xor.h
@@ -29,8 +29,10 @@
#define MV_XOR_THRESHOLD 1
#define MV_XOR_MAX_CHANNELS 2
+/* Values for the XOR_CONFIG register */
#define XOR_OPERATION_MODE_XOR 0
#define XOR_OPERATION_MODE_MEMCPY 2
+#define XOR_DESCRIPTOR_SWAP BIT(14)
#define XOR_CURR_DESC(chan) (chan->mmr_base + 0x210 + (chan->idx * 4))
#define XOR_NEXT_DESC(chan) (chan->mmr_base + 0x200 + (chan->idx * 4))
@@ -143,7 +145,16 @@ struct mv_xor_desc_slot {
#endif
};
-/* This structure describes XOR descriptor size 64bytes */
+/*
+ * This structure describes the 64-byte XOR descriptor. The
+ * mv_phy_src_idx() macro must be used when indexing the values of the
+ * phy_src_addr[] array, because the 'descriptor swap' feature, used
+ * on big-endian systems, swaps descriptor data within blocks of
+ * 8 bytes. Two consecutive values of the phy_src_addr[] array are
+ * therefore swapped in big-endian mode, which explains the different
+ * mv_phy_src_idx() implementation.
+ */
+#if defined(__LITTLE_ENDIAN)
struct mv_xor_desc {
u32 status; /* descriptor execution status */
u32 crc32_result; /* result of CRC-32 calculation */
@@ -155,6 +166,21 @@ struct mv_xor_desc {
u32 reserved0;
u32 reserved1;
};
+#define mv_phy_src_idx(src_idx) (src_idx)
+#else
+struct mv_xor_desc {
+ u32 crc32_result; /* result of CRC-32 calculation */
+ u32 status; /* descriptor execution status */
+ u32 phy_next_desc; /* next descriptor address pointer */
+ u32 desc_command; /* type of operation to be carried out */
+ u32 phy_dest_addr; /* destination block address */
+ u32 byte_count; /* size of src/dst blocks in bytes */
+ u32 phy_src_addr[8]; /* source block addresses */
+ u32 reserved1;
+ u32 reserved0;
+};
+#define mv_phy_src_idx(src_idx) (src_idx ^ 1)
+#endif
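
A small demonstration of the index trick the comment describes: with
descriptor swap enabled on big-endian kernels, data is swapped within
8-byte blocks, so two adjacent u32 slots trade places, which is exactly
what XORing the low index bit models:

#include <stdio.h>

#define mv_phy_src_idx_le(i)	(i)
#define mv_phy_src_idx_be(i)	((i) ^ 1)	/* adjacent u32s trade places */

int main(void)
{
	int i;

	for (i = 0; i < 4; i++)
		printf("src %d -> LE slot %d, BE slot %d\n",
		       i, mv_phy_src_idx_le(i), mv_phy_src_idx_be(i));
	return 0;
}
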
#define to_mv_sw_desc(addr_hw_desc) \
container_of(addr_hw_desc, struct mv_xor_desc_slot, hw_desc)
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c
index 7195930..ccd13df 100644
--- a/drivers/dma/mxs-dma.c
+++ b/drivers/dma/mxs-dma.c
@@ -23,7 +23,6 @@
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/module.h>
-#include <linux/fsl/mxs-dma.h>
#include <linux/stmp_device.h>
#include <linux/of.h>
#include <linux/of_device.h>
@@ -197,24 +196,6 @@ static struct mxs_dma_chan *to_mxs_dma_chan(struct dma_chan *chan)
return container_of(chan, struct mxs_dma_chan, chan);
}
-int mxs_dma_is_apbh(struct dma_chan *chan)
-{
- struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
- struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
-
- return dma_is_apbh(mxs_dma);
-}
-EXPORT_SYMBOL_GPL(mxs_dma_is_apbh);
-
-int mxs_dma_is_apbx(struct dma_chan *chan)
-{
- struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
- struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
-
- return !dma_is_apbh(mxs_dma);
-}
-EXPORT_SYMBOL_GPL(mxs_dma_is_apbx);
-
static void mxs_dma_reset_chan(struct mxs_dma_chan *mxs_chan)
{
struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
@@ -349,13 +330,9 @@ static irqreturn_t mxs_dma_int_handler(int irq, void *dev_id)
static int mxs_dma_alloc_chan_resources(struct dma_chan *chan)
{
struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
- struct mxs_dma_data *data = chan->private;
struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
int ret;
- if (data)
- mxs_chan->chan_irq = data->chan_irq;
-
mxs_chan->ccw = dma_alloc_coherent(mxs_dma->dma_device.dev,
CCW_BLOCK_SIZE, &mxs_chan->ccw_phys,
GFP_KERNEL);
@@ -622,10 +599,8 @@ static enum dma_status mxs_dma_tx_status(struct dma_chan *chan,
dma_cookie_t cookie, struct dma_tx_state *txstate)
{
struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
- dma_cookie_t last_used;
- last_used = chan->cookie;
- dma_set_tx_state(txstate, chan->completed_cookie, last_used, 0);
+ dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie, 0);
return mxs_chan->status;
}
diff --git a/drivers/dma/of-dma.c b/drivers/dma/of-dma.c
index 75334bd..0b88dd3 100644
--- a/drivers/dma/of-dma.c
+++ b/drivers/dma/of-dma.c
@@ -160,7 +160,8 @@ struct dma_chan *of_dma_request_slave_channel(struct device_node *np,
count = of_property_count_strings(np, "dma-names");
if (count < 0) {
- pr_err("%s: dma-names property missing or empty\n", __func__);
+ pr_err("%s: dma-names property of node '%s' missing or empty\n",
+ __func__, np->full_name);
return NULL;
}
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
index 0bbdea5..61fdc54 100644
--- a/drivers/dma/pch_dma.c
+++ b/drivers/dma/pch_dma.c
@@ -564,14 +564,7 @@ static void pd_free_chan_resources(struct dma_chan *chan)
static enum dma_status pd_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
struct dma_tx_state *txstate)
{
- struct pch_dma_chan *pd_chan = to_pd_chan(chan);
- enum dma_status ret;
-
- spin_lock_irq(&pd_chan->lock);
- ret = dma_cookie_status(chan, cookie, txstate);
- spin_unlock_irq(&pd_chan->lock);
-
- return ret;
+ return dma_cookie_status(chan, cookie, txstate);
}
static void pd_issue_pending(struct dma_chan *chan)
@@ -1036,3 +1029,4 @@ MODULE_DESCRIPTION("Intel EG20T PCH / LAPIS Semicon ML7213/ML7223/ML7831 IOH "
"DMA controller driver");
MODULE_AUTHOR("Yong Wang <yong.y.wang@intel.com>");
MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(pci, pch_dma_id_table);
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index fa645d8..a562d24 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -545,6 +545,8 @@ struct dma_pl330_chan {
/* List of to be xfered descriptors */
struct list_head work_list;
+ /* List of completed descriptors */
+ struct list_head completed_list;
/* Pointer to the DMAC that manages this channel,
* NULL if the channel is available to be acquired.
@@ -2198,66 +2200,6 @@ to_desc(struct dma_async_tx_descriptor *tx)
return container_of(tx, struct dma_pl330_desc, txd);
}
-static inline void free_desc_list(struct list_head *list)
-{
- struct dma_pl330_dmac *pdmac;
- struct dma_pl330_desc *desc;
- struct dma_pl330_chan *pch = NULL;
- unsigned long flags;
-
- /* Finish off the work list */
- list_for_each_entry(desc, list, node) {
- dma_async_tx_callback callback;
- void *param;
-
- /* All desc in a list belong to same channel */
- pch = desc->pchan;
- callback = desc->txd.callback;
- param = desc->txd.callback_param;
-
- if (callback)
- callback(param);
-
- desc->pchan = NULL;
- }
-
- /* pch will be unset if list was empty */
- if (!pch)
- return;
-
- pdmac = pch->dmac;
-
- spin_lock_irqsave(&pdmac->pool_lock, flags);
- list_splice_tail_init(list, &pdmac->desc_pool);
- spin_unlock_irqrestore(&pdmac->pool_lock, flags);
-}
-
-static inline void handle_cyclic_desc_list(struct list_head *list)
-{
- struct dma_pl330_desc *desc;
- struct dma_pl330_chan *pch = NULL;
- unsigned long flags;
-
- list_for_each_entry(desc, list, node) {
- dma_async_tx_callback callback;
-
- /* Change status to reload it */
- desc->status = PREP;
- pch = desc->pchan;
- callback = desc->txd.callback;
- if (callback)
- callback(desc->txd.callback_param);
- }
-
- /* pch will be unset if list was empty */
- if (!pch)
- return;
-
- spin_lock_irqsave(&pch->lock, flags);
- list_splice_tail_init(list, &pch->work_list);
- spin_unlock_irqrestore(&pch->lock, flags);
-}
-
static inline void fill_queue(struct dma_pl330_chan *pch)
{
struct dma_pl330_desc *desc;
@@ -2291,7 +2233,6 @@ static void pl330_tasklet(unsigned long data)
struct dma_pl330_chan *pch = (struct dma_pl330_chan *)data;
struct dma_pl330_desc *desc, *_dt;
unsigned long flags;
- LIST_HEAD(list);
spin_lock_irqsave(&pch->lock, flags);
@@ -2300,7 +2241,7 @@ static void pl330_tasklet(unsigned long data)
if (desc->status == DONE) {
if (!pch->cyclic)
dma_cookie_complete(&desc->txd);
- list_move_tail(&desc->node, &list);
+ list_move_tail(&desc->node, &pch->completed_list);
}
/* Try to submit a req imm. next to the last completed cookie */
@@ -2309,12 +2250,31 @@ static void pl330_tasklet(unsigned long data)
/* Make sure the PL330 Channel thread is active */
pl330_chan_ctrl(pch->pl330_chid, PL330_OP_START);
- spin_unlock_irqrestore(&pch->lock, flags);
+ while (!list_empty(&pch->completed_list)) {
+ dma_async_tx_callback callback;
+ void *callback_param;
- if (pch->cyclic)
- handle_cyclic_desc_list(&list);
- else
- free_desc_list(&list);
+ desc = list_first_entry(&pch->completed_list,
+ struct dma_pl330_desc, node);
+
+ callback = desc->txd.callback;
+ callback_param = desc->txd.callback_param;
+
+ if (pch->cyclic) {
+ desc->status = PREP;
+ list_move_tail(&desc->node, &pch->work_list);
+ } else {
+ desc->status = FREE;
+ list_move_tail(&desc->node, &pch->dmac->desc_pool);
+ }
+
+ if (callback) {
+ spin_unlock_irqrestore(&pch->lock, flags);
+ callback(callback_param);
+ spin_lock_irqsave(&pch->lock, flags);
+ }
+ }
+ spin_unlock_irqrestore(&pch->lock, flags);
}
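
The tasklet above deliberately drops the channel lock around each client
callback, since a callback may submit new work and take the lock itself.
A user-space rendering of that unlock/call/relock pattern with pthreads
(names invented):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t chan_lock = PTHREAD_MUTEX_INITIALIZER;

static void client_callback(void *param)
{
	/* free to take chan_lock itself, e.g. to queue more work */
	printf("callback for %s\n", (const char *)param);
}

static void drain_completed(void (*cb)(void *), void *param)
{
	pthread_mutex_lock(&chan_lock);
	/* ... move one completed descriptor off the list here ... */
	if (cb) {
		pthread_mutex_unlock(&chan_lock);	/* never call out locked */
		cb(param);
		pthread_mutex_lock(&chan_lock);
	}
	/* ... loop back while the completed list is non-empty ... */
	pthread_mutex_unlock(&chan_lock);
}

int main(void)
{
	drain_completed(client_callback, "desc0");
	return 0;
}
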
static void dma_pl330_rqcb(void *token, enum pl330_op_err err)
@@ -2409,7 +2369,7 @@ static int pl330_alloc_chan_resources(struct dma_chan *chan)
static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned long arg)
{
struct dma_pl330_chan *pch = to_pchan(chan);
- struct dma_pl330_desc *desc, *_dt;
+ struct dma_pl330_desc *desc;
unsigned long flags;
struct dma_pl330_dmac *pdmac = pch->dmac;
struct dma_slave_config *slave_config;
@@ -2423,12 +2383,18 @@ static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned
pl330_chan_ctrl(pch->pl330_chid, PL330_OP_FLUSH);
/* Mark all desc done */
- list_for_each_entry_safe(desc, _dt, &pch->work_list , node) {
- desc->status = DONE;
- list_move_tail(&desc->node, &list);
+ list_for_each_entry(desc, &pch->work_list , node) {
+ desc->status = FREE;
+ dma_cookie_complete(&desc->txd);
}
- list_splice_tail_init(&list, &pdmac->desc_pool);
+ list_for_each_entry(desc, &pch->completed_list , node) {
+ desc->status = FREE;
+ dma_cookie_complete(&desc->txd);
+ }
+
+ list_splice_tail_init(&pch->work_list, &pdmac->desc_pool);
+ list_splice_tail_init(&pch->completed_list, &pdmac->desc_pool);
spin_unlock_irqrestore(&pch->lock, flags);
break;
case DMA_SLAVE_CONFIG:
@@ -2814,6 +2780,28 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
return &desc->txd;
}
+static void __pl330_giveback_desc(struct dma_pl330_dmac *pdmac,
+ struct dma_pl330_desc *first)
+{
+ unsigned long flags;
+ struct dma_pl330_desc *desc;
+
+ if (!first)
+ return;
+
+ spin_lock_irqsave(&pdmac->pool_lock, flags);
+
+ while (!list_empty(&first->node)) {
+ desc = list_entry(first->node.next,
+ struct dma_pl330_desc, node);
+ list_move_tail(&desc->node, &pdmac->desc_pool);
+ }
+
+ list_move_tail(&first->node, &pdmac->desc_pool);
+
+ spin_unlock_irqrestore(&pdmac->pool_lock, flags);
+}
+
static struct dma_async_tx_descriptor *
pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
unsigned int sg_len, enum dma_transfer_direction direction,
@@ -2822,7 +2810,6 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
struct dma_pl330_desc *first, *desc = NULL;
struct dma_pl330_chan *pch = to_pchan(chan);
struct scatterlist *sg;
- unsigned long flags;
int i;
dma_addr_t addr;
@@ -2842,20 +2829,7 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
dev_err(pch->dmac->pif.dev,
"%s:%d Unable to fetch desc\n",
__func__, __LINE__);
- if (!first)
- return NULL;
-
- spin_lock_irqsave(&pdmac->pool_lock, flags);
-
- while (!list_empty(&first->node)) {
- desc = list_entry(first->node.next,
- struct dma_pl330_desc, node);
- list_move_tail(&desc->node, &pdmac->desc_pool);
- }
-
- list_move_tail(&first->node, &pdmac->desc_pool);
-
- spin_unlock_irqrestore(&pdmac->pool_lock, flags);
+ __pl330_giveback_desc(pdmac, first);
return NULL;
}
@@ -2896,6 +2870,25 @@ static irqreturn_t pl330_irq_handler(int irq, void *data)
return IRQ_NONE;
}
+#define PL330_DMA_BUSWIDTHS \
+ BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \
+ BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
+ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
+ BIT(DMA_SLAVE_BUSWIDTH_8_BYTES)
+
+static int pl330_dma_device_slave_caps(struct dma_chan *dchan,
+ struct dma_slave_caps *caps)
+{
+ caps->src_addr_widths = PL330_DMA_BUSWIDTHS;
+ caps->dstn_addr_widths = PL330_DMA_BUSWIDTHS;
+ caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+ caps->cmd_pause = false;
+ caps->cmd_terminate = true;
+
+ return 0;
+}
+
static int
pl330_probe(struct amba_device *adev, const struct amba_id *id)
{
@@ -2908,7 +2901,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
int i, ret, irq;
int num_chan;
- pdat = adev->dev.platform_data;
+ pdat = dev_get_platdata(&adev->dev);
/* Allocate a new DMAC and its Channels */
pdmac = devm_kzalloc(&adev->dev, sizeof(*pdmac), GFP_KERNEL);
@@ -2971,6 +2964,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
pch->chan.private = adev->dev.of_node;
INIT_LIST_HEAD(&pch->work_list);
+ INIT_LIST_HEAD(&pch->completed_list);
spin_lock_init(&pch->lock);
pch->pl330_chid = NULL;
pch->chan.device = pd;
@@ -3000,6 +2994,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
pd->device_prep_slave_sg = pl330_prep_slave_sg;
pd->device_control = pl330_control;
pd->device_issue_pending = pl330_issue_pending;
+ pd->device_slave_caps = pl330_dma_device_slave_caps;
ret = dma_async_device_register(pd);
if (ret) {
@@ -3015,6 +3010,14 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
"unable to register DMA to the generic DT DMA helpers\n");
}
}
+ /*
+ * This is the limit for transfers with a buswidth of 1, larger
+ * buswidths will have larger limits.
+ */
+ ret = dma_set_max_seg_size(&adev->dev, 1900800);
+ if (ret)
+ dev_err(&adev->dev, "unable to set the seg size\n");
+
dev_info(&adev->dev,
"Loaded driver for PL330 DMAC-%d\n", adev->periphid);
diff --git a/drivers/dma/sh/Kconfig b/drivers/dma/sh/Kconfig
index 5c1dee2..dadd9e01 100644
--- a/drivers/dma/sh/Kconfig
+++ b/drivers/dma/sh/Kconfig
@@ -22,3 +22,13 @@ config SUDMAC
depends on SH_DMAE_BASE
help
Enable support for the Renesas SUDMAC controllers.
+
+config RCAR_HPB_DMAE
+ tristate "Renesas R-Car HPB DMAC support"
+ depends on SH_DMAE_BASE
+ help
+ Enable support for the Renesas R-Car series DMA controllers.
+
+config SHDMA_R8A73A4
+ def_bool y
+ depends on ARCH_R8A73A4 && SH_DMAE != n
diff --git a/drivers/dma/sh/Makefile b/drivers/dma/sh/Makefile
index c962138..e856af2 100644
--- a/drivers/dma/sh/Makefile
+++ b/drivers/dma/sh/Makefile
@@ -1,3 +1,9 @@
obj-$(CONFIG_SH_DMAE_BASE) += shdma-base.o shdma-of.o
obj-$(CONFIG_SH_DMAE) += shdma.o
+shdma-y := shdmac.o
+ifeq ($(CONFIG_OF),y)
+shdma-$(CONFIG_SHDMA_R8A73A4) += shdma-r8a73a4.o
+endif
+shdma-objs := $(shdma-y)
obj-$(CONFIG_SUDMAC) += sudmac.o
+obj-$(CONFIG_RCAR_HPB_DMAE) += rcar-hpbdma.o
diff --git a/drivers/dma/sh/rcar-hpbdma.c b/drivers/dma/sh/rcar-hpbdma.c
new file mode 100644
index 0000000..45a5202
--- /dev/null
+++ b/drivers/dma/sh/rcar-hpbdma.c
@@ -0,0 +1,655 @@
+/*
+ * Copyright (C) 2011-2013 Renesas Electronics Corporation
+ * Copyright (C) 2013 Cogent Embedded, Inc.
+ *
+ * This file is based on the drivers/dma/sh/shdma.c
+ *
+ * Renesas SuperH DMA Engine support
+ *
+ * This is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * - The SuperH DMA controller has no hardware DMA chain mode.
+ * - The maximum DMA size is 16 MB.
+ *
+ */
+
+#include <linux/dmaengine.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/platform_data/dma-rcar-hpbdma.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/shdma-base.h>
+#include <linux/slab.h>
+
+/* DMA channel registers */
+#define HPB_DMAE_DSAR0 0x00
+#define HPB_DMAE_DDAR0 0x04
+#define HPB_DMAE_DTCR0 0x08
+#define HPB_DMAE_DSAR1 0x0C
+#define HPB_DMAE_DDAR1 0x10
+#define HPB_DMAE_DTCR1 0x14
+#define HPB_DMAE_DSASR 0x18
+#define HPB_DMAE_DDASR 0x1C
+#define HPB_DMAE_DTCSR 0x20
+#define HPB_DMAE_DPTR 0x24
+#define HPB_DMAE_DCR 0x28
+#define HPB_DMAE_DCMDR 0x2C
+#define HPB_DMAE_DSTPR 0x30
+#define HPB_DMAE_DSTSR 0x34
+#define HPB_DMAE_DDBGR 0x38
+#define HPB_DMAE_DDBGR2 0x3C
+#define HPB_DMAE_CHAN(n) (0x40 * (n))
+
+/* DMA command register (DCMDR) bits */
+#define HPB_DMAE_DCMDR_BDOUT BIT(7)
+#define HPB_DMAE_DCMDR_DQSPD BIT(6)
+#define HPB_DMAE_DCMDR_DQSPC BIT(5)
+#define HPB_DMAE_DCMDR_DMSPD BIT(4)
+#define HPB_DMAE_DCMDR_DMSPC BIT(3)
+#define HPB_DMAE_DCMDR_DQEND BIT(2)
+#define HPB_DMAE_DCMDR_DNXT BIT(1)
+#define HPB_DMAE_DCMDR_DMEN BIT(0)
+
+/* DMA forced stop register (DSTPR) bits */
+#define HPB_DMAE_DSTPR_DMSTP BIT(0)
+
+/* DMA status register (DSTSR) bits */
+#define HPB_DMAE_DSTSR_DMSTS BIT(0)
+
+/* DMA common registers */
+#define HPB_DMAE_DTIMR 0x00
+#define HPB_DMAE_DINTSR0 0x0C
+#define HPB_DMAE_DINTSR1 0x10
+#define HPB_DMAE_DINTCR0 0x14
+#define HPB_DMAE_DINTCR1 0x18
+#define HPB_DMAE_DINTMR0 0x1C
+#define HPB_DMAE_DINTMR1 0x20
+#define HPB_DMAE_DACTSR0 0x24
+#define HPB_DMAE_DACTSR1 0x28
+#define HPB_DMAE_HSRSTR(n) (0x40 + (n) * 4)
+#define HPB_DMAE_HPB_DMASPR(n) (0x140 + (n) * 4)
+#define HPB_DMAE_HPB_DMLVLR0 0x160
+#define HPB_DMAE_HPB_DMLVLR1 0x164
+#define HPB_DMAE_HPB_DMSHPT0 0x168
+#define HPB_DMAE_HPB_DMSHPT1 0x16C
+
+#define HPB_DMA_SLAVE_NUMBER 256
+#define HPB_DMA_TCR_MAX 0x01000000 /* 16 MiB */
+
+struct hpb_dmae_chan {
+ struct shdma_chan shdma_chan;
+ int xfer_mode; /* DMA transfer mode */
+#define XFER_SINGLE 1
+#define XFER_DOUBLE 2
+ unsigned plane_idx; /* current DMA information set */
+ bool first_desc; /* first/next transfer */
+ int xmit_shift; /* log_2(bytes_per_xfer) */
+ void __iomem *base;
+ const struct hpb_dmae_slave_config *cfg;
+ char dev_id[16]; /* unique name per DMAC of channel */
+};
+
+struct hpb_dmae_device {
+ struct shdma_dev shdma_dev;
+ spinlock_t reg_lock; /* comm_reg operation lock */
+ struct hpb_dmae_pdata *pdata;
+ void __iomem *chan_reg;
+ void __iomem *comm_reg;
+ void __iomem *reset_reg;
+ void __iomem *mode_reg;
+};
+
+struct hpb_dmae_regs {
+ u32 sar; /* SAR / source address */
+ u32 dar; /* DAR / destination address */
+ u32 tcr; /* TCR / transfer count */
+};
+
+struct hpb_desc {
+ struct shdma_desc shdma_desc;
+ struct hpb_dmae_regs hw;
+ unsigned plane_idx;
+};
+
+#define to_chan(schan) container_of(schan, struct hpb_dmae_chan, shdma_chan)
+#define to_desc(sdesc) container_of(sdesc, struct hpb_desc, shdma_desc)
+#define to_dev(sc) container_of(sc->shdma_chan.dma_chan.device, \
+ struct hpb_dmae_device, shdma_dev.dma_dev)
+
+static void ch_reg_write(struct hpb_dmae_chan *hpb_dc, u32 data, u32 reg)
+{
+ iowrite32(data, hpb_dc->base + reg);
+}
+
+static u32 ch_reg_read(struct hpb_dmae_chan *hpb_dc, u32 reg)
+{
+ return ioread32(hpb_dc->base + reg);
+}
+
+static void dcmdr_write(struct hpb_dmae_device *hpbdev, u32 data)
+{
+ iowrite32(data, hpbdev->chan_reg + HPB_DMAE_DCMDR);
+}
+
+static void hsrstr_write(struct hpb_dmae_device *hpbdev, u32 ch)
+{
+ iowrite32(0x1, hpbdev->comm_reg + HPB_DMAE_HSRSTR(ch));
+}
+
+static u32 dintsr_read(struct hpb_dmae_device *hpbdev, u32 ch)
+{
+ u32 v;
+
+ if (ch < 32)
+ v = ioread32(hpbdev->comm_reg + HPB_DMAE_DINTSR0) >> ch;
+ else
+ v = ioread32(hpbdev->comm_reg + HPB_DMAE_DINTSR1) >> (ch - 32);
+ return v & 0x1;
+}
+
+static void dintcr_write(struct hpb_dmae_device *hpbdev, u32 ch)
+{
+ if (ch < 32)
+ iowrite32((0x1 << ch), hpbdev->comm_reg + HPB_DMAE_DINTCR0);
+ else
+ iowrite32((0x1 << (ch - 32)),
+ hpbdev->comm_reg + HPB_DMAE_DINTCR1);
+}
+
+static void asyncmdr_write(struct hpb_dmae_device *hpbdev, u32 data)
+{
+ iowrite32(data, hpbdev->mode_reg);
+}
+
+static u32 asyncmdr_read(struct hpb_dmae_device *hpbdev)
+{
+ return ioread32(hpbdev->mode_reg);
+}
+
+static void hpb_dmae_enable_int(struct hpb_dmae_device *hpbdev, u32 ch)
+{
+ u32 intreg;
+
+ spin_lock_irq(&hpbdev->reg_lock);
+ if (ch < 32) {
+ intreg = ioread32(hpbdev->comm_reg + HPB_DMAE_DINTMR0);
+ iowrite32(BIT(ch) | intreg,
+ hpbdev->comm_reg + HPB_DMAE_DINTMR0);
+ } else {
+ intreg = ioread32(hpbdev->comm_reg + HPB_DMAE_DINTMR1);
+ iowrite32(BIT(ch - 32) | intreg,
+ hpbdev->comm_reg + HPB_DMAE_DINTMR1);
+ }
+ spin_unlock_irq(&hpbdev->reg_lock);
+}
+
+static void hpb_dmae_async_reset(struct hpb_dmae_device *hpbdev, u32 data)
+{
+ u32 rstr;
+ int timeout = 10000; /* 100 ms */
+
+ spin_lock(&hpbdev->reg_lock);
+ rstr = ioread32(hpbdev->reset_reg);
+ rstr |= data;
+ iowrite32(rstr, hpbdev->reset_reg);
+ do {
+ rstr = ioread32(hpbdev->reset_reg);
+ if ((rstr & data) == data)
+ break;
+ udelay(10);
+ } while (timeout--);
+
+ if (timeout < 0)
+ dev_err(hpbdev->shdma_dev.dma_dev.dev,
+ "%s timeout\n", __func__);
+
+ rstr &= ~data;
+ iowrite32(rstr, hpbdev->reset_reg);
+ spin_unlock(&hpbdev->reg_lock);
+}
+
+static void hpb_dmae_set_async_mode(struct hpb_dmae_device *hpbdev,
+ u32 mask, u32 data)
+{
+ u32 mode;
+
+ spin_lock_irq(&hpbdev->reg_lock);
+ mode = asyncmdr_read(hpbdev);
+ mode &= ~mask;
+ mode |= data;
+ asyncmdr_write(hpbdev, mode);
+ spin_unlock_irq(&hpbdev->reg_lock);
+}
+
+static void hpb_dmae_ctl_stop(struct hpb_dmae_device *hpbdev)
+{
+ dcmdr_write(hpbdev, HPB_DMAE_DCMDR_DQSPD);
+}
+
+static void hpb_dmae_reset(struct hpb_dmae_device *hpbdev)
+{
+ u32 ch;
+
+ for (ch = 0; ch < hpbdev->pdata->num_hw_channels; ch++)
+ hsrstr_write(hpbdev, ch);
+}
+
+static unsigned int calc_xmit_shift(struct hpb_dmae_chan *hpb_chan)
+{
+ struct hpb_dmae_device *hpbdev = to_dev(hpb_chan);
+ struct hpb_dmae_pdata *pdata = hpbdev->pdata;
+ int width = ch_reg_read(hpb_chan, HPB_DMAE_DCR);
+ int i;
+
+ switch (width & (HPB_DMAE_DCR_SPDS_MASK | HPB_DMAE_DCR_DPDS_MASK)) {
+ case HPB_DMAE_DCR_SPDS_8BIT | HPB_DMAE_DCR_DPDS_8BIT:
+ default:
+ i = XMIT_SZ_8BIT;
+ break;
+ case HPB_DMAE_DCR_SPDS_16BIT | HPB_DMAE_DCR_DPDS_16BIT:
+ i = XMIT_SZ_16BIT;
+ break;
+ case HPB_DMAE_DCR_SPDS_32BIT | HPB_DMAE_DCR_DPDS_32BIT:
+ i = XMIT_SZ_32BIT;
+ break;
+ }
+ return pdata->ts_shift[i];
+}
+
+static void hpb_dmae_set_reg(struct hpb_dmae_chan *hpb_chan,
+ struct hpb_dmae_regs *hw, unsigned plane)
+{
+ ch_reg_write(hpb_chan, hw->sar,
+ plane ? HPB_DMAE_DSAR1 : HPB_DMAE_DSAR0);
+ ch_reg_write(hpb_chan, hw->dar,
+ plane ? HPB_DMAE_DDAR1 : HPB_DMAE_DDAR0);
+ ch_reg_write(hpb_chan, hw->tcr >> hpb_chan->xmit_shift,
+ plane ? HPB_DMAE_DTCR1 : HPB_DMAE_DTCR0);
+}
+
+static void hpb_dmae_start(struct hpb_dmae_chan *hpb_chan, bool next)
+{
+ ch_reg_write(hpb_chan, (next ? HPB_DMAE_DCMDR_DNXT : 0) |
+ HPB_DMAE_DCMDR_DMEN, HPB_DMAE_DCMDR);
+}
+
+static void hpb_dmae_halt(struct shdma_chan *schan)
+{
+ struct hpb_dmae_chan *chan = to_chan(schan);
+
+ ch_reg_write(chan, HPB_DMAE_DCMDR_DQEND, HPB_DMAE_DCMDR);
+ ch_reg_write(chan, HPB_DMAE_DSTPR_DMSTP, HPB_DMAE_DSTPR);
+}
+
+static const struct hpb_dmae_slave_config *
+hpb_dmae_find_slave(struct hpb_dmae_chan *hpb_chan, int slave_id)
+{
+ struct hpb_dmae_device *hpbdev = to_dev(hpb_chan);
+ struct hpb_dmae_pdata *pdata = hpbdev->pdata;
+ int i;
+
+ if (slave_id >= HPB_DMA_SLAVE_NUMBER)
+ return NULL;
+
+ for (i = 0; i < pdata->num_slaves; i++)
+ if (pdata->slaves[i].id == slave_id)
+ return pdata->slaves + i;
+
+ return NULL;
+}
+
+static void hpb_dmae_start_xfer(struct shdma_chan *schan,
+ struct shdma_desc *sdesc)
+{
+ struct hpb_dmae_chan *chan = to_chan(schan);
+ struct hpb_dmae_device *hpbdev = to_dev(chan);
+ struct hpb_desc *desc = to_desc(sdesc);
+
+ if (chan->cfg->flags & HPB_DMAE_SET_ASYNC_RESET)
+ hpb_dmae_async_reset(hpbdev, chan->cfg->rstr);
+
+ desc->plane_idx = chan->plane_idx;
+ hpb_dmae_set_reg(chan, &desc->hw, chan->plane_idx);
+ hpb_dmae_start(chan, !chan->first_desc);
+
+ if (chan->xfer_mode == XFER_DOUBLE) {
+ chan->plane_idx ^= 1;
+ chan->first_desc = false;
+ }
+}
+
+static bool hpb_dmae_desc_completed(struct shdma_chan *schan,
+ struct shdma_desc *sdesc)
+{
+ /*
+ * This is correct since we always have at most single
+ * outstanding DMA transfer per channel, and by the time
+ * we get completion interrupt the transfer is completed.
+ * This will change if we ever use alternating DMA
+ * information sets and submit two descriptors at once.
+ */
+ return true;
+}
+
+static bool hpb_dmae_chan_irq(struct shdma_chan *schan, int irq)
+{
+ struct hpb_dmae_chan *chan = to_chan(schan);
+ struct hpb_dmae_device *hpbdev = to_dev(chan);
+ int ch = chan->cfg->dma_ch;
+
+ /* Check Complete DMA Transfer */
+ if (dintsr_read(hpbdev, ch)) {
+ /* Clear Interrupt status */
+ dintcr_write(hpbdev, ch);
+ return true;
+ }
+ return false;
+}
+
+static int hpb_dmae_desc_setup(struct shdma_chan *schan,
+ struct shdma_desc *sdesc,
+ dma_addr_t src, dma_addr_t dst, size_t *len)
+{
+ struct hpb_desc *desc = to_desc(sdesc);
+
+ if (*len > (size_t)HPB_DMA_TCR_MAX)
+ *len = (size_t)HPB_DMA_TCR_MAX;
+
+ desc->hw.sar = src;
+ desc->hw.dar = dst;
+ desc->hw.tcr = *len;
+
+ return 0;
+}
+
+static size_t hpb_dmae_get_partial(struct shdma_chan *schan,
+ struct shdma_desc *sdesc)
+{
+ struct hpb_desc *desc = to_desc(sdesc);
+ struct hpb_dmae_chan *chan = to_chan(schan);
+ u32 tcr = ch_reg_read(chan, desc->plane_idx ?
+ HPB_DMAE_DTCR1 : HPB_DMAE_DTCR0);
+
+ return (desc->hw.tcr - tcr) << chan->xmit_shift;
+}
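
hpb_dmae_get_partial() turns a transfer-count delta into bytes: the TCR
registers count transfer units rather than bytes, so the unit delta is
shifted left by xmit_shift (log2 of bytes per unit). A sketch of the
intended arithmetic, with illustrative values only:

#include <stdio.h>

int main(void)
{
	unsigned int programmed = 0x1000;	/* units written to DTCRx */
	unsigned int remaining = 0x0400;	/* units read back from DTCRx */
	unsigned int xmit_shift = 2;		/* 32-bit units: log2(4) */

	printf("partial = %u bytes\n",
	       (programmed - remaining) << xmit_shift);
	return 0;
}
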
+
+static bool hpb_dmae_channel_busy(struct shdma_chan *schan)
+{
+ struct hpb_dmae_chan *chan = to_chan(schan);
+ u32 dstsr = ch_reg_read(chan, HPB_DMAE_DSTSR);
+
+ return (dstsr & HPB_DMAE_DSTSR_DMSTS) == HPB_DMAE_DSTSR_DMSTS;
+}
+
+static int
+hpb_dmae_alloc_chan_resources(struct hpb_dmae_chan *hpb_chan,
+ const struct hpb_dmae_slave_config *cfg)
+{
+ struct hpb_dmae_device *hpbdev = to_dev(hpb_chan);
+ struct hpb_dmae_pdata *pdata = hpbdev->pdata;
+ const struct hpb_dmae_channel *channel = pdata->channels;
+ int slave_id = cfg->id;
+ int i, err;
+
+ for (i = 0; i < pdata->num_channels; i++, channel++) {
+ if (channel->s_id == slave_id) {
+ struct device *dev = hpb_chan->shdma_chan.dev;
+
+ hpb_chan->base = hpbdev->chan_reg +
+ HPB_DMAE_CHAN(cfg->dma_ch);
+
+ dev_dbg(dev, "Detected Slave device\n");
+ dev_dbg(dev, " -- slave_id : 0x%x\n", slave_id);
+ dev_dbg(dev, " -- cfg->dma_ch : %d\n", cfg->dma_ch);
+ dev_dbg(dev, " -- channel->ch_irq: %d\n",
+ channel->ch_irq);
+ break;
+ }
+ }
+
+ err = shdma_request_irq(&hpb_chan->shdma_chan, channel->ch_irq,
+ IRQF_SHARED, hpb_chan->dev_id);
+ if (err) {
+ dev_err(hpb_chan->shdma_chan.dev,
+ "DMA channel request_irq %d failed with error %d\n",
+ channel->ch_irq, err);
+ return err;
+ }
+
+ hpb_chan->plane_idx = 0;
+ hpb_chan->first_desc = true;
+
+ if ((cfg->dcr & (HPB_DMAE_DCR_CT | HPB_DMAE_DCR_DIP)) == 0) {
+ hpb_chan->xfer_mode = XFER_SINGLE;
+ } else if ((cfg->dcr & (HPB_DMAE_DCR_CT | HPB_DMAE_DCR_DIP)) ==
+ (HPB_DMAE_DCR_CT | HPB_DMAE_DCR_DIP)) {
+ hpb_chan->xfer_mode = XFER_DOUBLE;
+ } else {
+ dev_err(hpb_chan->shdma_chan.dev, "DCR setting error\n");
+ shdma_free_irq(&hpb_chan->shdma_chan);
+ return -EINVAL;
+ }
+
+ if (cfg->flags & HPB_DMAE_SET_ASYNC_MODE)
+ hpb_dmae_set_async_mode(hpbdev, cfg->mdm, cfg->mdr);
+ ch_reg_write(hpb_chan, cfg->dcr, HPB_DMAE_DCR);
+ ch_reg_write(hpb_chan, cfg->port, HPB_DMAE_DPTR);
+ hpb_chan->xmit_shift = calc_xmit_shift(hpb_chan);
+ hpb_dmae_enable_int(hpbdev, cfg->dma_ch);
+
+ return 0;
+}
+
+static int hpb_dmae_set_slave(struct shdma_chan *schan, int slave_id, bool try)
+{
+ struct hpb_dmae_chan *chan = to_chan(schan);
+ const struct hpb_dmae_slave_config *sc =
+ hpb_dmae_find_slave(chan, slave_id);
+
+ if (!sc)
+ return -ENODEV;
+ if (try)
+ return 0;
+ chan->cfg = sc;
+ return hpb_dmae_alloc_chan_resources(chan, sc);
+}
+
+static void hpb_dmae_setup_xfer(struct shdma_chan *schan, int slave_id)
+{
+}
+
+static dma_addr_t hpb_dmae_slave_addr(struct shdma_chan *schan)
+{
+ struct hpb_dmae_chan *chan = to_chan(schan);
+
+ return chan->cfg->addr;
+}
+
+static struct shdma_desc *hpb_dmae_embedded_desc(void *buf, int i)
+{
+ return &((struct hpb_desc *)buf)[i].shdma_desc;
+}
+
+static const struct shdma_ops hpb_dmae_ops = {
+ .desc_completed = hpb_dmae_desc_completed,
+ .halt_channel = hpb_dmae_halt,
+ .channel_busy = hpb_dmae_channel_busy,
+ .slave_addr = hpb_dmae_slave_addr,
+ .desc_setup = hpb_dmae_desc_setup,
+ .set_slave = hpb_dmae_set_slave,
+ .setup_xfer = hpb_dmae_setup_xfer,
+ .start_xfer = hpb_dmae_start_xfer,
+ .embedded_desc = hpb_dmae_embedded_desc,
+ .chan_irq = hpb_dmae_chan_irq,
+ .get_partial = hpb_dmae_get_partial,
+};
+
+static int hpb_dmae_chan_probe(struct hpb_dmae_device *hpbdev, int id)
+{
+ struct shdma_dev *sdev = &hpbdev->shdma_dev;
+ struct platform_device *pdev =
+ to_platform_device(hpbdev->shdma_dev.dma_dev.dev);
+ struct hpb_dmae_chan *new_hpb_chan;
+ struct shdma_chan *schan;
+
+ /* Alloc channel */
+ new_hpb_chan = devm_kzalloc(&pdev->dev,
+ sizeof(struct hpb_dmae_chan), GFP_KERNEL);
+ if (!new_hpb_chan) {
+ dev_err(hpbdev->shdma_dev.dma_dev.dev,
+ "No free memory for allocating DMA channels!\n");
+ return -ENOMEM;
+ }
+
+ schan = &new_hpb_chan->shdma_chan;
+ shdma_chan_probe(sdev, schan, id);
+
+ if (pdev->id >= 0)
+ snprintf(new_hpb_chan->dev_id, sizeof(new_hpb_chan->dev_id),
+ "hpb-dmae%d.%d", pdev->id, id);
+ else
+ snprintf(new_hpb_chan->dev_id, sizeof(new_hpb_chan->dev_id),
+ "hpb-dma.%d", id);
+
+ return 0;
+}
+
+static int hpb_dmae_probe(struct platform_device *pdev)
+{
+ struct hpb_dmae_pdata *pdata = pdev->dev.platform_data;
+ struct hpb_dmae_device *hpbdev;
+ struct dma_device *dma_dev;
+ struct resource *chan, *comm, *rest, *mode, *irq_res;
+ int err, i;
+
+ /* Get platform data */
+ if (!pdata || !pdata->num_channels)
+ return -ENODEV;
+
+ chan = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ comm = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ rest = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+ mode = platform_get_resource(pdev, IORESOURCE_MEM, 3);
+
+ irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!irq_res)
+ return -ENODEV;
+
+ hpbdev = devm_kzalloc(&pdev->dev, sizeof(struct hpb_dmae_device),
+ GFP_KERNEL);
+ if (!hpbdev) {
+ dev_err(&pdev->dev, "Not enough memory\n");
+ return -ENOMEM;
+ }
+
+ hpbdev->chan_reg = devm_ioremap_resource(&pdev->dev, chan);
+ if (IS_ERR(hpbdev->chan_reg))
+ return PTR_ERR(hpbdev->chan_reg);
+
+ hpbdev->comm_reg = devm_ioremap_resource(&pdev->dev, comm);
+ if (IS_ERR(hpbdev->comm_reg))
+ return PTR_ERR(hpbdev->comm_reg);
+
+ hpbdev->reset_reg = devm_ioremap_resource(&pdev->dev, rest);
+ if (IS_ERR(hpbdev->reset_reg))
+ return PTR_ERR(hpbdev->reset_reg);
+
+ hpbdev->mode_reg = devm_ioremap_resource(&pdev->dev, mode);
+ if (IS_ERR(hpbdev->mode_reg))
+ return PTR_ERR(hpbdev->mode_reg);
+
+ dma_dev = &hpbdev->shdma_dev.dma_dev;
+
+ spin_lock_init(&hpbdev->reg_lock);
+
+ /* Platform data */
+ hpbdev->pdata = pdata;
+
+ pm_runtime_enable(&pdev->dev);
+ err = pm_runtime_get_sync(&pdev->dev);
+ if (err < 0)
+ dev_err(&pdev->dev, "%s(): GET = %d\n", __func__, err);
+
+ /* Reset DMA controller */
+ hpb_dmae_reset(hpbdev);
+
+ pm_runtime_put(&pdev->dev);
+
+ dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
+ dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
+
+ hpbdev->shdma_dev.ops = &hpb_dmae_ops;
+ hpbdev->shdma_dev.desc_size = sizeof(struct hpb_desc);
+ err = shdma_init(&pdev->dev, &hpbdev->shdma_dev, pdata->num_channels);
+ if (err < 0)
+ goto error;
+
+ /* Create DMA channels */
+ for (i = 0; i < pdata->num_channels; i++)
+ hpb_dmae_chan_probe(hpbdev, i);
+
+ platform_set_drvdata(pdev, hpbdev);
+ err = dma_async_device_register(dma_dev);
+ if (!err)
+ return 0;
+
+ shdma_cleanup(&hpbdev->shdma_dev);
+error:
+ pm_runtime_disable(&pdev->dev);
+ return err;
+}
+
+static void hpb_dmae_chan_remove(struct hpb_dmae_device *hpbdev)
+{
+ struct dma_device *dma_dev = &hpbdev->shdma_dev.dma_dev;
+ struct shdma_chan *schan;
+ int i;
+
+ shdma_for_each_chan(schan, &hpbdev->shdma_dev, i) {
+ BUG_ON(!schan);
+
+ shdma_free_irq(schan);
+ shdma_chan_remove(schan);
+ }
+ dma_dev->chancnt = 0;
+}
+
+static int hpb_dmae_remove(struct platform_device *pdev)
+{
+ struct hpb_dmae_device *hpbdev = platform_get_drvdata(pdev);
+
+ dma_async_device_unregister(&hpbdev->shdma_dev.dma_dev);
+
+ pm_runtime_disable(&pdev->dev);
+
+ hpb_dmae_chan_remove(hpbdev);
+
+ return 0;
+}
+
+static void hpb_dmae_shutdown(struct platform_device *pdev)
+{
+ struct hpb_dmae_device *hpbdev = platform_get_drvdata(pdev);
+ hpb_dmae_ctl_stop(hpbdev);
+}
+
+static struct platform_driver hpb_dmae_driver = {
+ .probe = hpb_dmae_probe,
+ .remove = hpb_dmae_remove,
+ .shutdown = hpb_dmae_shutdown,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "hpb-dma-engine",
+ },
+};
+module_platform_driver(hpb_dmae_driver);
+
+MODULE_AUTHOR("Max Filippov <max.filippov@cogentembedded.com>");
+MODULE_DESCRIPTION("Renesas HPB DMA Engine driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/dma/sh/shdma-arm.h b/drivers/dma/sh/shdma-arm.h
new file mode 100644
index 0000000..a2b8258
--- /dev/null
+++ b/drivers/dma/sh/shdma-arm.h
@@ -0,0 +1,51 @@
+/*
+ * Renesas SuperH DMA Engine support
+ *
+ * Copyright (C) 2013 Renesas Electronics, Inc.
+ *
+ * This is free software; you can redistribute it and/or modify it under the
+ * terms of version 2 of the GNU General Public License as published by the
+ * Free Software Foundation.
+ */
+
+#ifndef SHDMA_ARM_H
+#define SHDMA_ARM_H
+
+#include "shdma.h"
+
+/* Transmit sizes and respective CHCR register values */
+enum {
+ XMIT_SZ_8BIT = 0,
+ XMIT_SZ_16BIT = 1,
+ XMIT_SZ_32BIT = 2,
+ XMIT_SZ_64BIT = 7,
+ XMIT_SZ_128BIT = 3,
+ XMIT_SZ_256BIT = 4,
+ XMIT_SZ_512BIT = 5,
+};
+
+/* log2(transfer size in bytes) - used to calculate the number of transfers */
+#define SH_DMAE_TS_SHIFT { \
+ [XMIT_SZ_8BIT] = 0, \
+ [XMIT_SZ_16BIT] = 1, \
+ [XMIT_SZ_32BIT] = 2, \
+ [XMIT_SZ_64BIT] = 3, \
+ [XMIT_SZ_128BIT] = 4, \
+ [XMIT_SZ_256BIT] = 5, \
+ [XMIT_SZ_512BIT] = 6, \
+}
+
+#define TS_LOW_BIT 0x3 /* --xx */
+#define TS_HI_BIT 0xc /* xx-- */
+
+#define TS_LOW_SHIFT (3)
+#define TS_HI_SHIFT (20 - 2) /* 2 bits for shifted low TS */
+
+#define TS_INDEX2VAL(i) \
+ ((((i) & TS_LOW_BIT) << TS_LOW_SHIFT) |\
+ (((i) & TS_HI_BIT) << TS_HI_SHIFT))
+
+#define CHCR_TX(xmit_sz) (DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL((xmit_sz)))
+#define CHCR_RX(xmit_sz) (DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL((xmit_sz)))
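+
+/*
+ * For illustration: TS_INDEX2VAL() splits the transfer-size index across
+ * CHCR[4:3] (low two bits) and CHCR[21:20] (high two bits), so
+ * TS_INDEX2VAL(XMIT_SZ_32BIT) = ((2 & 0x3) << 3) | ((2 & 0xc) << 18) = 0x10
+ * and TS_INDEX2VAL(XMIT_SZ_64BIT) = 0x18 | 0x100000 = 0x100018.
+ */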
+
+#endif
diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c
index 28ca361..d94ab59 100644
--- a/drivers/dma/sh/shdma-base.c
+++ b/drivers/dma/sh/shdma-base.c
@@ -171,7 +171,8 @@ static struct shdma_desc *shdma_get_desc(struct shdma_chan *schan)
return NULL;
}
-static int shdma_setup_slave(struct shdma_chan *schan, int slave_id)
+static int shdma_setup_slave(struct shdma_chan *schan, int slave_id,
+ dma_addr_t slave_addr)
{
struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
const struct shdma_ops *ops = sdev->ops;
@@ -179,7 +180,7 @@ static int shdma_setup_slave(struct shdma_chan *schan, int slave_id)
if (schan->dev->of_node) {
match = schan->hw_req;
- ret = ops->set_slave(schan, match, true);
+ ret = ops->set_slave(schan, match, slave_addr, true);
if (ret < 0)
return ret;
@@ -194,7 +195,7 @@ static int shdma_setup_slave(struct shdma_chan *schan, int slave_id)
if (test_and_set_bit(slave_id, shdma_slave_used))
return -EBUSY;
- ret = ops->set_slave(schan, match, false);
+ ret = ops->set_slave(schan, match, slave_addr, false);
if (ret < 0) {
clear_bit(slave_id, shdma_slave_used);
return ret;
@@ -236,7 +237,7 @@ bool shdma_chan_filter(struct dma_chan *chan, void *arg)
if (!schan->dev->of_node && match >= slave_num)
return false;
- ret = ops->set_slave(schan, match, true);
+ ret = ops->set_slave(schan, match, 0, true);
if (ret < 0)
return false;
@@ -259,7 +260,7 @@ static int shdma_alloc_chan_resources(struct dma_chan *chan)
*/
if (slave) {
/* Legacy mode: .private is set in filter */
- ret = shdma_setup_slave(schan, slave->slave_id);
+ ret = shdma_setup_slave(schan, slave->slave_id, 0);
if (ret < 0)
goto esetslave;
} else {
@@ -680,7 +681,9 @@ static int shdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
* channel, while using it...
*/
config = (struct dma_slave_config *)arg;
- ret = shdma_setup_slave(schan, config->slave_id);
+ ret = shdma_setup_slave(schan, config->slave_id,
+ config->direction == DMA_DEV_TO_MEM ?
+ config->src_addr : config->dst_addr);
if (ret < 0)
return ret;
break;
@@ -831,8 +834,8 @@ static irqreturn_t chan_irqt(int irq, void *dev)
int shdma_request_irq(struct shdma_chan *schan, int irq,
unsigned long flags, const char *name)
{
- int ret = request_threaded_irq(irq, chan_irq, chan_irqt,
- flags, name, schan);
+ int ret = devm_request_threaded_irq(schan->dev, irq, chan_irq,
+ chan_irqt, flags, name, schan);
schan->irq = ret < 0 ? ret : irq;
@@ -840,13 +843,6 @@ int shdma_request_irq(struct shdma_chan *schan, int irq,
}
EXPORT_SYMBOL(shdma_request_irq);
-void shdma_free_irq(struct shdma_chan *schan)
-{
- if (schan->irq >= 0)
- free_irq(schan->irq, schan);
-}
-EXPORT_SYMBOL(shdma_free_irq);
-
void shdma_chan_probe(struct shdma_dev *sdev,
struct shdma_chan *schan, int id)
{
diff --git a/drivers/dma/sh/shdma-of.c b/drivers/dma/sh/shdma-of.c
index 11bcb05..06473a0 100644
--- a/drivers/dma/sh/shdma-of.c
+++ b/drivers/dma/sh/shdma-of.c
@@ -42,12 +42,9 @@ static struct dma_chan *shdma_of_xlate(struct of_phandle_args *dma_spec,
static int shdma_of_probe(struct platform_device *pdev)
{
- const struct of_dev_auxdata *lookup = pdev->dev.platform_data;
+ const struct of_dev_auxdata *lookup = dev_get_platdata(&pdev->dev);
int ret;
- if (!lookup)
- return -EINVAL;
-
ret = of_dma_controller_register(pdev->dev.of_node,
shdma_of_xlate, pdev);
if (ret < 0)
diff --git a/drivers/dma/sh/shdma-r8a73a4.c b/drivers/dma/sh/shdma-r8a73a4.c
new file mode 100644
index 0000000..4fb9997
--- /dev/null
+++ b/drivers/dma/sh/shdma-r8a73a4.c
@@ -0,0 +1,77 @@
+/*
+ * Renesas SuperH DMA Engine support for r8a73a4 (APE6) SoCs
+ *
+ * Copyright (C) 2013 Renesas Electronics, Inc.
+ *
+ * This is free software; you can redistribute it and/or modify it under the
+ * terms of version 2 of the GNU General Public License as published by the
+ * Free Software Foundation.
+ */
+#include <linux/sh_dma.h>
+
+#include "shdma-arm.h"
+
+const unsigned int dma_ts_shift[] = SH_DMAE_TS_SHIFT;
+
+static const struct sh_dmae_slave_config dma_slaves[] = {
+ {
+ .chcr = CHCR_TX(XMIT_SZ_32BIT),
+ .mid_rid = 0xd1, /* MMC0 Tx */
+ }, {
+ .chcr = CHCR_RX(XMIT_SZ_32BIT),
+ .mid_rid = 0xd2, /* MMC0 Rx */
+ }, {
+ .chcr = CHCR_TX(XMIT_SZ_32BIT),
+ .mid_rid = 0xe1, /* MMC1 Tx */
+ }, {
+ .chcr = CHCR_RX(XMIT_SZ_32BIT),
+ .mid_rid = 0xe2, /* MMC1 Rx */
+ },
+};
+
+#define DMAE_CHANNEL(a, b) \
+ { \
+ .offset = (a) - 0x20, \
+ .dmars = (a) - 0x20 + 0x40, \
+ .chclr_bit = (b), \
+ .chclr_offset = 0x80 - 0x20, \
+ }
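+
+/*
+ * For illustration, DMAE_CHANNEL(0x8000, 0) expands to .offset = 0x7fe0,
+ * .dmars = 0x8020, .chclr_bit = 0, .chclr_offset = 0x60; the 0x20
+ * subtraction presumably accounts for the base of the channel register
+ * window.
+ */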
+
+static const struct sh_dmae_channel dma_channels[] = {
+ DMAE_CHANNEL(0x8000, 0),
+ DMAE_CHANNEL(0x8080, 1),
+ DMAE_CHANNEL(0x8100, 2),
+ DMAE_CHANNEL(0x8180, 3),
+ DMAE_CHANNEL(0x8200, 4),
+ DMAE_CHANNEL(0x8280, 5),
+ DMAE_CHANNEL(0x8300, 6),
+ DMAE_CHANNEL(0x8380, 7),
+ DMAE_CHANNEL(0x8400, 8),
+ DMAE_CHANNEL(0x8480, 9),
+ DMAE_CHANNEL(0x8500, 10),
+ DMAE_CHANNEL(0x8580, 11),
+ DMAE_CHANNEL(0x8600, 12),
+ DMAE_CHANNEL(0x8680, 13),
+ DMAE_CHANNEL(0x8700, 14),
+ DMAE_CHANNEL(0x8780, 15),
+ DMAE_CHANNEL(0x8800, 16),
+ DMAE_CHANNEL(0x8880, 17),
+ DMAE_CHANNEL(0x8900, 18),
+ DMAE_CHANNEL(0x8980, 19),
+};
+
+const struct sh_dmae_pdata r8a73a4_dma_pdata = {
+ .slave = dma_slaves,
+ .slave_num = ARRAY_SIZE(dma_slaves),
+ .channel = dma_channels,
+ .channel_num = ARRAY_SIZE(dma_channels),
+ .ts_low_shift = TS_LOW_SHIFT,
+ .ts_low_mask = TS_LOW_BIT << TS_LOW_SHIFT,
+ .ts_high_shift = TS_HI_SHIFT,
+ .ts_high_mask = TS_HI_BIT << TS_HI_SHIFT,
+ .ts_shift = dma_ts_shift,
+ .ts_shift_num = ARRAY_SIZE(dma_ts_shift),
+ .dmaor_init = DMAOR_DME,
+ .chclr_present = 1,
+ .chclr_bitwise = 1,
+};
diff --git a/drivers/dma/sh/shdma.h b/drivers/dma/sh/shdma.h
index 9314e93..758a57b 100644
--- a/drivers/dma/sh/shdma.h
+++ b/drivers/dma/sh/shdma.h
@@ -28,18 +28,19 @@ struct sh_dmae_chan {
struct shdma_chan shdma_chan;
const struct sh_dmae_slave_config *config; /* Slave DMA configuration */
int xmit_shift; /* log_2(bytes_per_xfer) */
- u32 __iomem *base;
+ void __iomem *base;
char dev_id[16]; /* unique name per DMAC of channel */
int pm_error;
+ dma_addr_t slave_addr;
};
struct sh_dmae_device {
struct shdma_dev shdma_dev;
struct sh_dmae_chan *chan[SH_DMAE_MAX_CHANNELS];
- struct sh_dmae_pdata *pdata;
+ const struct sh_dmae_pdata *pdata;
struct list_head node;
- u32 __iomem *chan_reg;
- u16 __iomem *dmars;
+ void __iomem *chan_reg;
+ void __iomem *dmars;
unsigned int chcr_offset;
u32 chcr_ie_bit;
};
@@ -61,4 +62,11 @@ struct sh_dmae_desc {
#define to_sh_dev(chan) container_of(chan->shdma_chan.dma_chan.device,\
struct sh_dmae_device, shdma_dev.dma_dev)
+#ifdef CONFIG_SHDMA_R8A73A4
+extern const struct sh_dmae_pdata r8a73a4_dma_pdata;
+#define r8a73a4_shdma_devid (&r8a73a4_dma_pdata)
+#else
+#define r8a73a4_shdma_devid NULL
+#endif
+
#endif /* __DMA_SHDMA_H */
diff --git a/drivers/dma/sh/shdma.c b/drivers/dma/sh/shdmac.c
index 5039fbc..1069e88 100644
--- a/drivers/dma/sh/shdma.c
+++ b/drivers/dma/sh/shdmac.c
@@ -20,6 +20,8 @@
#include <linux/init.h>
#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
@@ -35,6 +37,15 @@
#include "../dmaengine.h"
#include "shdma.h"
+/* DMA registers */
+#define SAR 0x00
+#define DAR 0x04
+#define TCR 0x08
+#define CHCR 0x0C
+#define DMAOR 0x40
+
+#define TEND 0x18 /* USB-DMAC */
+
#define SH_DMAE_DRV_NAME "sh-dma-engine"
/* Default MEMCPY transfer size = 2^2 = 4 bytes */
@@ -49,27 +60,37 @@
static DEFINE_SPINLOCK(sh_dmae_lock);
static LIST_HEAD(sh_dmae_devices);
-static void chclr_write(struct sh_dmae_chan *sh_dc, u32 data)
+/*
+ * Different DMAC implementations provide different ways to clear DMA channels:
+ * (1) none - no CHCLR registers are available
+ * (2) one CHCLR register per channel - 0 has to be written to it to clear
+ * channel buffers
+ * (3) one CHCLR per several channels - 1 has to be written to the bit
+ * corresponding to the specific channel to reset it
+ */
+static void channel_clear(struct sh_dmae_chan *sh_dc)
{
struct sh_dmae_device *shdev = to_sh_dev(sh_dc);
+ const struct sh_dmae_channel *chan_pdata = shdev->pdata->channel +
+ sh_dc->shdma_chan.id;
+ u32 val = shdev->pdata->chclr_bitwise ? 1 << chan_pdata->chclr_bit : 0;
- __raw_writel(data, shdev->chan_reg +
- shdev->pdata->channel[sh_dc->shdma_chan.id].chclr_offset);
+ __raw_writel(val, shdev->chan_reg + chan_pdata->chclr_offset);
}
static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg)
{
- __raw_writel(data, sh_dc->base + reg / sizeof(u32));
+ __raw_writel(data, sh_dc->base + reg);
}
static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg)
{
- return __raw_readl(sh_dc->base + reg / sizeof(u32));
+ return __raw_readl(sh_dc->base + reg);
}
static u16 dmaor_read(struct sh_dmae_device *shdev)
{
- u32 __iomem *addr = shdev->chan_reg + DMAOR / sizeof(u32);
+ void __iomem *addr = shdev->chan_reg + DMAOR;
if (shdev->pdata->dmaor_is_32bit)
return __raw_readl(addr);
@@ -79,7 +100,7 @@ static u16 dmaor_read(struct sh_dmae_device *shdev)
static void dmaor_write(struct sh_dmae_device *shdev, u16 data)
{
- u32 __iomem *addr = shdev->chan_reg + DMAOR / sizeof(u32);
+ void __iomem *addr = shdev->chan_reg + DMAOR;
if (shdev->pdata->dmaor_is_32bit)
__raw_writel(data, addr);
@@ -91,14 +112,14 @@ static void chcr_write(struct sh_dmae_chan *sh_dc, u32 data)
{
struct sh_dmae_device *shdev = to_sh_dev(sh_dc);
- __raw_writel(data, sh_dc->base + shdev->chcr_offset / sizeof(u32));
+ __raw_writel(data, sh_dc->base + shdev->chcr_offset);
}
static u32 chcr_read(struct sh_dmae_chan *sh_dc)
{
struct sh_dmae_device *shdev = to_sh_dev(sh_dc);
- return __raw_readl(sh_dc->base + shdev->chcr_offset / sizeof(u32));
+ return __raw_readl(sh_dc->base + shdev->chcr_offset);
}
/*
@@ -133,7 +154,7 @@ static int sh_dmae_rst(struct sh_dmae_device *shdev)
for (i = 0; i < shdev->pdata->channel_num; i++) {
struct sh_dmae_chan *sh_chan = shdev->chan[i];
if (sh_chan)
- chclr_write(sh_chan, 0);
+ channel_clear(sh_chan);
}
}
@@ -167,7 +188,7 @@ static bool dmae_is_busy(struct sh_dmae_chan *sh_chan)
static unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan, u32 chcr)
{
struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
- struct sh_dmae_pdata *pdata = shdev->pdata;
+ const struct sh_dmae_pdata *pdata = shdev->pdata;
int cnt = ((chcr & pdata->ts_low_mask) >> pdata->ts_low_shift) |
((chcr & pdata->ts_high_mask) >> pdata->ts_high_shift);
@@ -180,7 +201,7 @@ static unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan, u32 chcr)
static u32 log2size_to_chcr(struct sh_dmae_chan *sh_chan, int l2size)
{
struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
- struct sh_dmae_pdata *pdata = shdev->pdata;
+ const struct sh_dmae_pdata *pdata = shdev->pdata;
int i;
for (i = 0; i < pdata->ts_shift_num; i++)
@@ -240,9 +261,9 @@ static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
{
struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
- struct sh_dmae_pdata *pdata = shdev->pdata;
+ const struct sh_dmae_pdata *pdata = shdev->pdata;
const struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->shdma_chan.id];
- u16 __iomem *addr = shdev->dmars;
+ void __iomem *addr = shdev->dmars;
unsigned int shift = chan_pdata->dmars_bit;
if (dmae_is_busy(sh_chan))
@@ -253,8 +274,8 @@ static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
/* in the case of a missing DMARS resource use first memory window */
if (!addr)
- addr = (u16 __iomem *)shdev->chan_reg;
- addr += chan_pdata->dmars / sizeof(u16);
+ addr = shdev->chan_reg;
+ addr += chan_pdata->dmars;
__raw_writew((__raw_readw(addr) & (0xff00 >> shift)) | (val << shift),
addr);
@@ -309,7 +330,7 @@ static const struct sh_dmae_slave_config *dmae_find_slave(
struct sh_dmae_chan *sh_chan, int match)
{
struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
- struct sh_dmae_pdata *pdata = shdev->pdata;
+ const struct sh_dmae_pdata *pdata = shdev->pdata;
const struct sh_dmae_slave_config *cfg;
int i;
@@ -323,7 +344,7 @@ static const struct sh_dmae_slave_config *dmae_find_slave(
} else {
for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++)
if (cfg->mid_rid == match) {
- sh_chan->shdma_chan.slave_id = cfg->slave_id;
+ sh_chan->shdma_chan.slave_id = i;
return cfg;
}
}
@@ -332,7 +353,7 @@ static const struct sh_dmae_slave_config *dmae_find_slave(
}
static int sh_dmae_set_slave(struct shdma_chan *schan,
- int slave_id, bool try)
+ int slave_id, dma_addr_t slave_addr, bool try)
{
struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
shdma_chan);
@@ -340,8 +361,10 @@ static int sh_dmae_set_slave(struct shdma_chan *schan,
if (!cfg)
return -ENXIO;
- if (!try)
+ if (!try) {
sh_chan->config = cfg;
+ sh_chan->slave_addr = slave_addr ? : cfg->addr;
+ }
return 0;
}
@@ -505,7 +528,8 @@ static int sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id,
struct shdma_chan *schan;
int err;
- sh_chan = kzalloc(sizeof(struct sh_dmae_chan), GFP_KERNEL);
+ sh_chan = devm_kzalloc(sdev->dma_dev.dev, sizeof(struct sh_dmae_chan),
+ GFP_KERNEL);
if (!sh_chan) {
dev_err(sdev->dma_dev.dev,
"No free memory for allocating dma channels!\n");
@@ -517,7 +541,7 @@ static int sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id,
shdma_chan_probe(sdev, schan, id);
- sh_chan->base = shdev->chan_reg + chan_pdata->offset / sizeof(u32);
+ sh_chan->base = shdev->chan_reg + chan_pdata->offset;
/* set up channel irq */
if (pdev->id >= 0)
@@ -541,7 +565,6 @@ static int sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id,
err_no_irq:
/* remove from dmaengine device node */
shdma_chan_remove(schan);
- kfree(sh_chan);
return err;
}
@@ -552,14 +575,9 @@ static void sh_dmae_chan_remove(struct sh_dmae_device *shdev)
int i;
shdma_for_each_chan(schan, &shdev->shdma_dev, i) {
- struct sh_dmae_chan *sh_chan = container_of(schan,
- struct sh_dmae_chan, shdma_chan);
BUG_ON(!schan);
- shdma_free_irq(&sh_chan->shdma_chan);
-
shdma_chan_remove(schan);
- kfree(sh_chan);
}
dma_dev->chancnt = 0;
}
@@ -636,7 +654,7 @@ static dma_addr_t sh_dmae_slave_addr(struct shdma_chan *schan)
* This is an exclusive slave DMA operation, may only be called after a
* successful slave configuration.
*/
- return sh_chan->config->addr;
+ return sh_chan->slave_addr;
}
static struct shdma_desc *sh_dmae_embedded_desc(void *buf, int i)
@@ -658,9 +676,15 @@ static const struct shdma_ops sh_dmae_shdma_ops = {
.get_partial = sh_dmae_get_partial,
};
+static const struct of_device_id sh_dmae_of_match[] = {
+ {.compatible = "renesas,shdma-r8a73a4", .data = r8a73a4_shdma_devid,},
+ {}
+};
+MODULE_DEVICE_TABLE(of, sh_dmae_of_match);
+
static int sh_dmae_probe(struct platform_device *pdev)
{
- struct sh_dmae_pdata *pdata = pdev->dev.platform_data;
+ const struct sh_dmae_pdata *pdata;
unsigned long irqflags = IRQF_DISABLED,
chan_flag[SH_DMAE_MAX_CHANNELS] = {};
int errirq, chan_irq[SH_DMAE_MAX_CHANNELS];
@@ -669,6 +693,11 @@ static int sh_dmae_probe(struct platform_device *pdev)
struct dma_device *dma_dev;
struct resource *chan, *dmars, *errirq_res, *chanirq_res;
+ if (pdev->dev.of_node)
+ pdata = of_match_device(sh_dmae_of_match, &pdev->dev)->data;
+ else
+ pdata = dev_get_platdata(&pdev->dev);
+
/* get platform data */
if (!pdata || !pdata->channel_num)
return -ENODEV;
@@ -696,33 +725,22 @@ static int sh_dmae_probe(struct platform_device *pdev)
if (!chan || !errirq_res)
return -ENODEV;
- if (!request_mem_region(chan->start, resource_size(chan), pdev->name)) {
- dev_err(&pdev->dev, "DMAC register region already claimed\n");
- return -EBUSY;
- }
-
- if (dmars && !request_mem_region(dmars->start, resource_size(dmars), pdev->name)) {
- dev_err(&pdev->dev, "DMAC DMARS region already claimed\n");
- err = -EBUSY;
- goto ermrdmars;
- }
-
- err = -ENOMEM;
- shdev = kzalloc(sizeof(struct sh_dmae_device), GFP_KERNEL);
+ shdev = devm_kzalloc(&pdev->dev, sizeof(struct sh_dmae_device),
+ GFP_KERNEL);
if (!shdev) {
dev_err(&pdev->dev, "Not enough memory\n");
- goto ealloc;
+ return -ENOMEM;
}
dma_dev = &shdev->shdma_dev.dma_dev;
- shdev->chan_reg = ioremap(chan->start, resource_size(chan));
- if (!shdev->chan_reg)
- goto emapchan;
+ shdev->chan_reg = devm_ioremap_resource(&pdev->dev, chan);
+ if (IS_ERR(shdev->chan_reg))
+ return PTR_ERR(shdev->chan_reg);
if (dmars) {
- shdev->dmars = ioremap(dmars->start, resource_size(dmars));
- if (!shdev->dmars)
- goto emapdmars;
+ shdev->dmars = devm_ioremap_resource(&pdev->dev, dmars);
+ if (IS_ERR(shdev->dmars))
+ return PTR_ERR(shdev->dmars);
}
if (!pdata->slave_only)
@@ -783,8 +801,8 @@ static int sh_dmae_probe(struct platform_device *pdev)
errirq = errirq_res->start;
- err = request_irq(errirq, sh_dmae_err, irqflags,
- "DMAC Address Error", shdev);
+ err = devm_request_irq(&pdev->dev, errirq, sh_dmae_err, irqflags,
+ "DMAC Address Error", shdev);
if (err) {
dev_err(&pdev->dev,
"DMA failed requesting irq #%d, error %d\n",
@@ -862,7 +880,6 @@ chan_probe_err:
sh_dmae_chan_remove(shdev);
#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
- free_irq(errirq, shdev);
eirq_err:
#endif
rst_err:
@@ -873,21 +890,9 @@ rst_err:
pm_runtime_put(&pdev->dev);
pm_runtime_disable(&pdev->dev);
- platform_set_drvdata(pdev, NULL);
shdma_cleanup(&shdev->shdma_dev);
eshdma:
- if (dmars)
- iounmap(shdev->dmars);
-emapdmars:
- iounmap(shdev->chan_reg);
synchronize_rcu();
-emapchan:
- kfree(shdev);
-ealloc:
- if (dmars)
- release_mem_region(dmars->start, resource_size(dmars));
-ermrdmars:
- release_mem_region(chan->start, resource_size(chan));
return err;
}
@@ -896,14 +901,9 @@ static int sh_dmae_remove(struct platform_device *pdev)
{
struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
struct dma_device *dma_dev = &shdev->shdma_dev.dma_dev;
- struct resource *res;
- int errirq = platform_get_irq(pdev, 0);
dma_async_device_unregister(dma_dev);
- if (errirq > 0)
- free_irq(errirq, shdev);
-
spin_lock_irq(&sh_dmae_lock);
list_del_rcu(&shdev->node);
spin_unlock_irq(&sh_dmae_lock);
@@ -913,31 +913,11 @@ static int sh_dmae_remove(struct platform_device *pdev)
sh_dmae_chan_remove(shdev);
shdma_cleanup(&shdev->shdma_dev);
- if (shdev->dmars)
- iounmap(shdev->dmars);
- iounmap(shdev->chan_reg);
-
- platform_set_drvdata(pdev, NULL);
-
synchronize_rcu();
- kfree(shdev);
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (res)
- release_mem_region(res->start, resource_size(res));
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (res)
- release_mem_region(res->start, resource_size(res));
return 0;
}
-static const struct of_device_id sh_dmae_of_match[] = {
- { .compatible = "renesas,shdma", },
- { }
-};
-MODULE_DEVICE_TABLE(of, sh_dmae_of_match);
-
static struct platform_driver sh_dmae_driver = {
.driver = {
.owner = THIS_MODULE,
diff --git a/drivers/dma/sh/sudmac.c b/drivers/dma/sh/sudmac.c
index e7c94bb..c7e9cdf 100644
--- a/drivers/dma/sh/sudmac.c
+++ b/drivers/dma/sh/sudmac.c
@@ -150,7 +150,8 @@ static const struct sudmac_slave_config *sudmac_find_slave(
return NULL;
}
-static int sudmac_set_slave(struct shdma_chan *schan, int slave_id, bool try)
+static int sudmac_set_slave(struct shdma_chan *schan, int slave_id,
+ dma_addr_t slave_addr, bool try)
{
struct sudmac_chan *sc = to_chan(schan);
const struct sudmac_slave_config *cfg = sudmac_find_slave(sc, slave_id);
@@ -298,11 +299,8 @@ static void sudmac_chan_remove(struct sudmac_device *su_dev)
int i;
shdma_for_each_chan(schan, &su_dev->shdma_dev, i) {
- struct sudmac_chan *sc = to_chan(schan);
-
BUG_ON(!schan);
- shdma_free_irq(&sc->shdma_chan);
shdma_chan_remove(schan);
}
dma_dev->chancnt = 0;
@@ -335,7 +333,7 @@ static const struct shdma_ops sudmac_shdma_ops = {
static int sudmac_probe(struct platform_device *pdev)
{
- struct sudmac_pdata *pdata = pdev->dev.platform_data;
+ struct sudmac_pdata *pdata = dev_get_platdata(&pdev->dev);
int err, i;
struct sudmac_device *su_dev;
struct dma_device *dma_dev;
@@ -345,9 +343,8 @@ static int sudmac_probe(struct platform_device *pdev)
if (!pdata)
return -ENODEV;
- chan = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!chan || !irq_res)
+ if (!irq_res)
return -ENODEV;
err = -ENOMEM;
@@ -360,9 +357,10 @@ static int sudmac_probe(struct platform_device *pdev)
dma_dev = &su_dev->shdma_dev.dma_dev;
- su_dev->chan_reg = devm_request_and_ioremap(&pdev->dev, chan);
- if (!su_dev->chan_reg)
- return err;
+ chan = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ su_dev->chan_reg = devm_ioremap_resource(&pdev->dev, chan);
+ if (IS_ERR(su_dev->chan_reg))
+ return PTR_ERR(su_dev->chan_reg);
dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
@@ -373,7 +371,7 @@ static int sudmac_probe(struct platform_device *pdev)
return err;
/* platform data */
- su_dev->pdata = pdev->dev.platform_data;
+ su_dev->pdata = dev_get_platdata(&pdev->dev);
platform_set_drvdata(pdev, su_dev);
@@ -393,7 +391,6 @@ static int sudmac_probe(struct platform_device *pdev)
chan_probe_err:
sudmac_chan_remove(su_dev);
- platform_set_drvdata(pdev, NULL);
shdma_cleanup(&su_dev->shdma_dev);
return err;
@@ -407,7 +404,6 @@ static int sudmac_remove(struct platform_device *pdev)
dma_async_device_unregister(dma_dev);
sudmac_chan_remove(su_dev);
shdma_cleanup(&su_dev->shdma_dev);
- platform_set_drvdata(pdev, NULL);
return 0;
}
diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c
index 716b23e..6aec3ad 100644
--- a/drivers/dma/sirf-dma.c
+++ b/drivers/dma/sirf-dma.c
@@ -9,6 +9,7 @@
#include <linux/module.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
+#include <linux/pm_runtime.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/slab.h>
@@ -73,6 +74,11 @@ struct sirfsoc_dma_chan {
int mode;
};
+struct sirfsoc_dma_regs {
+ u32 ctrl[SIRFSOC_DMA_CHANNELS];
+ u32 interrupt_en;
+};
+
struct sirfsoc_dma {
struct dma_device dma;
struct tasklet_struct tasklet;
@@ -81,10 +87,13 @@ struct sirfsoc_dma {
int irq;
struct clk *clk;
bool is_marco;
+ struct sirfsoc_dma_regs regs_save;
};
#define DRV_NAME "sirfsoc_dma"
+static int sirfsoc_dma_runtime_suspend(struct device *dev);
+
/* Convert struct dma_chan to struct sirfsoc_dma_chan */
static inline
struct sirfsoc_dma_chan *dma_chan_to_sirfsoc_dma_chan(struct dma_chan *c)
@@ -393,6 +402,8 @@ static int sirfsoc_dma_alloc_chan_resources(struct dma_chan *chan)
LIST_HEAD(descs);
int i;
+ pm_runtime_get_sync(sdma->dma.dev);
+
/* Alloc descriptors for this channel */
for (i = 0; i < SIRFSOC_DMA_DESCRIPTORS; i++) {
sdesc = kzalloc(sizeof(*sdesc), GFP_KERNEL);
@@ -425,6 +436,7 @@ static int sirfsoc_dma_alloc_chan_resources(struct dma_chan *chan)
static void sirfsoc_dma_free_chan_resources(struct dma_chan *chan)
{
struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
+ struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
struct sirfsoc_dma_desc *sdesc, *tmp;
unsigned long flags;
LIST_HEAD(descs);
@@ -445,6 +457,8 @@ static void sirfsoc_dma_free_chan_resources(struct dma_chan *chan)
/* Free descriptors */
list_for_each_entry_safe(sdesc, tmp, &descs, node)
kfree(sdesc);
+
+ pm_runtime_put(sdma->dma.dev);
}
/* Send pending descriptor to hardware */
@@ -595,7 +609,7 @@ sirfsoc_dma_prep_cyclic(struct dma_chan *chan, dma_addr_t addr,
spin_unlock_irqrestore(&schan->lock, iflags);
if (!sdesc)
- return 0;
+ return NULL;
/* Place descriptor in prepared list */
spin_lock_irqsave(&schan->lock, iflags);
@@ -723,14 +737,14 @@ static int sirfsoc_dma_probe(struct platform_device *op)
tasklet_init(&sdma->tasklet, sirfsoc_dma_tasklet, (unsigned long)sdma);
- clk_prepare_enable(sdma->clk);
-
/* Register DMA engine */
dev_set_drvdata(dev, sdma);
+
ret = dma_async_device_register(dma);
if (ret)
goto free_irq;
+ pm_runtime_enable(&op->dev);
dev_info(dev, "initialized SIRFSOC DMAC driver\n");
return 0;
@@ -747,13 +761,124 @@ static int sirfsoc_dma_remove(struct platform_device *op)
struct device *dev = &op->dev;
struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
- clk_disable_unprepare(sdma->clk);
dma_async_device_unregister(&sdma->dma);
free_irq(sdma->irq, sdma);
irq_dispose_mapping(sdma->irq);
+ pm_runtime_disable(&op->dev);
+ if (!pm_runtime_status_suspended(&op->dev))
+ sirfsoc_dma_runtime_suspend(&op->dev);
+
+ return 0;
+}
+
+static int sirfsoc_dma_runtime_suspend(struct device *dev)
+{
+ struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(sdma->clk);
+ return 0;
+}
+
+static int sirfsoc_dma_runtime_resume(struct device *dev)
+{
+ struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(sdma->clk);
+ if (ret < 0) {
+ dev_err(dev, "clk_enable failed: %d\n", ret);
+ return ret;
+ }
+ return 0;
+}
+
+static int sirfsoc_dma_pm_suspend(struct device *dev)
+{
+ struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
+ struct sirfsoc_dma_regs *save = &sdma->regs_save;
+ struct sirfsoc_dma_desc *sdesc;
+ struct sirfsoc_dma_chan *schan;
+ int ch;
+ int ret;
+
+ /*
+ * If we were runtime-suspended before, resume to enable the clock
+ * before accessing registers.
+ */
+ if (pm_runtime_status_suspended(dev)) {
+ ret = sirfsoc_dma_runtime_resume(dev);
+ if (ret < 0)
+ return ret;
+ }
+
+ /*
+ * The DMA controller loses all register state while suspended,
+ * so we need to save the registers of the active channels.
+ */
+ for (ch = 0; ch < SIRFSOC_DMA_CHANNELS; ch++) {
+ schan = &sdma->channels[ch];
+ if (list_empty(&schan->active))
+ continue;
+ sdesc = list_first_entry(&schan->active,
+ struct sirfsoc_dma_desc,
+ node);
+ save->ctrl[ch] = readl_relaxed(sdma->base +
+ ch * 0x10 + SIRFSOC_DMA_CH_CTRL);
+ }
+ save->interrupt_en = readl_relaxed(sdma->base + SIRFSOC_DMA_INT_EN);
+
+ /* Disable clock */
+ sirfsoc_dma_runtime_suspend(dev);
+
+ return 0;
+}
+
+static int sirfsoc_dma_pm_resume(struct device *dev)
+{
+ struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
+ struct sirfsoc_dma_regs *save = &sdma->regs_save;
+ struct sirfsoc_dma_desc *sdesc;
+ struct sirfsoc_dma_chan *schan;
+ int ch;
+ int ret;
+
+ /* Enable the clock before accessing registers */
+ ret = sirfsoc_dma_runtime_resume(dev);
+ if (ret < 0)
+ return ret;
+
+ writel_relaxed(save->interrupt_en, sdma->base + SIRFSOC_DMA_INT_EN);
+ for (ch = 0; ch < SIRFSOC_DMA_CHANNELS; ch++) {
+ schan = &sdma->channels[ch];
+ if (list_empty(&schan->active))
+ continue;
+ sdesc = list_first_entry(&schan->active,
+ struct sirfsoc_dma_desc,
+ node);
+ writel_relaxed(sdesc->width,
+ sdma->base + SIRFSOC_DMA_WIDTH_0 + ch * 4);
+ writel_relaxed(sdesc->xlen,
+ sdma->base + ch * 0x10 + SIRFSOC_DMA_CH_XLEN);
+ writel_relaxed(sdesc->ylen,
+ sdma->base + ch * 0x10 + SIRFSOC_DMA_CH_YLEN);
+ writel_relaxed(save->ctrl[ch],
+ sdma->base + ch * 0x10 + SIRFSOC_DMA_CH_CTRL);
+ writel_relaxed(sdesc->addr >> 2,
+ sdma->base + ch * 0x10 + SIRFSOC_DMA_CH_ADDR);
+ }
+
+ /* if we were runtime-suspended before, suspend again */
+ if (pm_runtime_status_suspended(dev))
+ sirfsoc_dma_runtime_suspend(dev);
+
return 0;
}
+static const struct dev_pm_ops sirfsoc_dma_pm_ops = {
+ SET_RUNTIME_PM_OPS(sirfsoc_dma_runtime_suspend, sirfsoc_dma_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(sirfsoc_dma_pm_suspend, sirfsoc_dma_pm_resume)
+};
+
static struct of_device_id sirfsoc_dma_match[] = {
{ .compatible = "sirf,prima2-dmac", },
{ .compatible = "sirf,marco-dmac", },
@@ -766,6 +891,7 @@ static struct platform_driver sirfsoc_dma_driver = {
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
+ .pm = &sirfsoc_dma_pm_ops,
.of_match_table = sirfsoc_dma_match,
},
};
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 5ab5880..82d2b97 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -2591,6 +2591,9 @@ dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
int i;
sg = kcalloc(periods + 1, sizeof(struct scatterlist), GFP_NOWAIT);
+ if (!sg)
+ return NULL;
+
for (i = 0; i < periods; i++) {
sg_dma_address(&sg[i]) = dma_addr;
sg_dma_len(&sg[i]) = period_len;
@@ -3139,7 +3142,7 @@ static int __init d40_phy_res_init(struct d40_base *base)
static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
{
- struct stedma40_platform_data *plat_data = pdev->dev.platform_data;
+ struct stedma40_platform_data *plat_data = dev_get_platdata(&pdev->dev);
struct clk *clk = NULL;
void __iomem *virtbase = NULL;
struct resource *res = NULL;
@@ -3226,8 +3229,8 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
num_log_chans = num_phy_chans * D40_MAX_LOG_CHAN_PER_PHY;
dev_info(&pdev->dev,
- "hardware rev: %d @ 0x%x with %d physical and %d logical channels\n",
- rev, res->start, num_phy_chans, num_log_chans);
+ "hardware rev: %d @ %pa with %d physical and %d logical channels\n",
+ rev, &res->start, num_phy_chans, num_log_chans);
base = kzalloc(ALIGN(sizeof(struct d40_base), 4) +
(num_phy_chans + num_log_chans + num_memcpy_chans) *
@@ -3485,7 +3488,7 @@ static int __init d40_of_probe(struct platform_device *pdev,
{
struct stedma40_platform_data *pdata;
int num_phy = 0, num_memcpy = 0, num_disabled = 0;
- const const __be32 *list;
+ const __be32 *list;
pdata = devm_kzalloc(&pdev->dev,
sizeof(struct stedma40_platform_data),
@@ -3516,7 +3519,7 @@ static int __init d40_of_probe(struct platform_device *pdev,
list = of_get_property(np, "disabled-channels", &num_disabled);
num_disabled /= sizeof(*list);
- if (num_disabled > STEDMA40_MAX_PHYS || num_disabled < 0) {
+ if (num_disabled >= STEDMA40_MAX_PHYS || num_disabled < 0) {
d40_err(&pdev->dev,
"Invalid number of disabled channels specified (%d)\n",
num_disabled);
@@ -3535,7 +3538,7 @@ static int __init d40_of_probe(struct platform_device *pdev,
static int __init d40_probe(struct platform_device *pdev)
{
- struct stedma40_platform_data *plat_data = pdev->dev.platform_data;
+ struct stedma40_platform_data *plat_data = dev_get_platdata(&pdev->dev);
struct device_node *np = pdev->dev.of_node;
int ret = -ENOENT;
struct d40_base *base = NULL;
@@ -3579,9 +3582,7 @@ static int __init d40_probe(struct platform_device *pdev)
if (request_mem_region(res->start, resource_size(res),
D40_NAME " I/O lcpa") == NULL) {
ret = -EBUSY;
- d40_err(&pdev->dev,
- "Failed to request LCPA region 0x%x-0x%x\n",
- res->start, res->end);
+ d40_err(&pdev->dev, "Failed to request LCPA region %pR\n", res);
goto failure;
}
@@ -3589,8 +3590,8 @@ static int __init d40_probe(struct platform_device *pdev)
val = readl(base->virtbase + D40_DREG_LCPA);
if (res->start != val && val != 0) {
dev_warn(&pdev->dev,
- "[%s] Mismatch LCPA dma 0x%x, def 0x%x\n",
- __func__, val, res->start);
+ "[%s] Mismatch LCPA dma 0x%x, def %pa\n",
+ __func__, val, &res->start);
} else
writel(res->start, base->virtbase + D40_DREG_LCPA);
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
index f137914..5d4986e 100644
--- a/drivers/dma/tegra20-apb-dma.c
+++ b/drivers/dma/tegra20-apb-dma.c
@@ -767,13 +767,11 @@ static enum dma_status tegra_dma_tx_status(struct dma_chan *dc,
unsigned long flags;
unsigned int residual;
- spin_lock_irqsave(&tdc->lock, flags);
-
ret = dma_cookie_status(dc, cookie, txstate);
- if (ret == DMA_SUCCESS) {
- spin_unlock_irqrestore(&tdc->lock, flags);
+ if (ret == DMA_SUCCESS)
return ret;
- }
+
+ spin_lock_irqsave(&tdc->lock, flags);
/* Check on wait_ack desc status */
list_for_each_entry(dma_desc, &tdc->free_dma_desc, node) {
diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c
index 0ef43c1..28af214 100644
--- a/drivers/dma/timb_dma.c
+++ b/drivers/dma/timb_dma.c
@@ -669,7 +669,7 @@ static irqreturn_t td_irq(int irq, void *devid)
static int td_probe(struct platform_device *pdev)
{
- struct timb_dma_platform_data *pdata = pdev->dev.platform_data;
+ struct timb_dma_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct timb_dma *td;
struct resource *iomem;
int irq;
diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c
index a59fb48..71e8e77 100644
--- a/drivers/dma/txx9dmac.c
+++ b/drivers/dma/txx9dmac.c
@@ -962,15 +962,14 @@ txx9dmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
enum dma_status ret;
ret = dma_cookie_status(chan, cookie, txstate);
- if (ret != DMA_SUCCESS) {
- spin_lock_bh(&dc->lock);
- txx9dmac_scan_descriptors(dc);
- spin_unlock_bh(&dc->lock);
+ if (ret == DMA_SUCCESS)
+ return DMA_SUCCESS;
- ret = dma_cookie_status(chan, cookie, txstate);
- }
+ spin_lock_bh(&dc->lock);
+ txx9dmac_scan_descriptors(dc);
+ spin_unlock_bh(&dc->lock);
- return ret;
+ return dma_cookie_status(chan, cookie, txstate);
}
static void txx9dmac_chain_dynamic(struct txx9dmac_chan *dc,
@@ -1118,9 +1117,10 @@ static void txx9dmac_off(struct txx9dmac_dev *ddev)
static int __init txx9dmac_chan_probe(struct platform_device *pdev)
{
- struct txx9dmac_chan_platform_data *cpdata = pdev->dev.platform_data;
+ struct txx9dmac_chan_platform_data *cpdata =
+ dev_get_platdata(&pdev->dev);
struct platform_device *dmac_dev = cpdata->dmac_dev;
- struct txx9dmac_platform_data *pdata = dmac_dev->dev.platform_data;
+ struct txx9dmac_platform_data *pdata = dev_get_platdata(&dmac_dev->dev);
struct txx9dmac_chan *dc;
int err;
int ch = pdev->id % TXX9_DMA_MAX_NR_CHANNELS;
@@ -1203,7 +1203,7 @@ static int txx9dmac_chan_remove(struct platform_device *pdev)
static int __init txx9dmac_probe(struct platform_device *pdev)
{
- struct txx9dmac_platform_data *pdata = pdev->dev.platform_data;
+ struct txx9dmac_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct resource *io;
struct txx9dmac_dev *ddev;
u32 mcr;
@@ -1282,7 +1282,7 @@ static int txx9dmac_resume_noirq(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct txx9dmac_dev *ddev = platform_get_drvdata(pdev);
- struct txx9dmac_platform_data *pdata = pdev->dev.platform_data;
+ struct txx9dmac_platform_data *pdata = dev_get_platdata(&pdev->dev);
u32 mcr;
mcr = TXX9_DMA_MCR_MSTEN | MCR_LE;
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index ac1b43a..d7d5c8a 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -486,7 +486,7 @@ static int ioctl_get_info(struct client *client, union ioctl_arg *arg)
static int add_client_resource(struct client *client,
struct client_resource *resource, gfp_t gfp_mask)
{
- bool preload = gfp_mask & __GFP_WAIT;
+ bool preload = !!(gfp_mask & __GFP_WAIT);
unsigned long flags;
int ret;
diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
index 28a94c7..e5af0e3 100644
--- a/drivers/firewire/core-transaction.c
+++ b/drivers/firewire/core-transaction.c
@@ -1262,8 +1262,7 @@ static int __init fw_core_init(void)
{
int ret;
- fw_workqueue = alloc_workqueue("firewire",
- WQ_NON_REENTRANT | WQ_MEM_RECLAIM, 0);
+ fw_workqueue = alloc_workqueue("firewire", WQ_MEM_RECLAIM, 0);
if (!fw_workqueue)
return -ENOMEM;
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index afb701e..6aa8a86 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -235,13 +235,15 @@ struct fw_ohci {
dma_addr_t next_config_rom_bus;
__be32 next_header;
- __le32 *self_id_cpu;
+ __le32 *self_id;
dma_addr_t self_id_bus;
struct work_struct bus_reset_work;
u32 self_id_buffer[512];
};
+static struct workqueue_struct *selfid_workqueue;
+
static inline struct fw_ohci *fw_ohci(struct fw_card *card)
{
return container_of(card, struct fw_ohci, card);
@@ -271,6 +273,7 @@ static inline struct fw_ohci *fw_ohci(struct fw_card *card)
static char ohci_driver_name[] = KBUILD_MODNAME;
+#define PCI_VENDOR_ID_PINNACLE_SYSTEMS 0x11bd
#define PCI_DEVICE_ID_AGERE_FW643 0x5901
#define PCI_DEVICE_ID_CREATIVE_SB1394 0x4001
#define PCI_DEVICE_ID_JMICRON_JMB38X_FW 0x2380
@@ -278,17 +281,16 @@ static char ohci_driver_name[] = KBUILD_MODNAME;
#define PCI_DEVICE_ID_TI_TSB12LV26 0x8020
#define PCI_DEVICE_ID_TI_TSB82AA2 0x8025
#define PCI_DEVICE_ID_VIA_VT630X 0x3044
-#define PCI_VENDOR_ID_PINNACLE_SYSTEMS 0x11bd
#define PCI_REV_ID_VIA_VT6306 0x46
-#define QUIRK_CYCLE_TIMER 1
-#define QUIRK_RESET_PACKET 2
-#define QUIRK_BE_HEADERS 4
-#define QUIRK_NO_1394A 8
-#define QUIRK_NO_MSI 16
-#define QUIRK_TI_SLLZ059 32
-#define QUIRK_IR_WAKE 64
-#define QUIRK_PHY_LCTRL_TIMEOUT 128
+#define QUIRK_CYCLE_TIMER 0x1
+#define QUIRK_RESET_PACKET 0x2
+#define QUIRK_BE_HEADERS 0x4
+#define QUIRK_NO_1394A 0x8
+#define QUIRK_NO_MSI 0x10
+#define QUIRK_TI_SLLZ059 0x20
+#define QUIRK_IR_WAKE 0x40
+#define QUIRK_PHY_LCTRL_TIMEOUT 0x80
/* In case of multiple matches in ohci_quirks[], only the first one is used. */
static const struct {
@@ -1929,12 +1931,12 @@ static void bus_reset_work(struct work_struct *work)
return;
}
- generation = (cond_le32_to_cpu(ohci->self_id_cpu[0]) >> 16) & 0xff;
+ generation = (cond_le32_to_cpu(ohci->self_id[0]) >> 16) & 0xff;
rmb();
for (i = 1, j = 0; j < self_id_count; i += 2, j++) {
- u32 id = cond_le32_to_cpu(ohci->self_id_cpu[i]);
- u32 id2 = cond_le32_to_cpu(ohci->self_id_cpu[i + 1]);
+ u32 id = cond_le32_to_cpu(ohci->self_id[i]);
+ u32 id2 = cond_le32_to_cpu(ohci->self_id[i + 1]);
if (id != ~id2) {
/*
@@ -2087,7 +2089,7 @@ static irqreturn_t irq_handler(int irq, void *data)
log_irqs(ohci, event);
if (event & OHCI1394_selfIDComplete)
- queue_work(fw_workqueue, &ohci->bus_reset_work);
+ queue_work(selfid_workqueue, &ohci->bus_reset_work);
if (event & OHCI1394_RQPkt)
tasklet_schedule(&ohci->ar_request_ctx.tasklet);
@@ -3692,7 +3694,7 @@ static int pci_probe(struct pci_dev *dev,
goto fail_contexts;
}
- ohci->self_id_cpu = ohci->misc_buffer + PAGE_SIZE/2;
+ ohci->self_id = ohci->misc_buffer + PAGE_SIZE/2;
ohci->self_id_bus = ohci->misc_buffer_bus + PAGE_SIZE/2;
bus_options = reg_read(ohci, OHCI1394_BusOptions);
@@ -3870,7 +3872,23 @@ static struct pci_driver fw_ohci_pci_driver = {
#endif
};
-module_pci_driver(fw_ohci_pci_driver);
+static int __init fw_ohci_init(void)
+{
+ selfid_workqueue = alloc_workqueue(KBUILD_MODNAME, WQ_MEM_RECLAIM, 0);
+ if (!selfid_workqueue)
+ return -ENOMEM;
+
+ return pci_register_driver(&fw_ohci_pci_driver);
+}
+
+static void __exit fw_ohci_cleanup(void)
+{
+ pci_unregister_driver(&fw_ohci_pci_driver);
+ destroy_workqueue(selfid_workqueue);
+}
+
+module_init(fw_ohci_init);
+module_exit(fw_ohci_cleanup);
MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>");
MODULE_DESCRIPTION("Driver for PCI OHCI IEEE1394 controllers");
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index 232fa8f..fa0affb 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -14,7 +14,7 @@
* of and an antecedent to, SMBIOS, which stands for System
* Management BIOS. See further: http://www.dmtf.org/standards
*/
-static char dmi_empty_string[] = " ";
+static const char dmi_empty_string[] = " ";
static u16 __initdata dmi_ver;
/*
@@ -49,7 +49,7 @@ static const char * __init dmi_string_nosave(const struct dmi_header *dm, u8 s)
return "";
}
-static char * __init dmi_string(const struct dmi_header *dm, u8 s)
+static const char * __init dmi_string(const struct dmi_header *dm, u8 s)
{
const char *bp = dmi_string_nosave(dm, s);
char *str;
@@ -62,8 +62,6 @@ static char * __init dmi_string(const struct dmi_header *dm, u8 s)
str = dmi_alloc(len);
if (str != NULL)
strcpy(str, bp);
- else
- printk(KERN_ERR "dmi_string: cannot allocate %Zu bytes.\n", len);
return str;
}
@@ -133,17 +131,18 @@ static int __init dmi_checksum(const u8 *buf, u8 len)
return sum == 0;
}
-static char *dmi_ident[DMI_STRING_MAX];
+static const char *dmi_ident[DMI_STRING_MAX];
static LIST_HEAD(dmi_devices);
int dmi_available;
/*
* Save a DMI string
*/
-static void __init dmi_save_ident(const struct dmi_header *dm, int slot, int string)
+static void __init dmi_save_ident(const struct dmi_header *dm, int slot,
+ int string)
{
- const char *d = (const char*) dm;
- char *p;
+ const char *d = (const char *) dm;
+ const char *p;
if (dmi_ident[slot])
return;
@@ -155,9 +154,10 @@ static void __init dmi_save_ident(const struct dmi_header *dm, int slot, int str
dmi_ident[slot] = p;
}
-static void __init dmi_save_uuid(const struct dmi_header *dm, int slot, int index)
+static void __init dmi_save_uuid(const struct dmi_header *dm, int slot,
+ int index)
{
- const u8 *d = (u8*) dm + index;
+ const u8 *d = (u8 *) dm + index;
char *s;
int is_ff = 1, is_00 = 1, i;
@@ -188,12 +188,13 @@ static void __init dmi_save_uuid(const struct dmi_header *dm, int slot, int inde
else
sprintf(s, "%pUB", d);
- dmi_ident[slot] = s;
+ dmi_ident[slot] = s;
}
-static void __init dmi_save_type(const struct dmi_header *dm, int slot, int index)
+static void __init dmi_save_type(const struct dmi_header *dm, int slot,
+ int index)
{
- const u8 *d = (u8*) dm + index;
+ const u8 *d = (u8 *) dm + index;
char *s;
if (dmi_ident[slot])
@@ -216,10 +217,8 @@ static void __init dmi_save_one_device(int type, const char *name)
return;
dev = dmi_alloc(sizeof(*dev) + strlen(name) + 1);
- if (!dev) {
- printk(KERN_ERR "dmi_save_one_device: out of memory.\n");
+ if (!dev)
return;
- }
dev->type = type;
strcpy((char *)(dev + 1), name);
@@ -249,17 +248,14 @@ static void __init dmi_save_oem_strings_devices(const struct dmi_header *dm)
struct dmi_device *dev;
for (i = 1; i <= count; i++) {
- char *devname = dmi_string(dm, i);
+ const char *devname = dmi_string(dm, i);
if (devname == dmi_empty_string)
continue;
dev = dmi_alloc(sizeof(*dev));
- if (!dev) {
- printk(KERN_ERR
- "dmi_save_oem_strings_devices: out of memory.\n");
+ if (!dev)
break;
- }
dev->type = DMI_DEV_TYPE_OEM_STRING;
dev->name = devname;
@@ -272,21 +268,17 @@ static void __init dmi_save_oem_strings_devices(const struct dmi_header *dm)
static void __init dmi_save_ipmi_device(const struct dmi_header *dm)
{
struct dmi_device *dev;
- void * data;
+ void *data;
data = dmi_alloc(dm->length);
- if (data == NULL) {
- printk(KERN_ERR "dmi_save_ipmi_device: out of memory.\n");
+ if (data == NULL)
return;
- }
memcpy(data, dm, dm->length);
dev = dmi_alloc(sizeof(*dev));
- if (!dev) {
- printk(KERN_ERR "dmi_save_ipmi_device: out of memory.\n");
+ if (!dev)
return;
- }
dev->type = DMI_DEV_TYPE_IPMI;
dev->name = "IPMI controller";
@@ -301,10 +293,9 @@ static void __init dmi_save_dev_onboard(int instance, int segment, int bus,
struct dmi_dev_onboard *onboard_dev;
onboard_dev = dmi_alloc(sizeof(*onboard_dev) + strlen(name) + 1);
- if (!onboard_dev) {
- printk(KERN_ERR "dmi_save_dev_onboard: out of memory.\n");
+ if (!onboard_dev)
return;
- }
+
onboard_dev->instance = instance;
onboard_dev->segment = segment;
onboard_dev->bus = bus;
@@ -320,7 +311,7 @@ static void __init dmi_save_dev_onboard(int instance, int segment, int bus,
static void __init dmi_save_extended_devices(const struct dmi_header *dm)
{
- const u8 *d = (u8*) dm + 5;
+ const u8 *d = (u8 *) dm + 5;
/* Skip disabled device */
if ((*d & 0x80) == 0)
@@ -338,7 +329,7 @@ static void __init dmi_save_extended_devices(const struct dmi_header *dm)
*/
static void __init dmi_decode(const struct dmi_header *dm, void *dummy)
{
- switch(dm->type) {
+ switch (dm->type) {
case 0: /* BIOS Information */
dmi_save_ident(dm, DMI_BIOS_VENDOR, 4);
dmi_save_ident(dm, DMI_BIOS_VERSION, 5);
@@ -502,13 +493,7 @@ void __init dmi_scan_machine(void)
dmi_available = 1;
goto out;
}
- }
- else {
- /*
- * no iounmap() for that ioremap(); it would be a no-op, but
- * it's so early in setup that sucker gets confused into doing
- * what it shouldn't if we actually call it.
- */
+ } else {
p = dmi_ioremap(0xF0000, 0x10000);
if (p == NULL)
goto error;
@@ -533,7 +518,7 @@ void __init dmi_scan_machine(void)
dmi_iounmap(p, 0x10000);
}
error:
- printk(KERN_INFO "DMI not present or invalid.\n");
+ pr_info("DMI not present or invalid.\n");
out:
dmi_initialized = 1;
}
@@ -669,7 +654,7 @@ int dmi_name_in_serial(const char *str)
/**
* dmi_name_in_vendors - Check if string is in the DMI system or board vendor name
- * @str: Case sensitive Name
+ * @str: Case sensitive Name
*/
int dmi_name_in_vendors(const char *str)
{
@@ -696,13 +681,13 @@ EXPORT_SYMBOL(dmi_name_in_vendors);
* A new search is initiated by passing %NULL as the @from argument.
* If @from is not %NULL, searches continue from next device.
*/
-const struct dmi_device * dmi_find_device(int type, const char *name,
+const struct dmi_device *dmi_find_device(int type, const char *name,
const struct dmi_device *from)
{
const struct list_head *head = from ? &from->list : &dmi_devices;
struct list_head *d;
- for(d = head->next; d != &dmi_devices; d = d->next) {
+ for (d = head->next; d != &dmi_devices; d = d->next) {
const struct dmi_device *dev =
list_entry(d, struct dmi_device, list);
diff --git a/drivers/firmware/google/gsmi.c b/drivers/firmware/google/gsmi.c
index acba0b9..6eb535f 100644
--- a/drivers/firmware/google/gsmi.c
+++ b/drivers/firmware/google/gsmi.c
@@ -525,7 +525,7 @@ static ssize_t gsmi_clear_eventlog_store(struct kobject *kobj,
u32 data_type;
} param;
- rc = strict_strtoul(buf, 0, &val);
+ rc = kstrtoul(buf, 0, &val);
if (rc)
return rc;
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 349b161..b6ed304 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -203,6 +203,14 @@ config GPIO_MXS
select GPIO_GENERIC
select GENERIC_IRQ_CHIP
+config GPIO_OCTEON
+ tristate "Cavium OCTEON GPIO"
+ depends on GPIOLIB && CAVIUM_OCTEON_SOC
+ default y
+ help
+ Say yes here to support the on-chip GPIO lines on the OCTEON
+ family of SoCs.
+
config GPIO_PL061
bool "PrimeCell PL061 GPIO support"
depends on ARM && ARM_AMBA
@@ -314,7 +322,7 @@ config GPIO_ICH
config GPIO_VX855
tristate "VIA VX855/VX875 GPIO"
- depends on PCI && GENERIC_HARDIRQS
+ depends on PCI
select MFD_CORE
select MFD_VX855
help
@@ -388,7 +396,7 @@ config GPIO_MAX732X
config GPIO_MAX732X_IRQ
bool "Interrupt controller support for MAX732x"
- depends on GPIO_MAX732X=y && GENERIC_HARDIRQS
+ depends on GPIO_MAX732X=y
help
Say yes here to enable the max732x to be used as an interrupt
controller. It requires the driver to be built in the kernel.
@@ -653,7 +661,7 @@ config GPIO_TIMBERDALE
config GPIO_RDC321X
tristate "RDC R-321x GPIO support"
- depends on PCI && GENERIC_HARDIRQS
+ depends on PCI
select MFD_CORE
select MFD_RDC321X
help
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 97438bf..98e23eb 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -52,6 +52,7 @@ obj-$(CONFIG_GPIO_MSM_V2) += gpio-msm-v2.o
obj-$(CONFIG_GPIO_MVEBU) += gpio-mvebu.o
obj-$(CONFIG_GPIO_MXC) += gpio-mxc.o
obj-$(CONFIG_GPIO_MXS) += gpio-mxs.o
+obj-$(CONFIG_GPIO_OCTEON) += gpio-octeon.o
obj-$(CONFIG_ARCH_OMAP) += gpio-omap.o
obj-$(CONFIG_GPIO_PCA953X) += gpio-pca953x.o
obj-$(CONFIG_GPIO_PCF857X) += gpio-pcf857x.o
diff --git a/drivers/gpio/gpio-octeon.c b/drivers/gpio/gpio-octeon.c
new file mode 100644
index 0000000..71a4a31
--- /dev/null
+++ b/drivers/gpio/gpio-octeon.c
@@ -0,0 +1,157 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2011, 2012 Cavium Inc.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/gpio.h>
+#include <linux/io.h>
+
+#include <asm/octeon/octeon.h>
+#include <asm/octeon/cvmx-gpio-defs.h>
+
+#define RX_DAT 0x80
+#define TX_SET 0x88
+#define TX_CLEAR 0x90
+
+/*
+ * Return the address offset of the GPIO configuration register for a
+ * given line.
+ */
+static unsigned int bit_cfg_reg(unsigned int offset)
+{
+ /*
+ * The register stride is 8, with a discontinuity after the
+ * first 16.
+ */
+ if (offset < 16)
+ return 8 * offset;
+ else
+ return 8 * (offset - 16) + 0x100;
+}
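+
+/*
+ * For illustration: bit_cfg_reg(0) = 0x0, bit_cfg_reg(15) = 0x78,
+ * bit_cfg_reg(16) = 0x100 and bit_cfg_reg(19) = 0x118 for the last of
+ * the 20 lines.
+ */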
+
+struct octeon_gpio {
+ struct gpio_chip chip;
+ u64 register_base;
+};
+
+static int octeon_gpio_dir_in(struct gpio_chip *chip, unsigned offset)
+{
+ struct octeon_gpio *gpio = container_of(chip, struct octeon_gpio, chip);
+
+ cvmx_write_csr(gpio->register_base + bit_cfg_reg(offset), 0);
+ return 0;
+}
+
+static void octeon_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+{
+ struct octeon_gpio *gpio = container_of(chip, struct octeon_gpio, chip);
+ u64 mask = 1ull << offset;
+ u64 reg = gpio->register_base + (value ? TX_SET : TX_CLEAR);
+ cvmx_write_csr(reg, mask);
+}
+
+static int octeon_gpio_dir_out(struct gpio_chip *chip, unsigned offset,
+ int value)
+{
+ struct octeon_gpio *gpio = container_of(chip, struct octeon_gpio, chip);
+ union cvmx_gpio_bit_cfgx cfgx;
+
+ octeon_gpio_set(chip, offset, value);
+
+ cfgx.u64 = 0;
+ cfgx.s.tx_oe = 1;
+
+ cvmx_write_csr(gpio->register_base + bit_cfg_reg(offset), cfgx.u64);
+ return 0;
+}
+
+static int octeon_gpio_get(struct gpio_chip *chip, unsigned offset)
+{
+ struct octeon_gpio *gpio = container_of(chip, struct octeon_gpio, chip);
+ u64 read_bits = cvmx_read_csr(gpio->register_base + RX_DAT);
+
+ return ((1ull << offset) & read_bits) != 0;
+}
+
+static int octeon_gpio_probe(struct platform_device *pdev)
+{
+ struct octeon_gpio *gpio;
+ struct gpio_chip *chip;
+ struct resource *res_mem;
+ int err = 0;
+
+ gpio = devm_kzalloc(&pdev->dev, sizeof(*gpio), GFP_KERNEL);
+ if (!gpio)
+ return -ENOMEM;
+ chip = &gpio->chip;
+
+ res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res_mem == NULL) {
+ dev_err(&pdev->dev, "found no memory resource\n");
+ err = -ENXIO;
+ goto out;
+ }
+ if (!devm_request_mem_region(&pdev->dev, res_mem->start,
+ resource_size(res_mem),
+ res_mem->name)) {
+ dev_err(&pdev->dev, "request_mem_region failed\n");
+ err = -ENXIO;
+ goto out;
+ }
+ gpio->register_base = (u64)devm_ioremap(&pdev->dev, res_mem->start,
+ resource_size(res_mem));
+
+ pdev->dev.platform_data = chip;
+ chip->label = "octeon-gpio";
+ chip->dev = &pdev->dev;
+ chip->owner = THIS_MODULE;
+ chip->base = 0;
+ chip->can_sleep = 0;
+ chip->ngpio = 20;
+ chip->direction_input = octeon_gpio_dir_in;
+ chip->get = octeon_gpio_get;
+ chip->direction_output = octeon_gpio_dir_out;
+ chip->set = octeon_gpio_set;
+ err = gpiochip_add(chip);
+ if (err)
+ goto out;
+
+ dev_info(&pdev->dev, "OCTEON GPIO driver probed.\n");
+out:
+ return err;
+}
+
+static int octeon_gpio_remove(struct platform_device *pdev)
+{
+ struct gpio_chip *chip = pdev->dev.platform_data;
+ return gpiochip_remove(chip);
+}
+
+static struct of_device_id octeon_gpio_match[] = {
+ {
+ .compatible = "cavium,octeon-3860-gpio",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, octeon_gpio_match);
+
+static struct platform_driver octeon_gpio_driver = {
+ .driver = {
+ .name = "octeon_gpio",
+ .owner = THIS_MODULE,
+ .of_match_table = octeon_gpio_match,
+ },
+ .probe = octeon_gpio_probe,
+ .remove = octeon_gpio_remove,
+};
+
+module_platform_driver(octeon_gpio_driver);
+
+MODULE_DESCRIPTION("Cavium Inc. OCTEON GPIO Driver");
+MODULE_AUTHOR("David Daney");
+MODULE_LICENSE("GPL");
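
To make the bit_cfg_reg() layout above concrete, here is a small self-contained sketch (not part of the patch) that prints the GPIO_BIT_CFG offset for every line the driver exposes; lines 0-15 sit at 0x00-0x78 in steps of 8, and line 16 jumps to 0x100:

#include <stdio.h>

/* Mirrors bit_cfg_reg() in gpio-octeon.c: stride 8, with a
 * discontinuity after the first 16 lines. */
static unsigned int bit_cfg_reg(unsigned int offset)
{
	return offset < 16 ? 8 * offset : 8 * (offset - 16) + 0x100;
}

int main(void)
{
	unsigned int line;

	for (line = 0; line < 20; line++)	/* chip->ngpio == 20 */
		printf("line %2u -> BIT_CFG offset 0x%03x\n",
		       line, bit_cfg_reg(line));
	return 0;
}
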
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index ba9876f..0dfaf20 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -195,8 +195,8 @@ static void of_gpiochip_add_pin_range(struct gpio_chip *chip)
return;
for (;; index++) {
- ret = of_parse_phandle_with_args(np, "gpio-ranges",
- "#gpio-range-cells", index, &pinspec);
+ ret = of_parse_phandle_with_fixed_args(np, "gpio-ranges", 3,
+ index, &pinspec);
if (ret)
break;
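
A minimal sketch (not part of the patch) of the fixed-args variant used above: every gpio-ranges entry carries exactly three argument cells after the pin-controller phandle, so the cell count is passed as a constant instead of being looked up via a #gpio-range-cells property:

#include <linux/of.h>

static int parse_gpio_range(struct device_node *np, int index,
			    struct of_phandle_args *pinspec)
{
	/* 3 = <pin-base gpio-base count> cells per gpio-ranges entry */
	return of_parse_phandle_with_fixed_args(np, "gpio-ranges", 3,
						index, pinspec);
}
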
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index 796dbb2..8492b68 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -177,7 +177,7 @@ uint8_t ast_get_index_reg_mask(struct ast_private *ast,
static inline void ast_open_key(struct ast_private *ast)
{
- ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xA1, 0xFF, 0x04);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x80, 0xA8);
}
#define AST_VIDMEM_SIZE_8M 0x00800000
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 3d13ca6e2..f6f6cc7 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -416,6 +416,14 @@ static void drm_fb_helper_dpms(struct fb_info *info, int dpms_mode)
return;
/*
+ * fbdev->blank can be called from irq context in case of a panic.
+ * Since we already have our own special panic handler which will
+ * restore the fbdev console mode completely, just bail out early.
+ */
+ if (oops_in_progress)
+ return;
+
+ /*
* For each CRTC in this fb, turn the connectors on/off.
*/
drm_modeset_lock_all(dev);
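
The early return above follows a common pattern; a minimal sketch (not part of the patch, names illustrative) of the guard idiom:

#include <linux/kernel.h>

static void demo_blank(int blank_mode)
{
	/*
	 * oops_in_progress is set while a panic/oops is being printed,
	 * possibly from IRQ context, where taking the sleeping modeset
	 * locks would be fatal; the dedicated panic handler restores
	 * the console instead.
	 */
	if (oops_in_progress)
		return;

	/* normal path: safe to take modeset locks and touch hardware */
}
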
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 55ab924..a6f4cb5 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -857,7 +857,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
- u32 rpstat, cagf;
+ u32 rpstat, cagf, reqf;
u32 rpupei, rpcurup, rpprevup;
u32 rpdownei, rpcurdown, rpprevdown;
int max_freq;
@@ -869,6 +869,14 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
gen6_gt_force_wake_get(dev_priv);
+ reqf = I915_READ(GEN6_RPNSWREQ);
+ reqf &= ~GEN6_TURBO_DISABLE;
+ if (IS_HASWELL(dev))
+ reqf >>= 24;
+ else
+ reqf >>= 25;
+ reqf *= GT_FREQUENCY_MULTIPLIER;
+
rpstat = I915_READ(GEN6_RPSTAT1);
rpupei = I915_READ(GEN6_RP_CUR_UP_EI);
rpcurup = I915_READ(GEN6_RP_CUR_UP);
@@ -893,6 +901,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
gt_perf_status & 0xff);
seq_printf(m, "Render p-state limit: %d\n",
rp_state_limits & 0xff);
+ seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
seq_printf(m, "CAGF: %dMHz\n", cagf);
seq_printf(m, "RP CUR UP EI: %dus\n", rpupei &
GEN6_CURICONT_MASK);
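
A self-contained sketch (not part of the patch) of the RPNSWREQ decode added above, assuming the era's 50 MHz ratio units for GT_FREQUENCY_MULTIPLIER and the turbo-disable flag in bit 31:

#include <stdio.h>
#include <stdint.h>

#define GT_FREQUENCY_MULTIPLIER	50		/* assumption: 50 MHz units */
#define TURBO_DISABLE		(1u << 31)	/* assumption: GEN6_TURBO_DISABLE */

static unsigned int decode_reqf(uint32_t reqf, int is_haswell)
{
	reqf &= ~TURBO_DISABLE;
	reqf >>= is_haswell ? 24 : 25;	/* ratio field moves on Haswell */
	return reqf * GT_FREQUENCY_MULTIPLIER;
}

int main(void)
{
	/* ratio 0x12 = 18 -> 900 MHz on Haswell */
	printf("%u MHz\n", decode_reqf(0x12000000u, 1));
	return 0;
}
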
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index fdaa091..c27a210 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1290,9 +1290,12 @@ static int i915_load_modeset_init(struct drm_device *dev)
* then we do not take part in VGA arbitration and the
* vga_client_register() fails with -ENODEV.
*/
- ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
- if (ret && ret != -ENODEV)
- goto out;
+ if (!HAS_PCH_SPLIT(dev)) {
+ ret = vga_client_register(dev->pdev, dev, NULL,
+ i915_vga_set_decode);
+ if (ret && ret != -ENODEV)
+ goto out;
+ }
intel_register_dsm_handler();
@@ -1348,6 +1351,12 @@ static int i915_load_modeset_init(struct drm_device *dev)
*/
intel_fbdev_initial_config(dev);
+ /*
+ * Must do this after fbcon init so that
+ * vgacon_save_screen() works during the handover.
+ */
+ i915_disable_vga_mem(dev);
+
/* Only enable hotplug handling once the fbdev is fully set up. */
dev_priv->enable_hotplug_processing = true;
@@ -1667,7 +1676,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
return 0;
out_gem_unload:
- if (dev_priv->mm.inactive_shrinker.shrink)
+ if (dev_priv->mm.inactive_shrinker.scan_objects)
unregister_shrinker(&dev_priv->mm.inactive_shrinker);
if (dev->pdev->msi_enabled)
@@ -1706,7 +1715,7 @@ int i915_driver_unload(struct drm_device *dev)
i915_teardown_sysfs(dev);
- if (dev_priv->mm.inactive_shrinker.shrink)
+ if (dev_priv->mm.inactive_shrinker.scan_objects)
unregister_shrinker(&dev_priv->mm.inactive_shrinker);
mutex_lock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index ccb28ea..69d8ed5 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -157,25 +157,6 @@ MODULE_PARM_DESC(prefault_disable,
static struct drm_driver driver;
extern int intel_agp_enabled;
-#define INTEL_VGA_DEVICE(id, info) { \
- .class = PCI_BASE_CLASS_DISPLAY << 16, \
- .class_mask = 0xff0000, \
- .vendor = 0x8086, \
- .device = id, \
- .subvendor = PCI_ANY_ID, \
- .subdevice = PCI_ANY_ID, \
- .driver_data = (unsigned long) info }
-
-#define INTEL_QUANTA_VGA_DEVICE(info) { \
- .class = PCI_BASE_CLASS_DISPLAY << 16, \
- .class_mask = 0xff0000, \
- .vendor = 0x8086, \
- .device = 0x16a, \
- .subvendor = 0x152d, \
- .subdevice = 0x8990, \
- .driver_data = (unsigned long) info }
-
-
static const struct intel_device_info intel_i830_info = {
.gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2,
.has_overlay = 1, .overlay_needs_physical = 1,
@@ -350,118 +331,41 @@ static const struct intel_device_info intel_haswell_m_info = {
.has_vebox_ring = 1,
};
+/*
+ * Make sure the device matches here are ordered from most specific to most
+ * general. For example, since the Quanta match is based on the subsystem
+ * and subvendor IDs, we need it to come before the more general IVB
+ * PCI ID matches, otherwise we'll use the wrong info struct above.
+ */
+#define INTEL_PCI_IDS \
+ INTEL_I830_IDS(&intel_i830_info), \
+ INTEL_I845G_IDS(&intel_845g_info), \
+ INTEL_I85X_IDS(&intel_i85x_info), \
+ INTEL_I865G_IDS(&intel_i865g_info), \
+ INTEL_I915G_IDS(&intel_i915g_info), \
+ INTEL_I915GM_IDS(&intel_i915gm_info), \
+ INTEL_I945G_IDS(&intel_i945g_info), \
+ INTEL_I945GM_IDS(&intel_i945gm_info), \
+ INTEL_I965G_IDS(&intel_i965g_info), \
+ INTEL_G33_IDS(&intel_g33_info), \
+ INTEL_I965GM_IDS(&intel_i965gm_info), \
+ INTEL_GM45_IDS(&intel_gm45_info), \
+ INTEL_G45_IDS(&intel_g45_info), \
+ INTEL_PINEVIEW_IDS(&intel_pineview_info), \
+ INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info), \
+ INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info), \
+ INTEL_SNB_D_IDS(&intel_sandybridge_d_info), \
+ INTEL_SNB_M_IDS(&intel_sandybridge_m_info), \
+ INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */ \
+ INTEL_IVB_M_IDS(&intel_ivybridge_m_info), \
+ INTEL_IVB_D_IDS(&intel_ivybridge_d_info), \
+ INTEL_HSW_D_IDS(&intel_haswell_d_info), \
+ INTEL_HSW_M_IDS(&intel_haswell_m_info), \
+ INTEL_VLV_M_IDS(&intel_valleyview_m_info), \
+ INTEL_VLV_D_IDS(&intel_valleyview_d_info)
+
static const struct pci_device_id pciidlist[] = { /* aka */
- INTEL_VGA_DEVICE(0x3577, &intel_i830_info), /* I830_M */
- INTEL_VGA_DEVICE(0x2562, &intel_845g_info), /* 845_G */
- INTEL_VGA_DEVICE(0x3582, &intel_i85x_info), /* I855_GM */
- INTEL_VGA_DEVICE(0x358e, &intel_i85x_info),
- INTEL_VGA_DEVICE(0x2572, &intel_i865g_info), /* I865_G */
- INTEL_VGA_DEVICE(0x2582, &intel_i915g_info), /* I915_G */
- INTEL_VGA_DEVICE(0x258a, &intel_i915g_info), /* E7221_G */
- INTEL_VGA_DEVICE(0x2592, &intel_i915gm_info), /* I915_GM */
- INTEL_VGA_DEVICE(0x2772, &intel_i945g_info), /* I945_G */
- INTEL_VGA_DEVICE(0x27a2, &intel_i945gm_info), /* I945_GM */
- INTEL_VGA_DEVICE(0x27ae, &intel_i945gm_info), /* I945_GME */
- INTEL_VGA_DEVICE(0x2972, &intel_i965g_info), /* I946_GZ */
- INTEL_VGA_DEVICE(0x2982, &intel_i965g_info), /* G35_G */
- INTEL_VGA_DEVICE(0x2992, &intel_i965g_info), /* I965_Q */
- INTEL_VGA_DEVICE(0x29a2, &intel_i965g_info), /* I965_G */
- INTEL_VGA_DEVICE(0x29b2, &intel_g33_info), /* Q35_G */
- INTEL_VGA_DEVICE(0x29c2, &intel_g33_info), /* G33_G */
- INTEL_VGA_DEVICE(0x29d2, &intel_g33_info), /* Q33_G */
- INTEL_VGA_DEVICE(0x2a02, &intel_i965gm_info), /* I965_GM */
- INTEL_VGA_DEVICE(0x2a12, &intel_i965gm_info), /* I965_GME */
- INTEL_VGA_DEVICE(0x2a42, &intel_gm45_info), /* GM45_G */
- INTEL_VGA_DEVICE(0x2e02, &intel_g45_info), /* IGD_E_G */
- INTEL_VGA_DEVICE(0x2e12, &intel_g45_info), /* Q45_G */
- INTEL_VGA_DEVICE(0x2e22, &intel_g45_info), /* G45_G */
- INTEL_VGA_DEVICE(0x2e32, &intel_g45_info), /* G41_G */
- INTEL_VGA_DEVICE(0x2e42, &intel_g45_info), /* B43_G */
- INTEL_VGA_DEVICE(0x2e92, &intel_g45_info), /* B43_G.1 */
- INTEL_VGA_DEVICE(0xa001, &intel_pineview_info),
- INTEL_VGA_DEVICE(0xa011, &intel_pineview_info),
- INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info),
- INTEL_VGA_DEVICE(0x0046, &intel_ironlake_m_info),
- INTEL_VGA_DEVICE(0x0102, &intel_sandybridge_d_info),
- INTEL_VGA_DEVICE(0x0112, &intel_sandybridge_d_info),
- INTEL_VGA_DEVICE(0x0122, &intel_sandybridge_d_info),
- INTEL_VGA_DEVICE(0x0106, &intel_sandybridge_m_info),
- INTEL_VGA_DEVICE(0x0116, &intel_sandybridge_m_info),
- INTEL_VGA_DEVICE(0x0126, &intel_sandybridge_m_info),
- INTEL_VGA_DEVICE(0x010A, &intel_sandybridge_d_info),
- INTEL_VGA_DEVICE(0x0156, &intel_ivybridge_m_info), /* GT1 mobile */
- INTEL_VGA_DEVICE(0x0166, &intel_ivybridge_m_info), /* GT2 mobile */
- INTEL_VGA_DEVICE(0x0152, &intel_ivybridge_d_info), /* GT1 desktop */
- INTEL_VGA_DEVICE(0x0162, &intel_ivybridge_d_info), /* GT2 desktop */
- INTEL_VGA_DEVICE(0x015a, &intel_ivybridge_d_info), /* GT1 server */
- INTEL_QUANTA_VGA_DEVICE(&intel_ivybridge_q_info), /* Quanta transcode */
- INTEL_VGA_DEVICE(0x016a, &intel_ivybridge_d_info), /* GT2 server */
- INTEL_VGA_DEVICE(0x0402, &intel_haswell_d_info), /* GT1 desktop */
- INTEL_VGA_DEVICE(0x0412, &intel_haswell_d_info), /* GT2 desktop */
- INTEL_VGA_DEVICE(0x0422, &intel_haswell_d_info), /* GT3 desktop */
- INTEL_VGA_DEVICE(0x040a, &intel_haswell_d_info), /* GT1 server */
- INTEL_VGA_DEVICE(0x041a, &intel_haswell_d_info), /* GT2 server */
- INTEL_VGA_DEVICE(0x042a, &intel_haswell_d_info), /* GT3 server */
- INTEL_VGA_DEVICE(0x0406, &intel_haswell_m_info), /* GT1 mobile */
- INTEL_VGA_DEVICE(0x0416, &intel_haswell_m_info), /* GT2 mobile */
- INTEL_VGA_DEVICE(0x0426, &intel_haswell_m_info), /* GT2 mobile */
- INTEL_VGA_DEVICE(0x040B, &intel_haswell_d_info), /* GT1 reserved */
- INTEL_VGA_DEVICE(0x041B, &intel_haswell_d_info), /* GT2 reserved */
- INTEL_VGA_DEVICE(0x042B, &intel_haswell_d_info), /* GT3 reserved */
- INTEL_VGA_DEVICE(0x040E, &intel_haswell_d_info), /* GT1 reserved */
- INTEL_VGA_DEVICE(0x041E, &intel_haswell_d_info), /* GT2 reserved */
- INTEL_VGA_DEVICE(0x042E, &intel_haswell_d_info), /* GT3 reserved */
- INTEL_VGA_DEVICE(0x0C02, &intel_haswell_d_info), /* SDV GT1 desktop */
- INTEL_VGA_DEVICE(0x0C12, &intel_haswell_d_info), /* SDV GT2 desktop */
- INTEL_VGA_DEVICE(0x0C22, &intel_haswell_d_info), /* SDV GT3 desktop */
- INTEL_VGA_DEVICE(0x0C0A, &intel_haswell_d_info), /* SDV GT1 server */
- INTEL_VGA_DEVICE(0x0C1A, &intel_haswell_d_info), /* SDV GT2 server */
- INTEL_VGA_DEVICE(0x0C2A, &intel_haswell_d_info), /* SDV GT3 server */
- INTEL_VGA_DEVICE(0x0C06, &intel_haswell_m_info), /* SDV GT1 mobile */
- INTEL_VGA_DEVICE(0x0C16, &intel_haswell_m_info), /* SDV GT2 mobile */
- INTEL_VGA_DEVICE(0x0C26, &intel_haswell_m_info), /* SDV GT3 mobile */
- INTEL_VGA_DEVICE(0x0C0B, &intel_haswell_d_info), /* SDV GT1 reserved */
- INTEL_VGA_DEVICE(0x0C1B, &intel_haswell_d_info), /* SDV GT2 reserved */
- INTEL_VGA_DEVICE(0x0C2B, &intel_haswell_d_info), /* SDV GT3 reserved */
- INTEL_VGA_DEVICE(0x0C0E, &intel_haswell_d_info), /* SDV GT1 reserved */
- INTEL_VGA_DEVICE(0x0C1E, &intel_haswell_d_info), /* SDV GT2 reserved */
- INTEL_VGA_DEVICE(0x0C2E, &intel_haswell_d_info), /* SDV GT3 reserved */
- INTEL_VGA_DEVICE(0x0A02, &intel_haswell_d_info), /* ULT GT1 desktop */
- INTEL_VGA_DEVICE(0x0A12, &intel_haswell_d_info), /* ULT GT2 desktop */
- INTEL_VGA_DEVICE(0x0A22, &intel_haswell_d_info), /* ULT GT3 desktop */
- INTEL_VGA_DEVICE(0x0A0A, &intel_haswell_d_info), /* ULT GT1 server */
- INTEL_VGA_DEVICE(0x0A1A, &intel_haswell_d_info), /* ULT GT2 server */
- INTEL_VGA_DEVICE(0x0A2A, &intel_haswell_d_info), /* ULT GT3 server */
- INTEL_VGA_DEVICE(0x0A06, &intel_haswell_m_info), /* ULT GT1 mobile */
- INTEL_VGA_DEVICE(0x0A16, &intel_haswell_m_info), /* ULT GT2 mobile */
- INTEL_VGA_DEVICE(0x0A26, &intel_haswell_m_info), /* ULT GT3 mobile */
- INTEL_VGA_DEVICE(0x0A0B, &intel_haswell_d_info), /* ULT GT1 reserved */
- INTEL_VGA_DEVICE(0x0A1B, &intel_haswell_d_info), /* ULT GT2 reserved */
- INTEL_VGA_DEVICE(0x0A2B, &intel_haswell_d_info), /* ULT GT3 reserved */
- INTEL_VGA_DEVICE(0x0A0E, &intel_haswell_m_info), /* ULT GT1 reserved */
- INTEL_VGA_DEVICE(0x0A1E, &intel_haswell_m_info), /* ULT GT2 reserved */
- INTEL_VGA_DEVICE(0x0A2E, &intel_haswell_m_info), /* ULT GT3 reserved */
- INTEL_VGA_DEVICE(0x0D02, &intel_haswell_d_info), /* CRW GT1 desktop */
- INTEL_VGA_DEVICE(0x0D12, &intel_haswell_d_info), /* CRW GT2 desktop */
- INTEL_VGA_DEVICE(0x0D22, &intel_haswell_d_info), /* CRW GT3 desktop */
- INTEL_VGA_DEVICE(0x0D0A, &intel_haswell_d_info), /* CRW GT1 server */
- INTEL_VGA_DEVICE(0x0D1A, &intel_haswell_d_info), /* CRW GT2 server */
- INTEL_VGA_DEVICE(0x0D2A, &intel_haswell_d_info), /* CRW GT3 server */
- INTEL_VGA_DEVICE(0x0D06, &intel_haswell_m_info), /* CRW GT1 mobile */
- INTEL_VGA_DEVICE(0x0D16, &intel_haswell_m_info), /* CRW GT2 mobile */
- INTEL_VGA_DEVICE(0x0D26, &intel_haswell_m_info), /* CRW GT3 mobile */
- INTEL_VGA_DEVICE(0x0D0B, &intel_haswell_d_info), /* CRW GT1 reserved */
- INTEL_VGA_DEVICE(0x0D1B, &intel_haswell_d_info), /* CRW GT2 reserved */
- INTEL_VGA_DEVICE(0x0D2B, &intel_haswell_d_info), /* CRW GT3 reserved */
- INTEL_VGA_DEVICE(0x0D0E, &intel_haswell_d_info), /* CRW GT1 reserved */
- INTEL_VGA_DEVICE(0x0D1E, &intel_haswell_d_info), /* CRW GT2 reserved */
- INTEL_VGA_DEVICE(0x0D2E, &intel_haswell_d_info), /* CRW GT3 reserved */
- INTEL_VGA_DEVICE(0x0f30, &intel_valleyview_m_info),
- INTEL_VGA_DEVICE(0x0f31, &intel_valleyview_m_info),
- INTEL_VGA_DEVICE(0x0f32, &intel_valleyview_m_info),
- INTEL_VGA_DEVICE(0x0f33, &intel_valleyview_m_info),
- INTEL_VGA_DEVICE(0x0157, &intel_valleyview_m_info),
- INTEL_VGA_DEVICE(0x0155, &intel_valleyview_d_info),
+ INTEL_PCI_IDS,
{0, 0, 0}
};
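
The ordering comment above matters because the PCI core walks a pci_device_id table in order and stops at the first match; a standalone sketch (not part of the patch, with simplified wildcard handling) of why the subsystem-qualified Quanta entry must precede the generic 0x016a entries:

#include <stdio.h>

struct id { unsigned dev, subven, subdev; const char *info; };

/* first match wins; 0 stands in for PCI_ANY_ID here */
static const char *match(const struct id *t, unsigned d,
			 unsigned sv, unsigned sd)
{
	for (; t->info; t++)
		if (t->dev == d &&
		    (!t->subven || t->subven == sv) &&
		    (!t->subdev || t->subdev == sd))
			return t->info;
	return "no match";
}

int main(void)
{
	static const struct id tbl[] = {
		{ 0x016a, 0x152d, 0x8990, "ivybridge_q" }, /* specific first */
		{ 0x016a, 0, 0, "ivybridge_d" },	   /* generic after */
		{ 0 }
	};

	printf("%s\n", match(tbl, 0x016a, 0x152d, 0x8990)); /* ivybridge_q */
	return 0;
}
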
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 52a3785..35874b3 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1236,6 +1236,13 @@ typedef struct drm_i915_private {
unsigned int fsb_freq, mem_freq, is_ddr3;
+ /**
+ * wq - Driver workqueue for GEM.
+ *
+ * NOTE: Work items scheduled here are not allowed to grab any modeset
+ * locks, because otherwise the flushing done in the pageflip code will
+ * result in deadlocks.
+ */
struct workqueue_struct *wq;
/* Display functions */
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 2d1cb10..8507c6d 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -57,10 +57,12 @@ static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
struct drm_i915_fence_reg *fence,
bool enable);
-static int i915_gem_inactive_shrink(struct shrinker *shrinker,
- struct shrink_control *sc);
+static unsigned long i915_gem_inactive_count(struct shrinker *shrinker,
+ struct shrink_control *sc);
+static unsigned long i915_gem_inactive_scan(struct shrinker *shrinker,
+ struct shrink_control *sc);
static long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
-static void i915_gem_shrink_all(struct drm_i915_private *dev_priv);
+static long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
static bool cpu_cache_is_coherent(struct drm_device *dev,
@@ -212,7 +214,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
void *i915_gem_object_alloc(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- return kmem_cache_alloc(dev_priv->slab, GFP_KERNEL | __GFP_ZERO);
+ return kmem_cache_zalloc(dev_priv->slab, GFP_KERNEL);
}
void i915_gem_object_free(struct drm_i915_gem_object *obj)
@@ -1695,6 +1697,7 @@ static long
__i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
bool purgeable_only)
{
+ struct list_head still_bound_list;
struct drm_i915_gem_object *obj, *next;
long count = 0;
@@ -1709,23 +1712,55 @@ __i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
}
}
- list_for_each_entry_safe(obj, next, &dev_priv->mm.bound_list,
- global_list) {
+ /*
+ * As we may completely rewrite the bound list whilst unbinding
+ * (due to retiring requests) we have to strictly process only
+ * one element of the list at a time, and recheck the list
+ * on every iteration.
+ */
+ INIT_LIST_HEAD(&still_bound_list);
+ while (count < target && !list_empty(&dev_priv->mm.bound_list)) {
struct i915_vma *vma, *v;
+ obj = list_first_entry(&dev_priv->mm.bound_list,
+ typeof(*obj), global_list);
+ list_move_tail(&obj->global_list, &still_bound_list);
+
if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
continue;
+ /*
+ * Hold a reference whilst we unbind this object, as we may
+ * end up waiting for and retiring requests. This might
+ * release the final reference (held by the active list)
+ * and result in the object being freed from under us.
+ *
+ * Note 1: Shrinking the bound list is special since only active
+ * (and hence bound) objects can contain such limbo objects, so
+ * we don't need special tricks for shrinking the unbound list.
+ * The only other place where we have to be careful with active
+ * objects suddenly disappearing due to retiring requests is the
+ * eviction code.
+ *
+ * Note 2: Even though the bound list doesn't hold a reference
+ * to the object we can safely grab one here: The final object
+ * unreferencing and the bound_list are both protected by the
+ * dev->struct_mutex and so we won't ever be able to observe an
+ * object on the bound_list with a reference count of 0.
+ */
+ drm_gem_object_reference(&obj->base);
+
list_for_each_entry_safe(vma, v, &obj->vma_list, vma_link)
if (i915_vma_unbind(vma))
break;
- if (!i915_gem_object_put_pages(obj)) {
+ if (i915_gem_object_put_pages(obj) == 0)
count += obj->base.size >> PAGE_SHIFT;
- if (count >= target)
- return count;
- }
+
+ drm_gem_object_unreference(&obj->base);
}
+ list_splice(&still_bound_list, &dev_priv->mm.bound_list);
return count;
}
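
The still_bound_list dance above is an instance of a reusable pattern; a minimal sketch (not part of the patch) of draining a list one element at a time when the processing step may rewrite the source list:

#include <linux/list.h>

static long drain_one_at_a_time(struct list_head *src,
				long (*process)(struct list_head *))
{
	LIST_HEAD(still_pending);
	long count = 0;

	while (!list_empty(src)) {
		struct list_head *elem = src->next;

		/* detach first, so @src may be rewritten under us */
		list_move_tail(elem, &still_pending);
		count += process(elem);
	}
	/* put the survivors back where they came from */
	list_splice(&still_pending, src);
	return count;
}
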
@@ -1736,16 +1771,21 @@ i915_gem_purge(struct drm_i915_private *dev_priv, long target)
return __i915_gem_shrink(dev_priv, target, true);
}
-static void
+static long
i915_gem_shrink_all(struct drm_i915_private *dev_priv)
{
struct drm_i915_gem_object *obj, *next;
+ long freed = 0;
i915_gem_evict_everything(dev_priv->dev);
list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list,
- global_list)
+ global_list) {
+ if (obj->pages_pin_count == 0)
+ freed += obj->base.size >> PAGE_SHIFT;
i915_gem_object_put_pages(obj);
+ }
+ return freed;
}
static int
@@ -1774,7 +1814,6 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
page_count = obj->base.size / PAGE_SIZE;
if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
- sg_free_table(st);
kfree(st);
return -ENOMEM;
}
@@ -4526,7 +4565,8 @@ i915_gem_load(struct drm_device *dev)
dev_priv->mm.interruptible = true;
- dev_priv->mm.inactive_shrinker.shrink = i915_gem_inactive_shrink;
+ dev_priv->mm.inactive_shrinker.scan_objects = i915_gem_inactive_scan;
+ dev_priv->mm.inactive_shrinker.count_objects = i915_gem_inactive_count;
dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
register_shrinker(&dev_priv->mm.inactive_shrinker);
}
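
The hunk above converts i915 to the split shrinker API merged around v3.12; a minimal sketch (not part of the patch, demo_* names illustrative) of the new contract, where count_objects() reports what is freeable and scan_objects() does the freeing or returns SHRINK_STOP to tell the VM to back off:

#include <linux/kernel.h>
#include <linux/shrinker.h>

static unsigned long demo_freeable;	/* stand-in for a real cache size */

static unsigned long demo_count(struct shrinker *s, struct shrink_control *sc)
{
	return demo_freeable;
}

static unsigned long demo_scan(struct shrinker *s, struct shrink_control *sc)
{
	unsigned long freed = min(demo_freeable, sc->nr_to_scan);

	if (!freed)
		return SHRINK_STOP;	/* e.g. lock contention: back off */
	demo_freeable -= freed;
	return freed;			/* objects actually freed */
}

static struct shrinker demo_shrinker = {
	.count_objects	= demo_count,
	.scan_objects	= demo_scan,
	.seeks		= DEFAULT_SEEKS,
};
/* registered with register_shrinker(&demo_shrinker); */
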
@@ -4749,8 +4789,8 @@ static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
#endif
}
-static int
-i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
+static unsigned long
+i915_gem_inactive_count(struct shrinker *shrinker, struct shrink_control *sc)
{
struct drm_i915_private *dev_priv =
container_of(shrinker,
@@ -4758,45 +4798,35 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
mm.inactive_shrinker);
struct drm_device *dev = dev_priv->dev;
struct drm_i915_gem_object *obj;
- int nr_to_scan = sc->nr_to_scan;
bool unlock = true;
- int cnt;
+ unsigned long count;
if (!mutex_trylock(&dev->struct_mutex)) {
if (!mutex_is_locked_by(&dev->struct_mutex, current))
- return 0;
+ return SHRINK_STOP;
if (dev_priv->mm.shrinker_no_lock_stealing)
- return 0;
+ return SHRINK_STOP;
unlock = false;
}
- if (nr_to_scan) {
- nr_to_scan -= i915_gem_purge(dev_priv, nr_to_scan);
- if (nr_to_scan > 0)
- nr_to_scan -= __i915_gem_shrink(dev_priv, nr_to_scan,
- false);
- if (nr_to_scan > 0)
- i915_gem_shrink_all(dev_priv);
- }
-
- cnt = 0;
+ count = 0;
list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
if (obj->pages_pin_count == 0)
- cnt += obj->base.size >> PAGE_SHIFT;
+ count += obj->base.size >> PAGE_SHIFT;
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
if (obj->active)
continue;
if (obj->pin_count == 0 && obj->pages_pin_count == 0)
- cnt += obj->base.size >> PAGE_SHIFT;
+ count += obj->base.size >> PAGE_SHIFT;
}
if (unlock)
mutex_unlock(&dev->struct_mutex);
- return cnt;
+ return count;
}
/* All the new VM stuff */
@@ -4860,6 +4890,40 @@ unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
return 0;
}
+static unsigned long
+i915_gem_inactive_scan(struct shrinker *shrinker, struct shrink_control *sc)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(shrinker,
+ struct drm_i915_private,
+ mm.inactive_shrinker);
+ struct drm_device *dev = dev_priv->dev;
+ int nr_to_scan = sc->nr_to_scan;
+ unsigned long freed;
+ bool unlock = true;
+
+ if (!mutex_trylock(&dev->struct_mutex)) {
+ if (!mutex_is_locked_by(&dev->struct_mutex, current))
+ return 0;
+
+ if (dev_priv->mm.shrinker_no_lock_stealing)
+ return 0;
+
+ unlock = false;
+ }
+
+ freed = i915_gem_purge(dev_priv, nr_to_scan);
+ if (freed < nr_to_scan)
+ freed += __i915_gem_shrink(dev_priv, nr_to_scan,
+ false);
+ if (freed < nr_to_scan)
+ freed += i915_gem_shrink_all(dev_priv);
+
+ if (unlock)
+ mutex_unlock(&dev->struct_mutex);
+ return freed;
+}
+
struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
struct i915_address_space *vm)
{
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index e918b05..7d5752f 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -42,27 +42,24 @@ static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachme
ret = i915_mutex_lock_interruptible(obj->base.dev);
if (ret)
- return ERR_PTR(ret);
+ goto err;
ret = i915_gem_object_get_pages(obj);
- if (ret) {
- st = ERR_PTR(ret);
- goto out;
- }
+ if (ret)
+ goto err_unlock;
+
+ i915_gem_object_pin_pages(obj);
/* Copy sg so that we make an independent mapping */
st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
if (st == NULL) {
- st = ERR_PTR(-ENOMEM);
- goto out;
+ ret = -ENOMEM;
+ goto err_unpin;
}
ret = sg_alloc_table(st, obj->pages->nents, GFP_KERNEL);
- if (ret) {
- kfree(st);
- st = ERR_PTR(ret);
- goto out;
- }
+ if (ret)
+ goto err_free;
src = obj->pages->sgl;
dst = st->sgl;
@@ -73,17 +70,23 @@ static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachme
}
if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
- sg_free_table(st);
- kfree(st);
- st = ERR_PTR(-ENOMEM);
- goto out;
+ ret = -ENOMEM;
+ goto err_free_sg;
}
- i915_gem_object_pin_pages(obj);
-
-out:
mutex_unlock(&obj->base.dev->struct_mutex);
return st;
+
+err_free_sg:
+ sg_free_table(st);
+err_free:
+ kfree(st);
+err_unpin:
+ i915_gem_object_unpin_pages(obj);
+err_unlock:
+ mutex_unlock(&obj->base.dev->struct_mutex);
+err:
+ return ERR_PTR(ret);
}
static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
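
The restructured error handling above uses the standard kernel unwind ladder; a self-contained sketch (not part of the patch, names illustrative) showing the shape: each failure jumps to the label that releases everything acquired so far, in reverse order, leaving a single success exit and no double free:

#include <stdlib.h>

static int setup_two_buffers(void **out_a, void **out_b)
{
	void *a, *b;
	int ret;

	a = malloc(64);
	if (!a) {
		ret = -1;
		goto err;
	}

	b = malloc(64);
	if (!b) {
		ret = -1;
		goto err_free_a;	/* undo only what succeeded */
	}

	*out_a = a;
	*out_b = b;
	return 0;			/* single success exit */

err_free_a:
	free(a);
err:
	return ret;
}

int main(void)
{
	void *a, *b;

	return setup_two_buffers(&a, &b) ? EXIT_FAILURE : EXIT_SUCCESS;
}
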
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 792c52a..bf34577 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -310,6 +310,9 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
else
ret = relocate_entry_gtt(obj, reloc);
+ if (ret)
+ return ret;
+
/* and update the user's relocation entry */
reloc->presumed_offset = target_offset;
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 9969d10b..e15a1d9 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -201,6 +201,9 @@ int i915_gem_init_stolen(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int bios_reserved = 0;
+ if (dev_priv->gtt.stolen_size == 0)
+ return 0;
+
dev_priv->mm.stolen_base = i915_stolen_to_physical(dev);
if (dev_priv->mm.stolen_base == 0)
return 0;
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 558e568..aba9d74 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -641,7 +641,7 @@ i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
if (WARN_ON(ring->id != RCS))
return NULL;
- obj = ring->private;
+ obj = ring->scratch.obj;
if (acthd >= i915_gem_obj_ggtt_offset(obj) &&
acthd < i915_gem_obj_ggtt_offset(obj) + obj->base.size)
return i915_error_object_create(dev_priv, obj);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index a03b445..83cce0c 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1027,8 +1027,13 @@ static inline void intel_hpd_irq_handler(struct drm_device *dev,
dev_priv->display.hpd_irq_setup(dev);
spin_unlock(&dev_priv->irq_lock);
- queue_work(dev_priv->wq,
- &dev_priv->hotplug_work);
+ /*
+ * Our hotplug handler can grab modeset locks (by calling down into the
+ * fb helpers). Hence it must not be run on our own dev_priv->wq work
+ * queue, because otherwise the flush_work in the pageflip code will
+ * deadlock.
+ */
+ schedule_work(&dev_priv->hotplug_work);
}
static void gmbus_irq_handler(struct drm_device *dev)
@@ -1655,7 +1660,13 @@ void i915_handle_error(struct drm_device *dev, bool wedged)
wake_up_all(&ring->irq_queue);
}
- queue_work(dev_priv->wq, &dev_priv->gpu_error.work);
+ /*
+ * Our reset work can grab modeset locks (since it needs to reset the
+ * state of outstanding pageflips). Hence it must not be run on our own
+ * dev_priv->wq work queue, because otherwise the flush_work in the pageflip
+ * code will deadlock.
+ */
+ schedule_work(&dev_priv->gpu_error.work);
}
static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe)
@@ -2027,9 +2038,9 @@ static void i915_hangcheck_elapsed(unsigned long data)
for_each_ring(ring, dev_priv, i) {
if (ring->hangcheck.score > FIRE) {
- DRM_ERROR("%s on %s\n",
- stuck[i] ? "stuck" : "no progress",
- ring->name);
+ DRM_INFO("%s on %s\n",
+ stuck[i] ? "stuck" : "no progress",
+ ring->name);
rings_hung++;
}
}
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index b6a58f7..c159e1a 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -33,21 +33,6 @@
#define _MASKED_BIT_ENABLE(a) (((a) << 16) | (a))
#define _MASKED_BIT_DISABLE(a) ((a) << 16)
-/*
- * The Bridge device's PCI config space has information about the
- * fb aperture size and the amount of pre-reserved memory.
- * This is all handled in the intel-gtt.ko module. i915.ko only
- * cares about the vga bit for the vga rbiter.
- */
-#define INTEL_GMCH_CTRL 0x52
-#define INTEL_GMCH_VGA_DISABLE (1 << 1)
-#define SNB_GMCH_CTRL 0x50
-#define SNB_GMCH_GGMS_SHIFT 8 /* GTT Graphics Memory Size */
-#define SNB_GMCH_GGMS_MASK 0x3
-#define SNB_GMCH_GMS_SHIFT 3 /* Graphics Mode Select */
-#define SNB_GMCH_GMS_MASK 0x1f
-
-
/* PCI config space */
#define HPLLCC 0xc0 /* 855 only */
@@ -245,6 +230,7 @@
* address/value pairs. Don't overdo it, though; x <= 2^4 must hold!
*/
#define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*x-1)
+#define MI_STORE_REGISTER_MEM(x) MI_INSTR(0x24, 2*x-1)
#define MI_FLUSH_DW MI_INSTR(0x26, 1) /* for GEN6 */
#define MI_FLUSH_DW_STORE_INDEX (1<<21)
#define MI_INVALIDATE_TLB (1<<18)
@@ -693,6 +679,23 @@
#define FPGA_DBG_RM_NOCLAIM (1<<31)
#define DERRMR 0x44050
+#define DERRMR_PIPEA_SCANLINE (1<<0)
+#define DERRMR_PIPEA_PRI_FLIP_DONE (1<<1)
+#define DERRMR_PIPEA_SPR_FLIP_DONE (1<<2)
+#define DERRMR_PIPEA_VBLANK (1<<3)
+#define DERRMR_PIPEA_HBLANK (1<<5)
+#define DERRMR_PIPEB_SCANLINE (1<<8)
+#define DERRMR_PIPEB_PRI_FLIP_DONE (1<<9)
+#define DERRMR_PIPEB_SPR_FLIP_DONE (1<<10)
+#define DERRMR_PIPEB_VBLANK (1<<11)
+#define DERRMR_PIPEB_HBLANK (1<<13)
+/* Note that PIPEC is not a simple translation of PIPEA/PIPEB */
+#define DERRMR_PIPEC_SCANLINE (1<<14)
+#define DERRMR_PIPEC_PRI_FLIP_DONE (1<<15)
+#define DERRMR_PIPEC_SPR_FLIP_DONE (1<<20)
+#define DERRMR_PIPEC_VBLANK (1<<21)
+#define DERRMR_PIPEC_HBLANK (1<<22)
+
/* GM45+ chicken bits -- debug workaround bits that may be required
* for various sorts of correct behavior. The top 16 bits of each are
@@ -3310,6 +3313,7 @@
#define MCURSOR_PIPE_A 0x00
#define MCURSOR_PIPE_B (1 << 28)
#define MCURSOR_GAMMA_ENABLE (1 << 26)
+#define CURSOR_TRICKLE_FEED_DISABLE (1 << 14)
#define _CURABASE (dev_priv->info->display_mmio_offset + 0x70084)
#define _CURAPOS (dev_priv->info->display_mmio_offset + 0x70088)
#define CURSOR_POS_MASK 0x007FF
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index a777e7f..c8c4112 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -224,6 +224,18 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
return snprintf(buf, PAGE_SIZE, "%d\n", ret);
}
+static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+ struct drm_device *dev = minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ vlv_gpu_freq(dev_priv->mem_freq,
+ dev_priv->rps.rpe_delay));
+}
+
static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
@@ -366,6 +378,7 @@ static DEVICE_ATTR(gt_cur_freq_mhz, S_IRUGO, gt_cur_freq_mhz_show, NULL);
static DEVICE_ATTR(gt_max_freq_mhz, S_IRUGO | S_IWUSR, gt_max_freq_mhz_show, gt_max_freq_mhz_store);
static DEVICE_ATTR(gt_min_freq_mhz, S_IRUGO | S_IWUSR, gt_min_freq_mhz_show, gt_min_freq_mhz_store);
+static DEVICE_ATTR(vlv_rpe_freq_mhz, S_IRUGO, vlv_rpe_freq_mhz_show, NULL);
static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf);
static DEVICE_ATTR(gt_RP0_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
@@ -409,6 +422,14 @@ static const struct attribute *gen6_attrs[] = {
NULL,
};
+static const struct attribute *vlv_attrs[] = {
+ &dev_attr_gt_cur_freq_mhz.attr,
+ &dev_attr_gt_max_freq_mhz.attr,
+ &dev_attr_gt_min_freq_mhz.attr,
+ &dev_attr_vlv_rpe_freq_mhz.attr,
+ NULL,
+};
+
static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t off, size_t count)
@@ -492,11 +513,13 @@ void i915_setup_sysfs(struct drm_device *dev)
DRM_ERROR("l3 parity sysfs setup failed\n");
}
- if (INTEL_INFO(dev)->gen >= 6) {
+ ret = 0;
+ if (IS_VALLEYVIEW(dev))
+ ret = sysfs_create_files(&dev->primary->kdev.kobj, vlv_attrs);
+ else if (INTEL_INFO(dev)->gen >= 6)
ret = sysfs_create_files(&dev->primary->kdev.kobj, gen6_attrs);
- if (ret)
- DRM_ERROR("gen6 sysfs setup failed\n");
- }
+ if (ret)
+ DRM_ERROR("RPS sysfs setup failed\n");
ret = sysfs_create_bin_file(&dev->primary->kdev.kobj,
&error_state_attr);
@@ -507,7 +530,10 @@ void i915_setup_sysfs(struct drm_device *dev)
void i915_teardown_sysfs(struct drm_device *dev)
{
sysfs_remove_bin_file(&dev->primary->kdev.kobj, &error_state_attr);
- sysfs_remove_files(&dev->primary->kdev.kobj, gen6_attrs);
+ if (IS_VALLEYVIEW(dev))
+ sysfs_remove_files(&dev->primary->kdev.kobj, vlv_attrs);
+ else
+ sysfs_remove_files(&dev->primary->kdev.kobj, gen6_attrs);
device_remove_bin_file(&dev->primary->kdev, &dpf_attrs);
#ifdef CONFIG_PM
sysfs_unmerge_group(&dev->primary->kdev.kobj, &rc6_attr_group);
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index b5a3875..ea9022e 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -688,7 +688,7 @@ static void intel_crt_reset(struct drm_connector *connector)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crt *crt = intel_attached_crt(connector);
- if (HAS_PCH_SPLIT(dev)) {
+ if (INTEL_INFO(dev)->gen >= 5) {
u32 adpa;
adpa = I915_READ(crt->adpa_reg);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 38452d8..2489d0b 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2077,8 +2077,10 @@ static int ironlake_update_plane(struct drm_crtc *crtc,
else
dspcntr &= ~DISPPLANE_TILED;
- /* must disable */
- dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
+ if (IS_HASWELL(dev))
+ dspcntr &= ~DISPPLANE_TRICKLE_FEED_DISABLE;
+ else
+ dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
I915_WRITE(reg, dspcntr);
@@ -6762,8 +6764,10 @@ static void ivb_update_cursor(struct drm_crtc *crtc, u32 base)
cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
cntl |= CURSOR_MODE_DISABLE;
}
- if (IS_HASWELL(dev))
+ if (IS_HASWELL(dev)) {
cntl |= CURSOR_PIPE_CSC_ENABLE;
+ cntl &= ~CURSOR_TRICKLE_FEED_DISABLE;
+ }
I915_WRITE(CURCNTR_IVB(pipe), cntl);
intel_crtc->cursor_visible = visible;
@@ -7309,8 +7313,7 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
}
}
- pipe_config->adjusted_mode.clock = clock.dot *
- pipe_config->pixel_multiplier;
+ pipe_config->adjusted_mode.clock = clock.dot;
}
static void ironlake_crtc_clock_get(struct intel_crtc *crtc,
@@ -7828,12 +7831,6 @@ err:
return ret;
}
-/*
- * On gen7 we currently use the blit ring because (in early silicon at least)
- * the render ring doesn't give us interrpts for page flip completion, which
- * means clients will hang after the first flip is queued. Fortunately the
- * blit ring generates interrupts properly, so use it instead.
- */
static int intel_gen7_queue_flip(struct drm_device *dev,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
@@ -7842,9 +7839,13 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
+ struct intel_ring_buffer *ring;
uint32_t plane_bit = 0;
- int ret;
+ int len, ret;
+
+ ring = obj->ring;
+ if (IS_VALLEYVIEW(dev) || ring == NULL || ring->id != RCS)
+ ring = &dev_priv->ring[BCS];
ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
if (ret)
@@ -7866,10 +7867,34 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
goto err_unpin;
}
- ret = intel_ring_begin(ring, 4);
+ len = 4;
+ if (ring->id == RCS)
+ len += 6;
+
+ ret = intel_ring_begin(ring, len);
if (ret)
goto err_unpin;
+ /* Unmask the flip-done completion message. Note that the bspec says that
+ * we should do this for both the BCS and RCS, and that we must not unmask
+ * more than one flip event at any time (or ensure that one flip message
+ * can be sent by waiting for flip-done prior to queueing new flips).
+ * Experimentation says that BCS works despite DERRMR masking all
+ * flip-done completion events and that unmasking all planes at once
+ * for the RCS also doesn't appear to drop events. Setting the DERRMR
+ * to zero does lead to lockups within MI_DISPLAY_FLIP.
+ */
+ if (ring->id == RCS) {
+ intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+ intel_ring_emit(ring, DERRMR);
+ intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
+ DERRMR_PIPEB_PRI_FLIP_DONE |
+ DERRMR_PIPEC_PRI_FLIP_DONE));
+ intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1));
+ intel_ring_emit(ring, DERRMR);
+ intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
+ }
+
intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
@@ -10022,6 +10047,33 @@ static void i915_disable_vga(struct drm_device *dev)
POSTING_READ(vga_reg);
}
+static void i915_enable_vga_mem(struct drm_device *dev)
+{
+ /* Enable VGA memory on Intel HD */
+ if (HAS_PCH_SPLIT(dev)) {
+ vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
+ outb(inb(VGA_MSR_READ) | VGA_MSR_MEM_EN, VGA_MSR_WRITE);
+ vga_set_legacy_decoding(dev->pdev, VGA_RSRC_LEGACY_IO |
+ VGA_RSRC_LEGACY_MEM |
+ VGA_RSRC_NORMAL_IO |
+ VGA_RSRC_NORMAL_MEM);
+ vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
+ }
+}
+
+void i915_disable_vga_mem(struct drm_device *dev)
+{
+ /* Disable VGA memory on Intel HD */
+ if (HAS_PCH_SPLIT(dev)) {
+ vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
+ outb(inb(VGA_MSR_READ) & ~VGA_MSR_MEM_EN, VGA_MSR_WRITE);
+ vga_set_legacy_decoding(dev->pdev, VGA_RSRC_LEGACY_IO |
+ VGA_RSRC_NORMAL_IO |
+ VGA_RSRC_NORMAL_MEM);
+ vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
+ }
+}
+
void intel_modeset_init_hw(struct drm_device *dev)
{
intel_init_power_well(dev);
@@ -10300,6 +10352,7 @@ void i915_redisable_vga(struct drm_device *dev)
if (I915_READ(vga_reg) != VGA_DISP_DISABLE) {
DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
i915_disable_vga(dev);
+ i915_disable_vga_mem(dev);
}
}
@@ -10513,6 +10566,8 @@ void intel_modeset_cleanup(struct drm_device *dev)
intel_disable_fbc(dev);
+ i915_enable_vga_mem(dev);
+
intel_disable_gt_powersave(dev);
ironlake_teardown_rc6(dev);
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 1760808..a47799e 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -551,7 +551,7 @@ extern int intel_panel_init(struct intel_panel *panel,
struct drm_display_mode *fixed_mode);
extern void intel_panel_fini(struct intel_panel *panel);
-extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
+extern void intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode,
struct drm_display_mode *adjusted_mode);
extern void intel_pch_panel_fitting(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config,
@@ -792,5 +792,6 @@ extern void hsw_pc8_disable_interrupts(struct drm_device *dev);
extern void hsw_pc8_restore_interrupts(struct drm_device *dev);
extern void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv);
extern void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv);
+extern void i915_disable_vga_mem(struct drm_device *dev);
#endif /* __INTEL_DRV_H__ */
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 4d33278..831a5c0 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -128,8 +128,8 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder)
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
- struct drm_display_mode *fixed_mode =
- lvds_encoder->attached_connector->base.panel.fixed_mode;
+ const struct drm_display_mode *adjusted_mode =
+ &crtc->config.adjusted_mode;
int pipe = crtc->pipe;
u32 temp;
@@ -183,9 +183,9 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder)
temp &= ~LVDS_ENABLE_DITHER;
}
temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
- if (fixed_mode->flags & DRM_MODE_FLAG_NHSYNC)
+ if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
temp |= LVDS_HSYNC_POLARITY;
- if (fixed_mode->flags & DRM_MODE_FLAG_NVSYNC)
+ if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
temp |= LVDS_VSYNC_POLARITY;
I915_WRITE(lvds_encoder->reg, temp);
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index cfb8fb6..119771f 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -173,7 +173,7 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
return ASLE_BACKLIGHT_FAILED;
intel_panel_set_backlight(dev, bclp, 255);
- iowrite32((bclp*0x64)/0xff | ASLE_CBLV_VALID, &asle->cblv);
+ iowrite32(DIV_ROUND_UP(bclp * 100, 255) | ASLE_CBLV_VALID, &asle->cblv);
return 0;
}
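
A worked example (not part of the patch) of the rounding change above: the old truncating (bclp*0x64)/0xff maps a minimal request of bclp=1 to 0%, while DIV_ROUND_UP(bclp*100, 255) maps it to 1%, so small backlight requests are no longer lost:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int vals[] = { 1, 2, 128, 255 };
	unsigned int i;

	for (i = 0; i < sizeof(vals) / sizeof(vals[0]); i++)
		printf("bclp %3u: old %3u%%  new %3u%%\n", vals[i],
		       (vals[i] * 0x64) / 0xff,
		       DIV_ROUND_UP(vals[i] * 100, 255));
	return 0;
}
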
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index a43c33b..42114ec 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -36,20 +36,12 @@
#define PCI_LBPC 0xf4 /* legacy/combination backlight modes */
void
-intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
+intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode,
struct drm_display_mode *adjusted_mode)
{
- adjusted_mode->hdisplay = fixed_mode->hdisplay;
- adjusted_mode->hsync_start = fixed_mode->hsync_start;
- adjusted_mode->hsync_end = fixed_mode->hsync_end;
- adjusted_mode->htotal = fixed_mode->htotal;
+ drm_mode_copy(adjusted_mode, fixed_mode);
- adjusted_mode->vdisplay = fixed_mode->vdisplay;
- adjusted_mode->vsync_start = fixed_mode->vsync_start;
- adjusted_mode->vsync_end = fixed_mode->vsync_end;
- adjusted_mode->vtotal = fixed_mode->vtotal;
-
- adjusted_mode->clock = fixed_mode->clock;
+ drm_mode_set_crtcinfo(adjusted_mode, 0);
}
/* adjusted_mode has been preset to be the panel's fixed mode */
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 4605682..0c115cc 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -3447,14 +3447,24 @@ int intel_enable_rc6(const struct drm_device *dev)
static void gen6_enable_rps_interrupts(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 enabled_intrs;
spin_lock_irq(&dev_priv->irq_lock);
WARN_ON(dev_priv->rps.pm_iir);
snb_enable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS);
I915_WRITE(GEN6_PMIIR, GEN6_PM_RPS_EVENTS);
spin_unlock_irq(&dev_priv->irq_lock);
+
/* only unmask PM interrupts we need. Mask all others. */
- I915_WRITE(GEN6_PMINTRMSK, ~GEN6_PM_RPS_EVENTS);
+ enabled_intrs = GEN6_PM_RPS_EVENTS;
+
+ /* IVB and SNB hard-hang on a looping batchbuffer
+ * if GEN6_PM_UP_EI_EXPIRED is masked.
+ */
+ if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
+ enabled_intrs |= GEN6_PM_RP_UP_EI_EXPIRED;
+
+ I915_WRITE(GEN6_PMINTRMSK, ~enabled_intrs);
}
static void gen6_enable_rps(struct drm_device *dev)
@@ -4950,8 +4960,6 @@ static void haswell_init_clock_gating(struct drm_device *dev)
I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
- g4x_disable_trickle_feed(dev);
-
/* WaVSRefCountFullforceMissDisable:hsw */
gen7_setup_fixed_func_scheduler(dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index f05ccea..460ee10 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -33,16 +33,6 @@
#include "i915_trace.h"
#include "intel_drv.h"
-/*
- * 965+ support PIPE_CONTROL commands, which provide finer grained control
- * over cache flushing.
- */
-struct pipe_control {
- struct drm_i915_gem_object *obj;
- volatile u32 *cpu_page;
- u32 gtt_offset;
-};
-
static inline int ring_space(struct intel_ring_buffer *ring)
{
int space = (ring->head & HEAD_ADDR) - (ring->tail + I915_RING_FREE_SPACE);
@@ -175,8 +165,7 @@ gen4_render_ring_flush(struct intel_ring_buffer *ring,
static int
intel_emit_post_sync_nonzero_flush(struct intel_ring_buffer *ring)
{
- struct pipe_control *pc = ring->private;
- u32 scratch_addr = pc->gtt_offset + 128;
+ u32 scratch_addr = ring->scratch.gtt_offset + 128;
int ret;
@@ -213,8 +202,7 @@ gen6_render_ring_flush(struct intel_ring_buffer *ring,
u32 invalidate_domains, u32 flush_domains)
{
u32 flags = 0;
- struct pipe_control *pc = ring->private;
- u32 scratch_addr = pc->gtt_offset + 128;
+ u32 scratch_addr = ring->scratch.gtt_offset + 128;
int ret;
/* Force SNB workarounds for PIPE_CONTROL flushes */
@@ -306,8 +294,7 @@ gen7_render_ring_flush(struct intel_ring_buffer *ring,
u32 invalidate_domains, u32 flush_domains)
{
u32 flags = 0;
- struct pipe_control *pc = ring->private;
- u32 scratch_addr = pc->gtt_offset + 128;
+ u32 scratch_addr = ring->scratch.gtt_offset + 128;
int ret;
/*
@@ -481,68 +468,43 @@ out:
static int
init_pipe_control(struct intel_ring_buffer *ring)
{
- struct pipe_control *pc;
- struct drm_i915_gem_object *obj;
int ret;
- if (ring->private)
+ if (ring->scratch.obj)
return 0;
- pc = kmalloc(sizeof(*pc), GFP_KERNEL);
- if (!pc)
- return -ENOMEM;
-
- obj = i915_gem_alloc_object(ring->dev, 4096);
- if (obj == NULL) {
+ ring->scratch.obj = i915_gem_alloc_object(ring->dev, 4096);
+ if (ring->scratch.obj == NULL) {
DRM_ERROR("Failed to allocate seqno page\n");
ret = -ENOMEM;
goto err;
}
- i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
+ i915_gem_object_set_cache_level(ring->scratch.obj, I915_CACHE_LLC);
- ret = i915_gem_obj_ggtt_pin(obj, 4096, true, false);
+ ret = i915_gem_obj_ggtt_pin(ring->scratch.obj, 4096, true, false);
if (ret)
goto err_unref;
- pc->gtt_offset = i915_gem_obj_ggtt_offset(obj);
- pc->cpu_page = kmap(sg_page(obj->pages->sgl));
- if (pc->cpu_page == NULL) {
+ ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(ring->scratch.obj);
+ ring->scratch.cpu_page = kmap(sg_page(ring->scratch.obj->pages->sgl));
+ if (ring->scratch.cpu_page == NULL) {
ret = -ENOMEM;
goto err_unpin;
}
DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n",
- ring->name, pc->gtt_offset);
-
- pc->obj = obj;
- ring->private = pc;
+ ring->name, ring->scratch.gtt_offset);
return 0;
err_unpin:
- i915_gem_object_unpin(obj);
+ i915_gem_object_unpin(ring->scratch.obj);
err_unref:
- drm_gem_object_unreference(&obj->base);
+ drm_gem_object_unreference(&ring->scratch.obj->base);
err:
- kfree(pc);
return ret;
}
-static void
-cleanup_pipe_control(struct intel_ring_buffer *ring)
-{
- struct pipe_control *pc = ring->private;
- struct drm_i915_gem_object *obj;
-
- obj = pc->obj;
-
- kunmap(sg_page(obj->pages->sgl));
- i915_gem_object_unpin(obj);
- drm_gem_object_unreference(&obj->base);
-
- kfree(pc);
-}
-
static int init_render_ring(struct intel_ring_buffer *ring)
{
struct drm_device *dev = ring->dev;
@@ -607,16 +569,16 @@ static void render_ring_cleanup(struct intel_ring_buffer *ring)
{
struct drm_device *dev = ring->dev;
- if (!ring->private)
+ if (ring->scratch.obj == NULL)
return;
- if (HAS_BROKEN_CS_TLB(dev))
- drm_gem_object_unreference(to_gem_object(ring->private));
-
- if (INTEL_INFO(dev)->gen >= 5)
- cleanup_pipe_control(ring);
+ if (INTEL_INFO(dev)->gen >= 5) {
+ kunmap(sg_page(ring->scratch.obj->pages->sgl));
+ i915_gem_object_unpin(ring->scratch.obj);
+ }
- ring->private = NULL;
+ drm_gem_object_unreference(&ring->scratch.obj->base);
+ ring->scratch.obj = NULL;
}
static void
@@ -742,8 +704,7 @@ do { \
static int
pc_render_add_request(struct intel_ring_buffer *ring)
{
- struct pipe_control *pc = ring->private;
- u32 scratch_addr = pc->gtt_offset + 128;
+ u32 scratch_addr = ring->scratch.gtt_offset + 128;
int ret;
/* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
@@ -761,7 +722,7 @@ pc_render_add_request(struct intel_ring_buffer *ring)
intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
PIPE_CONTROL_WRITE_FLUSH |
PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
- intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
+ intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
intel_ring_emit(ring, ring->outstanding_lazy_request);
intel_ring_emit(ring, 0);
PIPE_CONTROL_FLUSH(ring, scratch_addr);
@@ -780,7 +741,7 @@ pc_render_add_request(struct intel_ring_buffer *ring)
PIPE_CONTROL_WRITE_FLUSH |
PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
PIPE_CONTROL_NOTIFY);
- intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
+ intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
intel_ring_emit(ring, ring->outstanding_lazy_request);
intel_ring_emit(ring, 0);
intel_ring_advance(ring);
@@ -814,15 +775,13 @@ ring_set_seqno(struct intel_ring_buffer *ring, u32 seqno)
static u32
pc_render_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
{
- struct pipe_control *pc = ring->private;
- return pc->cpu_page[0];
+ return ring->scratch.cpu_page[0];
}
static void
pc_render_set_seqno(struct intel_ring_buffer *ring, u32 seqno)
{
- struct pipe_control *pc = ring->private;
- pc->cpu_page[0] = seqno;
+ ring->scratch.cpu_page[0] = seqno;
}
static bool
@@ -1141,8 +1100,7 @@ i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
} else {
- struct drm_i915_gem_object *obj = ring->private;
- u32 cs_offset = i915_gem_obj_ggtt_offset(obj);
+ u32 cs_offset = ring->scratch.gtt_offset;
if (len > I830_BATCH_LIMIT)
return -ENOSPC;
@@ -1835,7 +1793,8 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
return ret;
}
- ring->private = obj;
+ ring->scratch.obj = obj;
+ ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(obj);
}
return intel_init_ring_buffer(dev, ring);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 432ad53..68b1ca974 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -155,7 +155,11 @@ struct intel_ring_buffer {
struct intel_ring_hangcheck hangcheck;
- void *private;
+ struct {
+ struct drm_i915_gem_object *obj;
+ u32 gtt_offset;
+ volatile u32 *cpu_page;
+ } scratch;
};
static inline bool
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 317e058..85037b9 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -1151,11 +1151,10 @@ static void intel_sdvo_mode_set(struct intel_encoder *intel_encoder)
{
struct drm_device *dev = intel_encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_crtc *crtc = intel_encoder->base.crtc;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_crtc *crtc = to_intel_crtc(intel_encoder->base.crtc);
struct drm_display_mode *adjusted_mode =
- &intel_crtc->config.adjusted_mode;
- struct drm_display_mode *mode = &intel_crtc->config.requested_mode;
+ &crtc->config.adjusted_mode;
+ struct drm_display_mode *mode = &crtc->config.requested_mode;
struct intel_sdvo *intel_sdvo = to_sdvo(intel_encoder);
u32 sdvox;
struct intel_sdvo_in_out_map in_out;
@@ -1213,13 +1212,15 @@ static void intel_sdvo_mode_set(struct intel_encoder *intel_encoder)
* adjusted_mode.
*/
intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
+ input_dtd.part1.clock /= crtc->config.pixel_multiplier;
+
if (intel_sdvo->is_tv || intel_sdvo->is_lvds)
input_dtd.part2.sdvo_flags = intel_sdvo->dtd_sdvo_flags;
if (!intel_sdvo_set_input_timing(intel_sdvo, &input_dtd))
DRM_INFO("Setting input timings on %s failed\n",
SDVO_NAME(intel_sdvo));
- switch (intel_crtc->config.pixel_multiplier) {
+ switch (crtc->config.pixel_multiplier) {
default:
WARN(1, "unknown pixel multiplier specified\n");
case 1: rate = SDVO_CLOCK_RATE_MULT_1X; break;
@@ -1252,9 +1253,9 @@ static void intel_sdvo_mode_set(struct intel_encoder *intel_encoder)
}
if (INTEL_PCH_TYPE(dev) >= PCH_CPT)
- sdvox |= SDVO_PIPE_SEL_CPT(intel_crtc->pipe);
+ sdvox |= SDVO_PIPE_SEL_CPT(crtc->pipe);
else
- sdvox |= SDVO_PIPE_SEL(intel_crtc->pipe);
+ sdvox |= SDVO_PIPE_SEL(crtc->pipe);
if (intel_sdvo->has_hdmi_audio)
sdvox |= SDVO_AUDIO_ENABLE;
@@ -1264,7 +1265,7 @@ static void intel_sdvo_mode_set(struct intel_encoder *intel_encoder)
} else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
/* done in crtc_mode_set as it lives inside the dpll register */
} else {
- sdvox |= (intel_crtc->config.pixel_multiplier - 1)
+ sdvox |= (crtc->config.pixel_multiplier - 1)
<< SDVO_PORT_MULTIPLY_SHIFT;
}
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 78b621c..ad6ec4b 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -260,8 +260,11 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
if (obj->tiling_mode != I915_TILING_NONE)
sprctl |= SPRITE_TILED;
- /* must disable */
- sprctl |= SPRITE_TRICKLE_FEED_DISABLE;
+ if (IS_HASWELL(dev))
+ sprctl &= ~SPRITE_TRICKLE_FEED_DISABLE;
+ else
+ sprctl |= SPRITE_TRICKLE_FEED_DISABLE;
+
sprctl |= SPRITE_ENABLE;
if (IS_HASWELL(dev))
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 8f5bc86..8649f1c 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -261,7 +261,7 @@ void intel_uncore_init(struct drm_device *dev)
}
}
-void intel_uncore_sanitize(struct drm_device *dev)
+static void intel_uncore_forcewake_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -272,6 +272,11 @@ void intel_uncore_sanitize(struct drm_device *dev)
if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
__gen6_gt_force_wake_mt_reset(dev_priv);
}
+}
+
+void intel_uncore_sanitize(struct drm_device *dev)
+{
+ intel_uncore_forcewake_reset(dev);
/* BIOS often leaves RC6 enabled, but disable it for hw init */
intel_disable_gt_powersave(dev);
@@ -549,6 +554,8 @@ static int gen6_do_reset(struct drm_device *dev)
/* Spin waiting for the device to ack the reset request */
ret = wait_for((__raw_i915_read32(dev_priv, GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);
+ intel_uncore_forcewake_reset(dev);
+
/* If reset with a user forcewake, try to restore, otherwise turn it off */
if (dev_priv->uncore.forcewake_count)
dev_priv->uncore.funcs.force_wake_get(dev_priv);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
index 2e11ea0..57cda2a 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
@@ -579,8 +579,22 @@ static void
init_reserved(struct nvbios_init *init)
{
u8 opcode = nv_ro08(init->bios, init->offset);
- trace("RESERVED\t0x%02x\n", opcode);
- init->offset += 1;
+ u8 length, i;
+
+ switch (opcode) {
+ case 0xaa:
+ length = 4;
+ break;
+ default:
+ length = 1;
+ break;
+ }
+
+ trace("RESERVED 0x%02x\t", opcode);
+ for (i = 1; i < length; i++)
+ cont(" 0x%02x", nv_ro08(init->bios, init->offset + i));
+ cont("\n");
+ init->offset += length;
}
/**
@@ -1437,7 +1451,7 @@ init_configure_mem(struct nvbios_init *init)
data = init_rdvgai(init, 0x03c4, 0x01);
init_wrvgai(init, 0x03c4, 0x01, data | 0x20);
- while ((addr = nv_ro32(bios, sdata)) != 0xffffffff) {
+ for (; (addr = nv_ro32(bios, sdata)) != 0xffffffff; sdata += 4) {
switch (addr) {
case 0x10021c: /* CKE_NORMAL */
case 0x1002d0: /* CMD_REFRESH */
@@ -2135,6 +2149,7 @@ static struct nvbios_init_opcode {
[0x99] = { init_zm_auxch },
[0x9a] = { init_i2c_long_if },
[0xa9] = { init_gpio_ne },
+ [0xaa] = { init_reserved },
};
#define init_opcode_nr (sizeof(init_opcode) / sizeof(init_opcode[0]))
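For readers new to the interpreter: opcodes are dispatched through the init_opcode table, and the hunk above teaches the RESERVED handler that opcode 0xaa carries three payload bytes. A self-contained sketch of the same table-driven pattern, with hypothetical names (not the nouveau API):

#include <stdint.h>
#include <stdio.h>

static void op_reserved(const uint8_t *script, unsigned *off)
{
	/* 0xaa is a 4-byte reserved opcode; everything else is 1 byte */
	unsigned len = (script[*off] == 0xaa) ? 4 : 1, i;

	printf("RESERVED 0x%02x", script[*off]);
	for (i = 1; i < len; i++)
		printf(" 0x%02x", script[*off + i]);
	printf("\n");
	*off += len;	/* advance past opcode and payload */
}

static void (*const ops[256])(const uint8_t *, unsigned *) = {
	[0xaa] = op_reserved,
};

int main(void)
{
	const uint8_t script[] = { 0xaa, 0x01, 0x02, 0x03 };
	unsigned off = 0;

	if (ops[script[off]])
		ops[script[off]](script, &off);
	return 0;
}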
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index d2712e6..7848590 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -278,7 +278,6 @@ nouveau_display_create(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_display *disp;
- u32 pclass = dev->pdev->class >> 8;
int ret, gen;
disp = drm->display = kzalloc(sizeof(*disp), GFP_KERNEL);
@@ -340,29 +339,25 @@ nouveau_display_create(struct drm_device *dev)
drm_kms_helper_poll_init(dev);
drm_kms_helper_poll_disable(dev);
- if (nouveau_modeset == 1 ||
- (nouveau_modeset < 0 && pclass == PCI_CLASS_DISPLAY_VGA)) {
- if (drm->vbios.dcb.entries) {
- if (nv_device(drm->device)->card_type < NV_50)
- ret = nv04_display_create(dev);
- else
- ret = nv50_display_create(dev);
- } else {
- ret = 0;
- }
-
- if (ret)
- goto disp_create_err;
+ if (drm->vbios.dcb.entries) {
+ if (nv_device(drm->device)->card_type < NV_50)
+ ret = nv04_display_create(dev);
+ else
+ ret = nv50_display_create(dev);
+ } else {
+ ret = 0;
+ }
- if (dev->mode_config.num_crtc) {
- ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
- if (ret)
- goto vblank_err;
- }
+ if (ret)
+ goto disp_create_err;
- nouveau_backlight_init(dev);
+ if (dev->mode_config.num_crtc) {
+ ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
+ if (ret)
+ goto vblank_err;
}
+ nouveau_backlight_init(dev);
return 0;
vblank_err:
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 8863644..e893c53 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -636,7 +636,8 @@ int nouveau_pmops_resume(struct device *dev)
nouveau_fbcon_set_suspend(drm_dev, 0);
nouveau_fbcon_zfill_all(drm_dev);
- nouveau_display_resume(drm_dev);
+ if (drm_dev->mode_config.num_crtc)
+ nouveau_display_resume(drm_dev);
nv_suspend_set_printk_level(NV_DBG_DEBUG);
return 0;
}
@@ -671,7 +672,8 @@ static int nouveau_pmops_thaw(struct device *dev)
if (drm_dev->mode_config.num_crtc)
nouveau_fbcon_set_suspend(drm_dev, 0);
nouveau_fbcon_zfill_all(drm_dev);
- nouveau_display_resume(drm_dev);
+ if (drm_dev->mode_config.num_crtc)
+ nouveau_display_resume(drm_dev);
nv_suspend_set_printk_level(NV_DBG_DEBUG);
return 0;
}
@@ -906,7 +908,8 @@ static int nouveau_pmops_runtime_resume(struct device *dev)
pci_set_master(pdev);
ret = nouveau_do_resume(drm_dev);
- nouveau_display_resume(drm_dev);
+ if (drm_dev->mode_config.num_crtc)
+ nouveau_display_resume(drm_dev);
drm_kms_helper_poll_enable(drm_dev);
/* do magic */
nv_mask(device, 0x88488, (1 << 25), (1 << 25));
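All three resume paths now gate nouveau_display_resume() on mode_config.num_crtc, matching the display-create change above: headless boards register no CRTCs and have no display state to restore. A hypothetical wrapper (not part of the patch) would capture the repetition:

static void nouveau_display_resume_if_enabled(struct drm_device *dev)
{
	/* headless: no CRTCs were registered, nothing to restore */
	if (dev->mode_config.num_crtc)
		nouveau_display_resume(dev);
}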
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 8f6d63d..a86ecf6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -454,7 +454,8 @@ nouveau_fbcon_init(struct drm_device *dev)
int preferred_bpp;
int ret;
- if (!dev->mode_config.num_crtc)
+ if (!dev->mode_config.num_crtc ||
+ (dev->pdev->class >> 8) != PCI_CLASS_DISPLAY_VGA)
return 0;
fbcon = kzalloc(sizeof(struct nouveau_fbdev), GFP_KERNEL);
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index ca5492a..0843ebc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -104,9 +104,7 @@ nouveau_sgdma_create_ttm(struct ttm_bo_device *bdev,
else
nvbe->ttm.ttm.func = &nv50_sgdma_backend;
- if (ttm_dma_tt_init(&nvbe->ttm, bdev, size, page_flags, dummy_read_page)) {
- kfree(nvbe);
+ if (ttm_dma_tt_init(&nvbe->ttm, bdev, size, page_flags, dummy_read_page))
return NULL;
- }
return &nvbe->ttm.ttm;
}
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index dfac796..32923d2 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -707,8 +707,9 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
switch (connector->connector_type) {
case DRM_MODE_CONNECTOR_DVII:
case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */
- if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
- radeon_audio)
+ if ((radeon_connector->audio == RADEON_AUDIO_ENABLE) ||
+ (drm_detect_hdmi_monitor(radeon_connector->edid) &&
+ (radeon_connector->audio == RADEON_AUDIO_AUTO)))
return ATOM_ENCODER_MODE_HDMI;
else if (radeon_connector->use_digital)
return ATOM_ENCODER_MODE_DVI;
@@ -718,8 +719,9 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
case DRM_MODE_CONNECTOR_DVID:
case DRM_MODE_CONNECTOR_HDMIA:
default:
- if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
- radeon_audio)
+ if ((radeon_connector->audio == RADEON_AUDIO_ENABLE) ||
+ (drm_detect_hdmi_monitor(radeon_connector->edid) &&
+ (radeon_connector->audio == RADEON_AUDIO_AUTO)))
return ATOM_ENCODER_MODE_HDMI;
else
return ATOM_ENCODER_MODE_DVI;
@@ -732,8 +734,9 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
(dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
return ATOM_ENCODER_MODE_DP;
- else if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
- radeon_audio)
+ else if ((radeon_connector->audio == RADEON_AUDIO_ENABLE) ||
+ (drm_detect_hdmi_monitor(radeon_connector->edid) &&
+ (radeon_connector->audio == RADEON_AUDIO_AUTO)))
return ATOM_ENCODER_MODE_HDMI;
else
return ATOM_ENCODER_MODE_DVI;
@@ -1647,8 +1650,12 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
atombios_dig_encoder_setup(encoder, ATOM_ENABLE, 0);
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0);
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
- /* some early dce3.2 boards have a bug in their transmitter control table */
- if ((rdev->family != CHIP_RV710) && (rdev->family != CHIP_RV730))
+ /* some dce3.x boards have a bug in their transmitter control table.
+ * ACTION_ENABLE_OUTPUT can probably be dropped since ACTION_ENABLE
+ * does the same thing and more.
+ */
+ if ((rdev->family != CHIP_RV710) && (rdev->family != CHIP_RV730) &&
+ (rdev->family != CHIP_RS880))
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
}
if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) {
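The same enable/auto/disable decision now appears for the DVI-I/HDMI-B, DVI-D/HDMI-A and DP connector cases. A hypothetical predicate, shown only to make the repeated condition readable (not a function in the driver):

static bool radeon_hdmi_audio_wanted(struct radeon_connector *c)
{
	if (c->audio == RADEON_AUDIO_ENABLE)
		return true;	/* user forced audio on */
	if (c->audio == RADEON_AUDIO_AUTO)
		return drm_detect_hdmi_monitor(c->edid);	/* follow EDID */
	return false;	/* RADEON_AUDIO_DISABLE */
}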
diff --git a/drivers/gpu/drm/radeon/btc_dpm.c b/drivers/gpu/drm/radeon/btc_dpm.c
index 084e694..05ff315 100644
--- a/drivers/gpu/drm/radeon/btc_dpm.c
+++ b/drivers/gpu/drm/radeon/btc_dpm.c
@@ -2340,12 +2340,6 @@ int btc_dpm_set_power_state(struct radeon_device *rdev)
return ret;
}
- ret = rv770_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_AUTO);
- if (ret) {
- DRM_ERROR("rv770_dpm_force_performance_level failed\n");
- return ret;
- }
-
return 0;
}
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index 3cce533..8996274 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -4748,12 +4748,6 @@ int ci_dpm_set_power_state(struct radeon_device *rdev)
if (pi->pcie_performance_request)
ci_notify_link_speed_change_after_state_change(rdev, new_ps, old_ps);
- ret = ci_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_AUTO);
- if (ret) {
- DRM_ERROR("ci_dpm_force_performance_level failed\n");
- return ret;
- }
-
cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
RADEON_CG_BLOCK_MC |
RADEON_CG_BLOCK_SDMA |
diff --git a/drivers/gpu/drm/radeon/ci_smc.c b/drivers/gpu/drm/radeon/ci_smc.c
index 53b43dd..252e10a 100644
--- a/drivers/gpu/drm/radeon/ci_smc.c
+++ b/drivers/gpu/drm/radeon/ci_smc.c
@@ -47,10 +47,11 @@ int ci_copy_bytes_to_smc(struct radeon_device *rdev,
u32 smc_start_address,
const u8 *src, u32 byte_count, u32 limit)
{
+ unsigned long flags;
u32 data, original_data;
u32 addr;
u32 extra_shift;
- int ret;
+ int ret = 0;
if (smc_start_address & 3)
return -EINVAL;
@@ -59,13 +60,14 @@ int ci_copy_bytes_to_smc(struct radeon_device *rdev,
addr = smc_start_address;
+ spin_lock_irqsave(&rdev->smc_idx_lock, flags);
while (byte_count >= 4) {
/* SMC address space is BE */
data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];
ret = ci_set_smc_sram_address(rdev, addr, limit);
if (ret)
- return ret;
+ goto done;
WREG32(SMC_IND_DATA_0, data);
@@ -80,7 +82,7 @@ int ci_copy_bytes_to_smc(struct radeon_device *rdev,
ret = ci_set_smc_sram_address(rdev, addr, limit);
if (ret)
- return ret;
+ goto done;
original_data = RREG32(SMC_IND_DATA_0);
@@ -97,11 +99,15 @@ int ci_copy_bytes_to_smc(struct radeon_device *rdev,
ret = ci_set_smc_sram_address(rdev, addr, limit);
if (ret)
- return ret;
+ goto done;
WREG32(SMC_IND_DATA_0, data);
}
- return 0;
+
+done:
+ spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
+
+ return ret;
}
void ci_start_smc(struct radeon_device *rdev)
@@ -197,6 +203,7 @@ PPSMC_Result ci_wait_for_smc_inactive(struct radeon_device *rdev)
int ci_load_smc_ucode(struct radeon_device *rdev, u32 limit)
{
+ unsigned long flags;
u32 ucode_start_address;
u32 ucode_size;
const u8 *src;
@@ -219,6 +226,7 @@ int ci_load_smc_ucode(struct radeon_device *rdev, u32 limit)
return -EINVAL;
src = (const u8 *)rdev->smc_fw->data;
+ spin_lock_irqsave(&rdev->smc_idx_lock, flags);
WREG32(SMC_IND_INDEX_0, ucode_start_address);
WREG32_P(SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, ~AUTO_INCREMENT_IND_0);
while (ucode_size >= 4) {
@@ -231,6 +239,7 @@ int ci_load_smc_ucode(struct radeon_device *rdev, u32 limit)
ucode_size -= 4;
}
WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0);
+ spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
return 0;
}
@@ -238,25 +247,29 @@ int ci_load_smc_ucode(struct radeon_device *rdev, u32 limit)
int ci_read_smc_sram_dword(struct radeon_device *rdev,
u32 smc_address, u32 *value, u32 limit)
{
+ unsigned long flags;
int ret;
+ spin_lock_irqsave(&rdev->smc_idx_lock, flags);
ret = ci_set_smc_sram_address(rdev, smc_address, limit);
- if (ret)
- return ret;
+ if (ret == 0)
+ *value = RREG32(SMC_IND_DATA_0);
+ spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
- *value = RREG32(SMC_IND_DATA_0);
- return 0;
+ return ret;
}
int ci_write_smc_sram_dword(struct radeon_device *rdev,
u32 smc_address, u32 value, u32 limit)
{
+ unsigned long flags;
int ret;
+ spin_lock_irqsave(&rdev->smc_idx_lock, flags);
ret = ci_set_smc_sram_address(rdev, smc_address, limit);
- if (ret)
- return ret;
+ if (ret == 0)
+ WREG32(SMC_IND_DATA_0, value);
+ spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
- WREG32(SMC_IND_DATA_0, value);
- return 0;
+ return ret;
}
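The motivation for smc_idx_lock: SMC registers are reached through an INDEX/DATA pair, so every access is two MMIO operations, and an unlocked interleaving from another context would redirect the DATA access to the wrong register. A minimal sketch of the guarded pattern used throughout this series (register names as in the hunks above, function name illustrative):

static u32 smc_rreg_locked(struct radeon_device *rdev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&rdev->smc_idx_lock, flags);
	WREG32(SMC_IND_INDEX_0, reg);	/* select the register... */
	r = RREG32(SMC_IND_DATA_0);	/* ...then read it, atomically */
	spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);

	return r;
}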
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index a3bba05..adbdb65 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -77,6 +77,8 @@ static void cik_pcie_gen3_enable(struct radeon_device *rdev);
static void cik_program_aspm(struct radeon_device *rdev);
static void cik_init_pg(struct radeon_device *rdev);
static void cik_init_cg(struct radeon_device *rdev);
+static void cik_enable_gui_idle_interrupt(struct radeon_device *rdev,
+ bool enable);
/* get temperature in millidegrees */
int ci_get_temp(struct radeon_device *rdev)
@@ -120,20 +122,27 @@ int kv_get_temp(struct radeon_device *rdev)
*/
u32 cik_pciep_rreg(struct radeon_device *rdev, u32 reg)
{
+ unsigned long flags;
u32 r;
+ spin_lock_irqsave(&rdev->pciep_idx_lock, flags);
WREG32(PCIE_INDEX, reg);
(void)RREG32(PCIE_INDEX);
r = RREG32(PCIE_DATA);
+ spin_unlock_irqrestore(&rdev->pciep_idx_lock, flags);
return r;
}
void cik_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&rdev->pciep_idx_lock, flags);
WREG32(PCIE_INDEX, reg);
(void)RREG32(PCIE_INDEX);
WREG32(PCIE_DATA, v);
(void)RREG32(PCIE_DATA);
+ spin_unlock_irqrestore(&rdev->pciep_idx_lock, flags);
}
static const u32 spectre_rlc_save_restore_register_list[] =
@@ -2722,7 +2731,8 @@ static void cik_gpu_init(struct radeon_device *rdev)
} else if ((rdev->pdev->device == 0x1309) ||
(rdev->pdev->device == 0x130A) ||
(rdev->pdev->device == 0x130D) ||
- (rdev->pdev->device == 0x1313)) {
+ (rdev->pdev->device == 0x1313) ||
+ (rdev->pdev->device == 0x131D)) {
rdev->config.cik.max_cu_per_sh = 6;
rdev->config.cik.max_backends_per_se = 2;
} else if ((rdev->pdev->device == 0x1306) ||
@@ -4013,6 +4023,8 @@ static int cik_cp_resume(struct radeon_device *rdev)
{
int r;
+ cik_enable_gui_idle_interrupt(rdev, false);
+
r = cik_cp_load_microcode(rdev);
if (r)
return r;
@@ -4024,6 +4036,8 @@ static int cik_cp_resume(struct radeon_device *rdev)
if (r)
return r;
+ cik_enable_gui_idle_interrupt(rdev, true);
+
return 0;
}
@@ -5376,7 +5390,9 @@ static void cik_enable_hdp_ls(struct radeon_device *rdev,
void cik_update_cg(struct radeon_device *rdev,
u32 block, bool enable)
{
+
if (block & RADEON_CG_BLOCK_GFX) {
+ cik_enable_gui_idle_interrupt(rdev, false);
/* order matters! */
if (enable) {
cik_enable_mgcg(rdev, true);
@@ -5385,6 +5401,7 @@ void cik_update_cg(struct radeon_device *rdev,
cik_enable_cgcg(rdev, false);
cik_enable_mgcg(rdev, false);
}
+ cik_enable_gui_idle_interrupt(rdev, true);
}
if (block & RADEON_CG_BLOCK_MC) {
@@ -5541,7 +5558,7 @@ static void cik_enable_gfx_cgpg(struct radeon_device *rdev,
{
u32 data, orig;
- if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_CG)) {
+ if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG)) {
orig = data = RREG32(RLC_PG_CNTL);
data |= GFX_PG_ENABLE;
if (orig != data)
@@ -5805,7 +5822,7 @@ static void cik_init_pg(struct radeon_device *rdev)
if (rdev->pg_flags) {
cik_enable_sck_slowdown_on_pu(rdev, true);
cik_enable_sck_slowdown_on_pd(rdev, true);
- if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_CG) {
+ if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG) {
cik_init_gfx_cgpg(rdev);
cik_enable_cp_pg(rdev, true);
cik_enable_gds_pg(rdev, true);
@@ -5819,7 +5836,7 @@ static void cik_fini_pg(struct radeon_device *rdev)
{
if (rdev->pg_flags) {
cik_update_gfx_pg(rdev, false);
- if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_CG) {
+ if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG) {
cik_enable_cp_pg(rdev, false);
cik_enable_gds_pg(rdev, false);
}
@@ -5895,7 +5912,9 @@ static void cik_disable_interrupt_state(struct radeon_device *rdev)
u32 tmp;
/* gfx ring */
- WREG32(CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+ tmp = RREG32(CP_INT_CNTL_RING0) &
+ (CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+ WREG32(CP_INT_CNTL_RING0, tmp);
/* sdma */
tmp = RREG32(SDMA0_CNTL + SDMA0_REGISTER_OFFSET) & ~TRAP_ENABLE;
WREG32(SDMA0_CNTL + SDMA0_REGISTER_OFFSET, tmp);
@@ -6036,8 +6055,7 @@ static int cik_irq_init(struct radeon_device *rdev)
*/
int cik_irq_set(struct radeon_device *rdev)
{
- u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE |
- PRIV_INSTR_INT_ENABLE | PRIV_REG_INT_ENABLE;
+ u32 cp_int_cntl;
u32 cp_m1p0, cp_m1p1, cp_m1p2, cp_m1p3;
u32 cp_m2p0, cp_m2p1, cp_m2p2, cp_m2p3;
u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
@@ -6058,6 +6076,10 @@ int cik_irq_set(struct radeon_device *rdev)
return 0;
}
+ cp_int_cntl = RREG32(CP_INT_CNTL_RING0) &
+ (CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+ cp_int_cntl |= PRIV_INSTR_INT_ENABLE | PRIV_REG_INT_ENABLE;
+
hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
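Why the interrupt code switched to read-modify-write: the CNTX_BUSY/CNTX_EMPTY bits in CP_INT_CNTL_RING0 are now owned by cik_enable_gui_idle_interrupt(), so other writers must preserve whatever state that helper left behind instead of writing a fixed value. The idiom in isolation (illustrative):

static void cp_int_set_priv_bits(struct radeon_device *rdev)
{
	/* keep the GUI-idle bits, set only the bits this path owns */
	u32 v = RREG32(CP_INT_CNTL_RING0) &
		(CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);

	WREG32(CP_INT_CNTL_RING0,
	       v | PRIV_INSTR_INT_ENABLE | PRIV_REG_INT_ENABLE);
}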
diff --git a/drivers/gpu/drm/radeon/cypress_dpm.c b/drivers/gpu/drm/radeon/cypress_dpm.c
index 95a66db..91bb470 100644
--- a/drivers/gpu/drm/radeon/cypress_dpm.c
+++ b/drivers/gpu/drm/radeon/cypress_dpm.c
@@ -2014,12 +2014,6 @@ int cypress_dpm_set_power_state(struct radeon_device *rdev)
if (eg_pi->pcie_performance_request)
cypress_notify_link_speed_change_after_state_change(rdev, new_ps, old_ps);
- ret = rv770_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_AUTO);
- if (ret) {
- DRM_ERROR("rv770_dpm_force_performance_level failed\n");
- return ret;
- }
-
return 0;
}
diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c
index 8953255e..85a69d2 100644
--- a/drivers/gpu/drm/radeon/dce6_afmt.c
+++ b/drivers/gpu/drm/radeon/dce6_afmt.c
@@ -28,22 +28,30 @@
static u32 dce6_endpoint_rreg(struct radeon_device *rdev,
u32 block_offset, u32 reg)
{
+ unsigned long flags;
u32 r;
+ spin_lock_irqsave(&rdev->end_idx_lock, flags);
WREG32(AZ_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
r = RREG32(AZ_F0_CODEC_ENDPOINT_DATA + block_offset);
+ spin_unlock_irqrestore(&rdev->end_idx_lock, flags);
+
return r;
}
static void dce6_endpoint_wreg(struct radeon_device *rdev,
u32 block_offset, u32 reg, u32 v)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&rdev->end_idx_lock, flags);
if (ASIC_IS_DCE8(rdev))
WREG32(AZ_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
else
WREG32(AZ_F0_CODEC_ENDPOINT_INDEX + block_offset,
AZ_ENDPOINT_REG_WRITE_EN | AZ_ENDPOINT_REG_INDEX(reg));
WREG32(AZ_F0_CODEC_ENDPOINT_DATA + block_offset, v);
+ spin_unlock_irqrestore(&rdev->end_idx_lock, flags);
}
#define RREG32_ENDPOINT(block, reg) dce6_endpoint_rreg(rdev, (block), (reg))
@@ -86,12 +94,12 @@ void dce6_afmt_select_pin(struct drm_encoder *encoder)
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
u32 offset = dig->afmt->offset;
- u32 id = dig->afmt->pin->id;
if (!dig->afmt->pin)
return;
- WREG32(AFMT_AUDIO_SRC_CONTROL + offset, AFMT_AUDIO_SRC_SELECT(id));
+ WREG32(AFMT_AUDIO_SRC_CONTROL + offset,
+ AFMT_AUDIO_SRC_SELECT(dig->afmt->pin->id));
}
void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder)
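The dce6_afmt_select_pin() change above is a NULL-dereference fix: the old code read dig->afmt->pin->id while initializing a local, before the !pin check could run. The general shape of the fix, with hypothetical names:

struct pin { unsigned id; };

static void select_pin(const struct pin *pin)
{
	/* check before use; the buggy version dereferenced pin first */
	if (!pin)
		return;
	consume_pin_id(pin->id);	/* hypothetical consumer */
}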
diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c
index ecd6080..7139906 100644
--- a/drivers/gpu/drm/radeon/kv_dpm.c
+++ b/drivers/gpu/drm/radeon/kv_dpm.c
@@ -40,6 +40,7 @@ static int kv_calculate_dpm_settings(struct radeon_device *rdev);
static void kv_enable_new_levels(struct radeon_device *rdev);
static void kv_program_nbps_index_settings(struct radeon_device *rdev,
struct radeon_ps *new_rps);
+static int kv_set_enabled_level(struct radeon_device *rdev, u32 level);
static int kv_set_enabled_levels(struct radeon_device *rdev);
static int kv_force_dpm_highest(struct radeon_device *rdev);
static int kv_force_dpm_lowest(struct radeon_device *rdev);
@@ -519,7 +520,7 @@ static int kv_set_dpm_boot_state(struct radeon_device *rdev)
static void kv_program_vc(struct radeon_device *rdev)
{
- WREG32_SMC(CG_FTV_0, 0x3FFFC000);
+ WREG32_SMC(CG_FTV_0, 0x3FFFC100);
}
static void kv_clear_vc(struct radeon_device *rdev)
@@ -638,7 +639,10 @@ static int kv_force_lowest_valid(struct radeon_device *rdev)
static int kv_unforce_levels(struct radeon_device *rdev)
{
- return kv_notify_message_to_smu(rdev, PPSMC_MSG_NoForcedLevel);
+ if (rdev->family == CHIP_KABINI)
+ return kv_notify_message_to_smu(rdev, PPSMC_MSG_NoForcedLevel);
+ else
+ return kv_set_enabled_levels(rdev);
}
static int kv_update_sclk_t(struct radeon_device *rdev)
@@ -667,9 +671,8 @@ static int kv_program_bootup_state(struct radeon_device *rdev)
&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
if (table && table->count) {
- for (i = pi->graphics_dpm_level_count - 1; i >= 0; i--) {
- if ((table->entries[i].clk == pi->boot_pl.sclk) ||
- (i == 0))
+ for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
+ if (table->entries[i].clk == pi->boot_pl.sclk)
break;
}
@@ -682,9 +685,8 @@ static int kv_program_bootup_state(struct radeon_device *rdev)
if (table->num_max_dpm_entries == 0)
return -EINVAL;
- for (i = pi->graphics_dpm_level_count - 1; i >= 0; i--) {
- if ((table->entries[i].sclk_frequency == pi->boot_pl.sclk) ||
- (i == 0))
+ for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
+ if (table->entries[i].sclk_frequency == pi->boot_pl.sclk)
break;
}
@@ -1078,6 +1080,13 @@ static int kv_enable_ulv(struct radeon_device *rdev, bool enable)
PPSMC_MSG_EnableULV : PPSMC_MSG_DisableULV);
}
+static void kv_reset_acp_boot_level(struct radeon_device *rdev)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+
+ pi->acp_boot_level = 0xff;
+}
+
static void kv_update_current_ps(struct radeon_device *rdev,
struct radeon_ps *rps)
{
@@ -1100,6 +1109,18 @@ static void kv_update_requested_ps(struct radeon_device *rdev,
pi->requested_rps.ps_priv = &pi->requested_ps;
}
+void kv_dpm_enable_bapm(struct radeon_device *rdev, bool enable)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ int ret;
+
+ if (pi->bapm_enable) {
+ ret = kv_smc_bapm_enable(rdev, enable);
+ if (ret)
+ DRM_ERROR("kv_smc_bapm_enable failed\n");
+ }
+}
+
int kv_dpm_enable(struct radeon_device *rdev)
{
struct kv_power_info *pi = kv_get_pi(rdev);
@@ -1192,6 +1213,8 @@ int kv_dpm_enable(struct radeon_device *rdev)
return ret;
}
+ kv_reset_acp_boot_level(rdev);
+
if (rdev->irq.installed &&
r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
ret = kv_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
@@ -1203,6 +1226,12 @@ int kv_dpm_enable(struct radeon_device *rdev)
radeon_irq_set(rdev);
}
+ ret = kv_smc_bapm_enable(rdev, false);
+ if (ret) {
+ DRM_ERROR("kv_smc_bapm_enable failed\n");
+ return ret;
+ }
+
/* powerdown unused blocks for now */
kv_dpm_powergate_acp(rdev, true);
kv_dpm_powergate_samu(rdev, true);
@@ -1226,6 +1255,8 @@ void kv_dpm_disable(struct radeon_device *rdev)
RADEON_CG_BLOCK_BIF |
RADEON_CG_BLOCK_HDP), false);
+ kv_smc_bapm_enable(rdev, false);
+
/* powerup blocks */
kv_dpm_powergate_acp(rdev, false);
kv_dpm_powergate_samu(rdev, false);
@@ -1450,6 +1481,39 @@ static int kv_update_samu_dpm(struct radeon_device *rdev, bool gate)
return kv_enable_samu_dpm(rdev, !gate);
}
+static u8 kv_get_acp_boot_level(struct radeon_device *rdev)
+{
+ u8 i;
+ struct radeon_clock_voltage_dependency_table *table =
+ &rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
+
+ for (i = 0; i < table->count; i++) {
+ if (table->entries[i].clk >= 0) /* XXX */
+ break;
+ }
+
+ if (i >= table->count)
+ i = table->count - 1;
+
+ return i;
+}
+
+static void kv_update_acp_boot_level(struct radeon_device *rdev)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ u8 acp_boot_level;
+
+ if (!pi->caps_stable_p_state) {
+ acp_boot_level = kv_get_acp_boot_level(rdev);
+ if (acp_boot_level != pi->acp_boot_level) {
+ pi->acp_boot_level = acp_boot_level;
+ kv_send_msg_to_smc_with_parameter(rdev,
+ PPSMC_MSG_ACPDPM_SetEnabledMask,
+ (1 << pi->acp_boot_level));
+ }
+ }
+}
+
static int kv_update_acp_dpm(struct radeon_device *rdev, bool gate)
{
struct kv_power_info *pi = kv_get_pi(rdev);
@@ -1461,7 +1525,7 @@ static int kv_update_acp_dpm(struct radeon_device *rdev, bool gate)
if (pi->caps_stable_p_state)
pi->acp_boot_level = table->count - 1;
else
- pi->acp_boot_level = 0;
+ pi->acp_boot_level = kv_get_acp_boot_level(rdev);
ret = kv_copy_bytes_to_smc(rdev,
pi->dpm_table_start +
@@ -1588,13 +1652,11 @@ static void kv_set_valid_clock_range(struct radeon_device *rdev,
}
}
- for (i = pi->graphics_dpm_level_count - 1; i >= 0; i--) {
- if ((table->entries[i].clk <= new_ps->levels[new_ps->num_levels -1].sclk) ||
- (i == 0)) {
- pi->highest_valid = i;
+ for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
+ if (table->entries[i].clk <= new_ps->levels[new_ps->num_levels - 1].sclk)
break;
- }
}
+ pi->highest_valid = i;
if (pi->lowest_valid > pi->highest_valid) {
if ((new_ps->levels[0].sclk - table->entries[pi->highest_valid].clk) >
@@ -1615,14 +1677,12 @@ static void kv_set_valid_clock_range(struct radeon_device *rdev,
}
}
- for (i = pi->graphics_dpm_level_count - 1; i >= 0; i--) {
+ for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
if (table->entries[i].sclk_frequency <=
- new_ps->levels[new_ps->num_levels - 1].sclk ||
- i == 0) {
- pi->highest_valid = i;
+ new_ps->levels[new_ps->num_levels - 1].sclk)
break;
- }
}
+ pi->highest_valid = i;
if (pi->lowest_valid > pi->highest_valid) {
if ((new_ps->levels[0].sclk -
@@ -1724,6 +1784,14 @@ int kv_dpm_set_power_state(struct radeon_device *rdev)
RADEON_CG_BLOCK_BIF |
RADEON_CG_BLOCK_HDP), false);
+ if (pi->bapm_enable) {
+ ret = kv_smc_bapm_enable(rdev, rdev->pm.dpm.ac_power);
+ if (ret) {
+ DRM_ERROR("kv_smc_bapm_enable failed\n");
+ return ret;
+ }
+ }
+
if (rdev->family == CHIP_KABINI) {
if (pi->enable_dpm) {
kv_set_valid_clock_range(rdev, new_ps);
@@ -1775,6 +1843,7 @@ int kv_dpm_set_power_state(struct radeon_device *rdev)
return ret;
}
#endif
+ kv_update_acp_boot_level(rdev);
kv_update_sclk_t(rdev);
kv_enable_nb_dpm(rdev);
}
@@ -1785,7 +1854,6 @@ int kv_dpm_set_power_state(struct radeon_device *rdev)
RADEON_CG_BLOCK_BIF |
RADEON_CG_BLOCK_HDP), true);
- rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO;
return 0;
}
@@ -1806,12 +1874,23 @@ void kv_dpm_setup_asic(struct radeon_device *rdev)
void kv_dpm_reset_asic(struct radeon_device *rdev)
{
- kv_force_lowest_valid(rdev);
- kv_init_graphics_levels(rdev);
- kv_program_bootup_state(rdev);
- kv_upload_dpm_settings(rdev);
- kv_force_lowest_valid(rdev);
- kv_unforce_levels(rdev);
+ struct kv_power_info *pi = kv_get_pi(rdev);
+
+ if (rdev->family == CHIP_KABINI) {
+ kv_force_lowest_valid(rdev);
+ kv_init_graphics_levels(rdev);
+ kv_program_bootup_state(rdev);
+ kv_upload_dpm_settings(rdev);
+ kv_force_lowest_valid(rdev);
+ kv_unforce_levels(rdev);
+ } else {
+ kv_init_graphics_levels(rdev);
+ kv_program_bootup_state(rdev);
+ kv_freeze_sclk_dpm(rdev, true);
+ kv_upload_dpm_settings(rdev);
+ kv_freeze_sclk_dpm(rdev, false);
+ kv_set_enabled_level(rdev, pi->graphics_boot_level);
+ }
}
//XXX use sumo_dpm_display_configuration_changed
@@ -1871,12 +1950,15 @@ static int kv_force_dpm_highest(struct radeon_device *rdev)
if (ret)
return ret;
- for (i = SMU7_MAX_LEVELS_GRAPHICS - 1; i >= 0; i--) {
+ for (i = SMU7_MAX_LEVELS_GRAPHICS - 1; i > 0; i--) {
if (enable_mask & (1 << i))
break;
}
- return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i);
+ if (rdev->family == CHIP_KABINI)
+ return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i);
+ else
+ return kv_set_enabled_level(rdev, i);
}
static int kv_force_dpm_lowest(struct radeon_device *rdev)
@@ -1893,7 +1975,10 @@ static int kv_force_dpm_lowest(struct radeon_device *rdev)
break;
}
- return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i);
+ if (rdev->family == CHIP_KABINI)
+ return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i);
+ else
+ return kv_set_enabled_level(rdev, i);
}
static u8 kv_get_sleep_divider_id_from_clock(struct radeon_device *rdev,
@@ -1911,9 +1996,9 @@ static u8 kv_get_sleep_divider_id_from_clock(struct radeon_device *rdev,
if (!pi->caps_sclk_ds)
return 0;
- for (i = KV_MAX_DEEPSLEEP_DIVIDER_ID; i <= 0; i--) {
+ for (i = KV_MAX_DEEPSLEEP_DIVIDER_ID; i > 0; i--) {
temp = sclk / sumo_get_sleep_divider_from_id(i);
- if ((temp >= min) || (i == 0))
+ if (temp >= min)
break;
}
@@ -2039,12 +2124,12 @@ static void kv_apply_state_adjust_rules(struct radeon_device *rdev,
ps->dpmx_nb_ps_lo = 0x1;
ps->dpmx_nb_ps_hi = 0x0;
} else {
- ps->dpm0_pg_nb_ps_lo = 0x1;
+ ps->dpm0_pg_nb_ps_lo = 0x3;
ps->dpm0_pg_nb_ps_hi = 0x0;
- ps->dpmx_nb_ps_lo = 0x2;
- ps->dpmx_nb_ps_hi = 0x1;
+ ps->dpmx_nb_ps_lo = 0x3;
+ ps->dpmx_nb_ps_hi = 0x0;
- if (pi->sys_info.nb_dpm_enable && pi->battery_state) {
+ if (pi->sys_info.nb_dpm_enable) {
force_high = (mclk >= pi->sys_info.nbp_memory_clock[3]) ||
pi->video_start || (rdev->pm.dpm.new_active_crtc_count >= 3) ||
pi->disable_nb_ps3_in_battery;
@@ -2210,6 +2295,15 @@ static void kv_enable_new_levels(struct radeon_device *rdev)
}
}
+static int kv_set_enabled_level(struct radeon_device *rdev, u32 level)
+{
+ u32 new_mask = (1 << level);
+
+ return kv_send_msg_to_smc_with_parameter(rdev,
+ PPSMC_MSG_SCLKDPM_SetEnabledMask,
+ new_mask);
+}
+
static int kv_set_enabled_levels(struct radeon_device *rdev)
{
struct kv_power_info *pi = kv_get_pi(rdev);
diff --git a/drivers/gpu/drm/radeon/kv_dpm.h b/drivers/gpu/drm/radeon/kv_dpm.h
index 32bb079..8cef752 100644
--- a/drivers/gpu/drm/radeon/kv_dpm.h
+++ b/drivers/gpu/drm/radeon/kv_dpm.h
@@ -192,6 +192,7 @@ int kv_send_msg_to_smc_with_parameter(struct radeon_device *rdev,
int kv_read_smc_sram_dword(struct radeon_device *rdev, u32 smc_address,
u32 *value, u32 limit);
int kv_smc_dpm_enable(struct radeon_device *rdev, bool enable);
+int kv_smc_bapm_enable(struct radeon_device *rdev, bool enable);
int kv_copy_bytes_to_smc(struct radeon_device *rdev,
u32 smc_start_address,
const u8 *src, u32 byte_count, u32 limit);
diff --git a/drivers/gpu/drm/radeon/kv_smc.c b/drivers/gpu/drm/radeon/kv_smc.c
index 34a226d..0000b59 100644
--- a/drivers/gpu/drm/radeon/kv_smc.c
+++ b/drivers/gpu/drm/radeon/kv_smc.c
@@ -107,6 +107,14 @@ int kv_smc_dpm_enable(struct radeon_device *rdev, bool enable)
return kv_notify_message_to_smu(rdev, PPSMC_MSG_DPM_Disable);
}
+int kv_smc_bapm_enable(struct radeon_device *rdev, bool enable)
+{
+ if (enable)
+ return kv_notify_message_to_smu(rdev, PPSMC_MSG_EnableBAPM);
+ else
+ return kv_notify_message_to_smu(rdev, PPSMC_MSG_DisableBAPM);
+}
+
int kv_copy_bytes_to_smc(struct radeon_device *rdev,
u32 smc_start_address,
const u8 *src, u32 byte_count, u32 limit)
diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c
index f7b625c..6c398a4 100644
--- a/drivers/gpu/drm/radeon/ni_dpm.c
+++ b/drivers/gpu/drm/radeon/ni_dpm.c
@@ -3865,12 +3865,6 @@ int ni_dpm_set_power_state(struct radeon_device *rdev)
return ret;
}
- ret = ni_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_AUTO);
- if (ret) {
- DRM_ERROR("ni_dpm_force_performance_level failed\n");
- return ret;
- }
-
return 0;
}
diff --git a/drivers/gpu/drm/radeon/ppsmc.h b/drivers/gpu/drm/radeon/ppsmc.h
index 6828428..5670b82 100644
--- a/drivers/gpu/drm/radeon/ppsmc.h
+++ b/drivers/gpu/drm/radeon/ppsmc.h
@@ -163,6 +163,8 @@ typedef uint8_t PPSMC_Result;
#define PPSMC_MSG_VCEPowerON ((uint32_t) 0x10f)
#define PPSMC_MSG_DCE_RemoveVoltageAdjustment ((uint32_t) 0x11d)
#define PPSMC_MSG_DCE_AllowVoltageAdjustment ((uint32_t) 0x11e)
+#define PPSMC_MSG_EnableBAPM ((uint32_t) 0x120)
+#define PPSMC_MSG_DisableBAPM ((uint32_t) 0x121)
#define PPSMC_MSG_UVD_DPM_Config ((uint32_t) 0x124)
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 9fc61dd..2417571 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -2853,21 +2853,28 @@ static void r100_pll_errata_after_data(struct radeon_device *rdev)
uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg)
{
+ unsigned long flags;
uint32_t data;
+ spin_lock_irqsave(&rdev->pll_idx_lock, flags);
WREG8(RADEON_CLOCK_CNTL_INDEX, reg & 0x3f);
r100_pll_errata_after_index(rdev);
data = RREG32(RADEON_CLOCK_CNTL_DATA);
r100_pll_errata_after_data(rdev);
+ spin_unlock_irqrestore(&rdev->pll_idx_lock, flags);
return data;
}
void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&rdev->pll_idx_lock, flags);
WREG8(RADEON_CLOCK_CNTL_INDEX, ((reg & 0x3f) | RADEON_PLL_WR_EN));
r100_pll_errata_after_index(rdev);
WREG32(RADEON_CLOCK_CNTL_DATA, v);
r100_pll_errata_after_data(rdev);
+ spin_unlock_irqrestore(&rdev->pll_idx_lock, flags);
}
static void r100_set_safe_registers(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index 4e796ec..6edf2b3 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -160,18 +160,25 @@ void r420_pipes_init(struct radeon_device *rdev)
u32 r420_mc_rreg(struct radeon_device *rdev, u32 reg)
{
+ unsigned long flags;
u32 r;
+ spin_lock_irqsave(&rdev->mc_idx_lock, flags);
WREG32(R_0001F8_MC_IND_INDEX, S_0001F8_MC_IND_ADDR(reg));
r = RREG32(R_0001FC_MC_IND_DATA);
+ spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
return r;
}
void r420_mc_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&rdev->mc_idx_lock, flags);
WREG32(R_0001F8_MC_IND_INDEX, S_0001F8_MC_IND_ADDR(reg) |
S_0001F8_MC_IND_WR_EN(1));
WREG32(R_0001FC_MC_IND_DATA, v);
+ spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
}
static void r420_debugfs(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index ea4d373..2a1b187 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -119,6 +119,11 @@ u32 r600_get_xclk(struct radeon_device *rdev)
return rdev->clock.spll.reference_freq;
}
+int r600_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
+{
+ return 0;
+}
+
/* get temperature in millidegrees */
int rv6xx_get_temp(struct radeon_device *rdev)
{
@@ -1045,20 +1050,27 @@ int r600_mc_wait_for_idle(struct radeon_device *rdev)
uint32_t rs780_mc_rreg(struct radeon_device *rdev, uint32_t reg)
{
+ unsigned long flags;
uint32_t r;
+ spin_lock_irqsave(&rdev->mc_idx_lock, flags);
WREG32(R_0028F8_MC_INDEX, S_0028F8_MC_IND_ADDR(reg));
r = RREG32(R_0028FC_MC_DATA);
WREG32(R_0028F8_MC_INDEX, ~C_0028F8_MC_IND_ADDR);
+ spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
return r;
}
void rs780_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&rdev->mc_idx_lock, flags);
WREG32(R_0028F8_MC_INDEX, S_0028F8_MC_IND_ADDR(reg) |
S_0028F8_MC_IND_WR_EN(1));
WREG32(R_0028FC_MC_DATA, v);
WREG32(R_0028F8_MC_INDEX, 0x7F);
+ spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
}
static void r600_mc_program(struct radeon_device *rdev)
@@ -2092,20 +2104,27 @@ static void r600_gpu_init(struct radeon_device *rdev)
*/
u32 r600_pciep_rreg(struct radeon_device *rdev, u32 reg)
{
+ unsigned long flags;
u32 r;
+ spin_lock_irqsave(&rdev->pciep_idx_lock, flags);
WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
(void)RREG32(PCIE_PORT_INDEX);
r = RREG32(PCIE_PORT_DATA);
+ spin_unlock_irqrestore(&rdev->pciep_idx_lock, flags);
return r;
}
void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&rdev->pciep_idx_lock, flags);
WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
(void)RREG32(PCIE_PORT_INDEX);
WREG32(PCIE_PORT_DATA, (v));
(void)RREG32(PCIE_PORT_DATA);
+ spin_unlock_irqrestore(&rdev->pciep_idx_lock, flags);
}
/*
diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c
index fa0de46..e65f211 100644
--- a/drivers/gpu/drm/radeon/r600_dpm.c
+++ b/drivers/gpu/drm/radeon/r600_dpm.c
@@ -1219,30 +1219,20 @@ int r600_parse_extended_power_table(struct radeon_device *rdev)
void r600_free_extended_power_table(struct radeon_device *rdev)
{
- if (rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries)
- kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries);
- if (rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries)
- kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries);
- if (rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries)
- kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries);
- if (rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries)
- kfree(rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries);
- if (rdev->pm.dpm.dyn_state.cac_leakage_table.entries)
- kfree(rdev->pm.dpm.dyn_state.cac_leakage_table.entries);
- if (rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries)
- kfree(rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries);
- if (rdev->pm.dpm.dyn_state.ppm_table)
- kfree(rdev->pm.dpm.dyn_state.ppm_table);
- if (rdev->pm.dpm.dyn_state.cac_tdp_table)
- kfree(rdev->pm.dpm.dyn_state.cac_tdp_table);
- if (rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries)
- kfree(rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries);
- if (rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries)
- kfree(rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries);
- if (rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries)
- kfree(rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries);
- if (rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries)
- kfree(rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries);
+ struct radeon_dpm_dynamic_state *dyn_state = &rdev->pm.dpm.dyn_state;
+
+ kfree(dyn_state->vddc_dependency_on_sclk.entries);
+ kfree(dyn_state->vddci_dependency_on_mclk.entries);
+ kfree(dyn_state->vddc_dependency_on_mclk.entries);
+ kfree(dyn_state->mvdd_dependency_on_mclk.entries);
+ kfree(dyn_state->cac_leakage_table.entries);
+ kfree(dyn_state->phase_shedding_limits_table.entries);
+ kfree(dyn_state->ppm_table);
+ kfree(dyn_state->cac_tdp_table);
+ kfree(dyn_state->vce_clock_voltage_dependency_table.entries);
+ kfree(dyn_state->uvd_clock_voltage_dependency_table.entries);
+ kfree(dyn_state->samu_clock_voltage_dependency_table.entries);
+ kfree(dyn_state->acp_clock_voltage_dependency_table.entries);
}
enum radeon_pcie_gen r600_get_pcie_gen_support(struct radeon_device *rdev,
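The cleanup above leans on kfree() being a no-op for NULL pointers, which makes every removed guard redundant; the dyn_state alias then keeps each call on one line. In miniature (hypothetical struct, illustrative only):

#include <linux/slab.h>

struct opt_table { void *entries; };	/* hypothetical */

static void opt_table_free(struct opt_table *t)
{
	kfree(t->entries);	/* safe even if entries was never allocated */
	t->entries = NULL;
}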
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index 454f90a..e673fe2 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -1040,7 +1040,7 @@
# define HDMI0_AVI_INFO_CONT (1 << 1)
# define HDMI0_AUDIO_INFO_SEND (1 << 4)
# define HDMI0_AUDIO_INFO_CONT (1 << 5)
-# define HDMI0_AUDIO_INFO_SOURCE (1 << 6) /* 0 - sound block; 1 - hmdi regs */
+# define HDMI0_AUDIO_INFO_SOURCE (1 << 6) /* 0 - sound block; 1 - hdmi regs */
# define HDMI0_AUDIO_INFO_UPDATE (1 << 7)
# define HDMI0_MPEG_INFO_SEND (1 << 8)
# define HDMI0_MPEG_INFO_CONT (1 << 9)
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index ff8b564..a400ac1 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -181,7 +181,7 @@ extern int radeon_aspm;
#define RADEON_CG_SUPPORT_HDP_MGCG (1 << 16)
/* PG flags */
-#define RADEON_PG_SUPPORT_GFX_CG (1 << 0)
+#define RADEON_PG_SUPPORT_GFX_PG (1 << 0)
#define RADEON_PG_SUPPORT_GFX_SMG (1 << 1)
#define RADEON_PG_SUPPORT_GFX_DMG (1 << 2)
#define RADEON_PG_SUPPORT_UVD (1 << 3)
@@ -1778,6 +1778,7 @@ struct radeon_asic {
int (*force_performance_level)(struct radeon_device *rdev, enum radeon_dpm_forced_level level);
bool (*vblank_too_short)(struct radeon_device *rdev);
void (*powergate_uvd)(struct radeon_device *rdev, bool gate);
+ void (*enable_bapm)(struct radeon_device *rdev, bool enable);
} dpm;
/* pageflipping */
struct {
@@ -2110,6 +2111,28 @@ struct radeon_device {
resource_size_t rmmio_size;
/* protects concurrent MM_INDEX/DATA based register access */
spinlock_t mmio_idx_lock;
+ /* protects concurrent SMC based register access */
+ spinlock_t smc_idx_lock;
+ /* protects concurrent PLL register access */
+ spinlock_t pll_idx_lock;
+ /* protects concurrent MC register access */
+ spinlock_t mc_idx_lock;
+ /* protects concurrent PCIE register access */
+ spinlock_t pcie_idx_lock;
+ /* protects concurrent PCIE_PORT register access */
+ spinlock_t pciep_idx_lock;
+ /* protects concurrent PIF register access */
+ spinlock_t pif_idx_lock;
+ /* protects concurrent CG register access */
+ spinlock_t cg_idx_lock;
+ /* protects concurrent UVD register access */
+ spinlock_t uvd_idx_lock;
+ /* protects concurrent RCU register access */
+ spinlock_t rcu_idx_lock;
+ /* protects concurrent DIDT register access */
+ spinlock_t didt_idx_lock;
+ /* protects concurrent ENDPOINT (audio) register access */
+ spinlock_t end_idx_lock;
void __iomem *rmmio;
radeon_rreg_t mc_rreg;
radeon_wreg_t mc_wreg;
@@ -2277,123 +2300,179 @@ void cik_mm_wdoorbell(struct radeon_device *rdev, u32 offset, u32 v);
*/
static inline uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg)
{
+ unsigned long flags;
uint32_t r;
+ spin_lock_irqsave(&rdev->pcie_idx_lock, flags);
WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask));
r = RREG32(RADEON_PCIE_DATA);
+ spin_unlock_irqrestore(&rdev->pcie_idx_lock, flags);
return r;
}
static inline void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&rdev->pcie_idx_lock, flags);
WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask));
WREG32(RADEON_PCIE_DATA, (v));
+ spin_unlock_irqrestore(&rdev->pcie_idx_lock, flags);
}
static inline u32 tn_smc_rreg(struct radeon_device *rdev, u32 reg)
{
+ unsigned long flags;
u32 r;
+ spin_lock_irqsave(&rdev->smc_idx_lock, flags);
WREG32(TN_SMC_IND_INDEX_0, (reg));
r = RREG32(TN_SMC_IND_DATA_0);
+ spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
return r;
}
static inline void tn_smc_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&rdev->smc_idx_lock, flags);
WREG32(TN_SMC_IND_INDEX_0, (reg));
WREG32(TN_SMC_IND_DATA_0, (v));
+ spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
}
static inline u32 r600_rcu_rreg(struct radeon_device *rdev, u32 reg)
{
+ unsigned long flags;
u32 r;
+ spin_lock_irqsave(&rdev->rcu_idx_lock, flags);
WREG32(R600_RCU_INDEX, ((reg) & 0x1fff));
r = RREG32(R600_RCU_DATA);
+ spin_unlock_irqrestore(&rdev->rcu_idx_lock, flags);
return r;
}
static inline void r600_rcu_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&rdev->rcu_idx_lock, flags);
WREG32(R600_RCU_INDEX, ((reg) & 0x1fff));
WREG32(R600_RCU_DATA, (v));
+ spin_unlock_irqrestore(&rdev->rcu_idx_lock, flags);
}
static inline u32 eg_cg_rreg(struct radeon_device *rdev, u32 reg)
{
+ unsigned long flags;
u32 r;
+ spin_lock_irqsave(&rdev->cg_idx_lock, flags);
WREG32(EVERGREEN_CG_IND_ADDR, ((reg) & 0xffff));
r = RREG32(EVERGREEN_CG_IND_DATA);
+ spin_unlock_irqrestore(&rdev->cg_idx_lock, flags);
return r;
}
static inline void eg_cg_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&rdev->cg_idx_lock, flags);
WREG32(EVERGREEN_CG_IND_ADDR, ((reg) & 0xffff));
WREG32(EVERGREEN_CG_IND_DATA, (v));
+ spin_unlock_irqrestore(&rdev->cg_idx_lock, flags);
}
static inline u32 eg_pif_phy0_rreg(struct radeon_device *rdev, u32 reg)
{
+ unsigned long flags;
u32 r;
+ spin_lock_irqsave(&rdev->pif_idx_lock, flags);
WREG32(EVERGREEN_PIF_PHY0_INDEX, ((reg) & 0xffff));
r = RREG32(EVERGREEN_PIF_PHY0_DATA);
+ spin_unlock_irqrestore(&rdev->pif_idx_lock, flags);
return r;
}
static inline void eg_pif_phy0_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&rdev->pif_idx_lock, flags);
WREG32(EVERGREEN_PIF_PHY0_INDEX, ((reg) & 0xffff));
WREG32(EVERGREEN_PIF_PHY0_DATA, (v));
+ spin_unlock_irqrestore(&rdev->pif_idx_lock, flags);
}
static inline u32 eg_pif_phy1_rreg(struct radeon_device *rdev, u32 reg)
{
+ unsigned long flags;
u32 r;
+ spin_lock_irqsave(&rdev->pif_idx_lock, flags);
WREG32(EVERGREEN_PIF_PHY1_INDEX, ((reg) & 0xffff));
r = RREG32(EVERGREEN_PIF_PHY1_DATA);
+ spin_unlock_irqrestore(&rdev->pif_idx_lock, flags);
return r;
}
static inline void eg_pif_phy1_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&rdev->pif_idx_lock, flags);
WREG32(EVERGREEN_PIF_PHY1_INDEX, ((reg) & 0xffff));
WREG32(EVERGREEN_PIF_PHY1_DATA, (v));
+ spin_unlock_irqrestore(&rdev->pif_idx_lock, flags);
}
static inline u32 r600_uvd_ctx_rreg(struct radeon_device *rdev, u32 reg)
{
+ unsigned long flags;
u32 r;
+ spin_lock_irqsave(&rdev->uvd_idx_lock, flags);
WREG32(R600_UVD_CTX_INDEX, ((reg) & 0x1ff));
r = RREG32(R600_UVD_CTX_DATA);
+ spin_unlock_irqrestore(&rdev->uvd_idx_lock, flags);
return r;
}
static inline void r600_uvd_ctx_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&rdev->uvd_idx_lock, flags);
WREG32(R600_UVD_CTX_INDEX, ((reg) & 0x1ff));
WREG32(R600_UVD_CTX_DATA, (v));
+ spin_unlock_irqrestore(&rdev->uvd_idx_lock, flags);
}
static inline u32 cik_didt_rreg(struct radeon_device *rdev, u32 reg)
{
+ unsigned long flags;
u32 r;
+ spin_lock_irqsave(&rdev->didt_idx_lock, flags);
WREG32(CIK_DIDT_IND_INDEX, (reg));
r = RREG32(CIK_DIDT_IND_DATA);
+ spin_unlock_irqrestore(&rdev->didt_idx_lock, flags);
return r;
}
static inline void cik_didt_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&rdev->didt_idx_lock, flags);
WREG32(CIK_DIDT_IND_INDEX, (reg));
WREG32(CIK_DIDT_IND_DATA, (v));
+ spin_unlock_irqrestore(&rdev->didt_idx_lock, flags);
}
void r100_pll_errata_after_index(struct radeon_device *rdev);
@@ -2569,6 +2648,7 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
#define radeon_dpm_force_performance_level(rdev, l) rdev->asic->dpm.force_performance_level((rdev), (l))
#define radeon_dpm_vblank_too_short(rdev) rdev->asic->dpm.vblank_too_short((rdev))
#define radeon_dpm_powergate_uvd(rdev, g) rdev->asic->dpm.powergate_uvd((rdev), (g))
+#define radeon_dpm_enable_bapm(rdev, e) rdev->asic->dpm.enable_bapm((rdev), (e))
/* Common functions */
/* AGP */
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index 630853b..5003385 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -1037,6 +1037,7 @@ static struct radeon_asic rv6xx_asic = {
.set_pcie_lanes = &r600_set_pcie_lanes,
.set_clock_gating = NULL,
.get_temperature = &rv6xx_get_temp,
+ .set_uvd_clocks = &r600_set_uvd_clocks,
},
.dpm = {
.init = &rv6xx_dpm_init,
@@ -1126,6 +1127,7 @@ static struct radeon_asic rs780_asic = {
.set_pcie_lanes = NULL,
.set_clock_gating = NULL,
.get_temperature = &rv6xx_get_temp,
+ .set_uvd_clocks = &r600_set_uvd_clocks,
},
.dpm = {
.init = &rs780_dpm_init,
@@ -1141,6 +1143,7 @@ static struct radeon_asic rs780_asic = {
.get_mclk = &rs780_dpm_get_mclk,
.print_power_state = &rs780_dpm_print_power_state,
.debugfs_print_current_performance_level = &rs780_dpm_debugfs_print_current_performance_level,
+ .force_performance_level = &rs780_dpm_force_performance_level,
},
.pflip = {
.pre_page_flip = &rs600_pre_page_flip,
@@ -1791,6 +1794,7 @@ static struct radeon_asic trinity_asic = {
.print_power_state = &trinity_dpm_print_power_state,
.debugfs_print_current_performance_level = &trinity_dpm_debugfs_print_current_performance_level,
.force_performance_level = &trinity_dpm_force_performance_level,
+ .enable_bapm = &trinity_dpm_enable_bapm,
},
.pflip = {
.pre_page_flip = &evergreen_pre_page_flip,
@@ -2166,6 +2170,7 @@ static struct radeon_asic kv_asic = {
.debugfs_print_current_performance_level = &kv_dpm_debugfs_print_current_performance_level,
.force_performance_level = &kv_dpm_force_performance_level,
.powergate_uvd = &kv_dpm_powergate_uvd,
+ .enable_bapm = &kv_dpm_enable_bapm,
},
.pflip = {
.pre_page_flip = &evergreen_pre_page_flip,
@@ -2390,7 +2395,7 @@ int radeon_asic_init(struct radeon_device *rdev)
RADEON_CG_SUPPORT_HDP_LS |
RADEON_CG_SUPPORT_HDP_MGCG;
rdev->pg_flags = 0 |
- /*RADEON_PG_SUPPORT_GFX_CG | */
+ /*RADEON_PG_SUPPORT_GFX_PG | */
RADEON_PG_SUPPORT_SDMA;
break;
case CHIP_OLAND:
@@ -2479,7 +2484,7 @@ int radeon_asic_init(struct radeon_device *rdev)
RADEON_CG_SUPPORT_HDP_LS |
RADEON_CG_SUPPORT_HDP_MGCG;
rdev->pg_flags = 0;
- /*RADEON_PG_SUPPORT_GFX_CG |
+ /*RADEON_PG_SUPPORT_GFX_PG |
RADEON_PG_SUPPORT_GFX_SMG |
RADEON_PG_SUPPORT_GFX_DMG |
RADEON_PG_SUPPORT_UVD |
@@ -2507,7 +2512,7 @@ int radeon_asic_init(struct radeon_device *rdev)
RADEON_CG_SUPPORT_HDP_LS |
RADEON_CG_SUPPORT_HDP_MGCG;
rdev->pg_flags = 0;
- /*RADEON_PG_SUPPORT_GFX_CG |
+ /*RADEON_PG_SUPPORT_GFX_PG |
RADEON_PG_SUPPORT_GFX_SMG |
RADEON_PG_SUPPORT_UVD |
RADEON_PG_SUPPORT_VCE |
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 818bbe6..70c29d5 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -389,6 +389,7 @@ int r600_mc_wait_for_idle(struct radeon_device *rdev);
u32 r600_get_xclk(struct radeon_device *rdev);
uint64_t r600_get_gpu_clock_counter(struct radeon_device *rdev);
int rv6xx_get_temp(struct radeon_device *rdev);
+int r600_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
int r600_dpm_pre_set_power_state(struct radeon_device *rdev);
void r600_dpm_post_set_power_state(struct radeon_device *rdev);
/* r600 dma */
@@ -428,6 +429,8 @@ void rs780_dpm_print_power_state(struct radeon_device *rdev,
struct radeon_ps *ps);
void rs780_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
struct seq_file *m);
+int rs780_dpm_force_performance_level(struct radeon_device *rdev,
+ enum radeon_dpm_forced_level level);
/*
* rv770,rv730,rv710,rv740
@@ -625,6 +628,7 @@ void trinity_dpm_debugfs_print_current_performance_level(struct radeon_device *r
struct seq_file *m);
int trinity_dpm_force_performance_level(struct radeon_device *rdev,
enum radeon_dpm_forced_level level);
+void trinity_dpm_enable_bapm(struct radeon_device *rdev, bool enable);
/* DCE6 - SI */
void dce6_bandwidth_update(struct radeon_device *rdev);
@@ -781,6 +785,7 @@ void kv_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
int kv_dpm_force_performance_level(struct radeon_device *rdev,
enum radeon_dpm_forced_level level);
void kv_dpm_powergate_uvd(struct radeon_device *rdev, bool gate);
+void kv_dpm_enable_bapm(struct radeon_device *rdev, bool enable);
/* uvd v1.0 */
uint32_t uvd_v1_0_get_rptr(struct radeon_device *rdev,
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 2399f25..79159b5 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -396,6 +396,21 @@ static int radeon_connector_set_property(struct drm_connector *connector, struct
}
}
+ if (property == rdev->mode_info.audio_property) {
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ /* need to find digital encoder on connector */
+ encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS);
+ if (!encoder)
+ return 0;
+
+ radeon_encoder = to_radeon_encoder(encoder);
+
+ if (radeon_connector->audio != val) {
+ radeon_connector->audio = val;
+ radeon_property_change_mode(&radeon_encoder->base);
+ }
+ }
+
if (property == rdev->mode_info.underscan_property) {
/* need to find digital encoder on connector */
encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS);
@@ -1420,7 +1435,7 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
if (radeon_dp_getdpcd(radeon_connector))
ret = connector_status_connected;
} else {
- /* try non-aux ddc (DP to DVI/HMDI/etc. adapter) */
+ /* try non-aux ddc (DP to DVI/HDMI/etc. adapter) */
if (radeon_ddc_probe(radeon_connector, false))
ret = connector_status_connected;
}
@@ -1489,6 +1504,24 @@ static const struct drm_connector_funcs radeon_dp_connector_funcs = {
.force = radeon_dvi_force,
};
+static const struct drm_connector_funcs radeon_edp_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .detect = radeon_dp_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .set_property = radeon_lvds_set_property,
+ .destroy = radeon_dp_connector_destroy,
+ .force = radeon_dvi_force,
+};
+
+static const struct drm_connector_funcs radeon_lvds_bridge_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .detect = radeon_dp_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .set_property = radeon_lvds_set_property,
+ .destroy = radeon_dp_connector_destroy,
+ .force = radeon_dvi_force,
+};
+
void
radeon_add_atom_connector(struct drm_device *dev,
uint32_t connector_id,
@@ -1580,8 +1613,6 @@ radeon_add_atom_connector(struct drm_device *dev,
goto failed;
radeon_dig_connector->igp_lane_info = igp_lane_info;
radeon_connector->con_priv = radeon_dig_connector;
- drm_connector_init(dev, &radeon_connector->base, &radeon_dp_connector_funcs, connector_type);
- drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs);
if (i2c_bus->valid) {
/* add DP i2c bus */
if (connector_type == DRM_MODE_CONNECTOR_eDP)
@@ -1598,6 +1629,10 @@ radeon_add_atom_connector(struct drm_device *dev,
case DRM_MODE_CONNECTOR_VGA:
case DRM_MODE_CONNECTOR_DVIA:
default:
+ drm_connector_init(dev, &radeon_connector->base,
+ &radeon_dp_connector_funcs, connector_type);
+ drm_connector_helper_add(&radeon_connector->base,
+ &radeon_dp_connector_helper_funcs);
connector->interlace_allowed = true;
connector->doublescan_allowed = true;
radeon_connector->dac_load_detect = true;
@@ -1610,6 +1645,10 @@ radeon_add_atom_connector(struct drm_device *dev,
case DRM_MODE_CONNECTOR_HDMIA:
case DRM_MODE_CONNECTOR_HDMIB:
case DRM_MODE_CONNECTOR_DisplayPort:
+ drm_connector_init(dev, &radeon_connector->base,
+ &radeon_dp_connector_funcs, connector_type);
+ drm_connector_helper_add(&radeon_connector->base,
+ &radeon_dp_connector_helper_funcs);
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.underscan_property,
UNDERSCAN_OFF);
@@ -1619,6 +1658,9 @@ radeon_add_atom_connector(struct drm_device *dev,
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.underscan_vborder_property,
0);
+ drm_object_attach_property(&radeon_connector->base.base,
+ rdev->mode_info.audio_property,
+ RADEON_AUDIO_DISABLE);
subpixel_order = SubPixelHorizontalRGB;
connector->interlace_allowed = true;
if (connector_type == DRM_MODE_CONNECTOR_HDMIB)
@@ -1634,6 +1676,10 @@ radeon_add_atom_connector(struct drm_device *dev,
break;
case DRM_MODE_CONNECTOR_LVDS:
case DRM_MODE_CONNECTOR_eDP:
+ drm_connector_init(dev, &radeon_connector->base,
+ &radeon_lvds_bridge_connector_funcs, connector_type);
+ drm_connector_helper_add(&radeon_connector->base,
+ &radeon_dp_connector_helper_funcs);
drm_object_attach_property(&radeon_connector->base.base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_FULLSCREEN);
@@ -1708,6 +1754,11 @@ radeon_add_atom_connector(struct drm_device *dev,
rdev->mode_info.underscan_vborder_property,
0);
}
+ if (ASIC_IS_DCE2(rdev)) {
+ drm_object_attach_property(&radeon_connector->base.base,
+ rdev->mode_info.audio_property,
+ RADEON_AUDIO_DISABLE);
+ }
if (connector_type == DRM_MODE_CONNECTOR_DVII) {
radeon_connector->dac_load_detect = true;
drm_object_attach_property(&radeon_connector->base.base,
@@ -1748,6 +1799,11 @@ radeon_add_atom_connector(struct drm_device *dev,
rdev->mode_info.underscan_vborder_property,
0);
}
+ if (ASIC_IS_DCE2(rdev)) {
+ drm_object_attach_property(&radeon_connector->base.base,
+ rdev->mode_info.audio_property,
+ RADEON_AUDIO_DISABLE);
+ }
subpixel_order = SubPixelHorizontalRGB;
connector->interlace_allowed = true;
if (connector_type == DRM_MODE_CONNECTOR_HDMIB)
@@ -1787,6 +1843,11 @@ radeon_add_atom_connector(struct drm_device *dev,
rdev->mode_info.underscan_vborder_property,
0);
}
+ if (ASIC_IS_DCE2(rdev)) {
+ drm_object_attach_property(&radeon_connector->base.base,
+ rdev->mode_info.audio_property,
+ RADEON_AUDIO_DISABLE);
+ }
connector->interlace_allowed = true;
/* in theory with a DP to VGA converter... */
connector->doublescan_allowed = false;
@@ -1797,7 +1858,7 @@ radeon_add_atom_connector(struct drm_device *dev,
goto failed;
radeon_dig_connector->igp_lane_info = igp_lane_info;
radeon_connector->con_priv = radeon_dig_connector;
- drm_connector_init(dev, &radeon_connector->base, &radeon_dp_connector_funcs, connector_type);
+ drm_connector_init(dev, &radeon_connector->base, &radeon_edp_connector_funcs, connector_type);
drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs);
if (i2c_bus->valid) {
/* add DP i2c bus */
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index a560844..ac6ece6 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -28,6 +28,7 @@
#include <drm/radeon_drm.h>
#include "radeon_reg.h"
#include "radeon.h"
+#include "radeon_trace.h"
static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
{
@@ -80,9 +81,11 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
p->relocs[i].lobj.bo = p->relocs[i].robj;
p->relocs[i].lobj.written = !!r->write_domain;
- /* the first reloc of an UVD job is the
- msg and that must be in VRAM */
- if (p->ring == R600_RING_TYPE_UVD_INDEX && i == 0) {
+ /* the first reloc of an UVD job is the msg and that must be in
+ VRAM; also put everything into VRAM on AGP cards to avoid
+ image corruption */
+ if (p->ring == R600_RING_TYPE_UVD_INDEX &&
+ (i == 0 || p->rdev->flags & RADEON_IS_AGP)) {
/* TODO: is this still needed for NI+ ? */
p->relocs[i].lobj.domain =
RADEON_GEM_DOMAIN_VRAM;
@@ -559,6 +562,8 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
return r;
}
+ trace_radeon_cs(&parser);
+
r = radeon_cs_ib_chunk(rdev, &parser);
if (r) {
goto out;
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 16cb879..e29faa7 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1249,6 +1249,17 @@ int radeon_device_init(struct radeon_device *rdev,
/* Registers mapping */
/* TODO: block userspace mapping of io register */
spin_lock_init(&rdev->mmio_idx_lock);
+ spin_lock_init(&rdev->smc_idx_lock);
+ spin_lock_init(&rdev->pll_idx_lock);
+ spin_lock_init(&rdev->mc_idx_lock);
+ spin_lock_init(&rdev->pcie_idx_lock);
+ spin_lock_init(&rdev->pciep_idx_lock);
+ spin_lock_init(&rdev->pif_idx_lock);
+ spin_lock_init(&rdev->cg_idx_lock);
+ spin_lock_init(&rdev->uvd_idx_lock);
+ spin_lock_init(&rdev->rcu_idx_lock);
+ spin_lock_init(&rdev->didt_idx_lock);
+ spin_lock_init(&rdev->end_idx_lock);
if (rdev->family >= CHIP_BONAIRE) {
rdev->rmmio_base = pci_resource_start(rdev->pdev, 5);
rdev->rmmio_size = pci_resource_len(rdev->pdev, 5);
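
A minimal sketch of the pattern all these new locks serialize (assuming the WREG32()/RREG32() accessors from radeon.h; indexed_rreg() is a hypothetical helper, not code from this patch): each indirect register block is reached through an INDEX/DATA register pair, so the INDEX write and the DATA access must not interleave between CPUs or with interrupts.

static u32 indexed_rreg(struct radeon_device *rdev, spinlock_t *lock,
			u32 index_reg, u32 data_reg, u32 offset)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(lock, flags);	/* safe from IRQ context too */
	WREG32(index_reg, offset);	/* select the indirect register */
	val = RREG32(data_reg);		/* access it through the DATA port */
	spin_unlock_irqrestore(lock, flags);

	return val;
}

The rs400/rs600/rs690/rv515 MC hunks and the SMC hunks further down follow exactly this shape with their respective locks.
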
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index b055bdd..0d1aa05 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -1172,6 +1172,12 @@ static struct drm_prop_enum_list radeon_underscan_enum_list[] =
{ UNDERSCAN_AUTO, "auto" },
};
+static struct drm_prop_enum_list radeon_audio_enum_list[] =
+{ { RADEON_AUDIO_DISABLE, "off" },
+ { RADEON_AUDIO_ENABLE, "on" },
+ { RADEON_AUDIO_AUTO, "auto" },
+};
+
static int radeon_modeset_create_props(struct radeon_device *rdev)
{
int sz;
@@ -1222,6 +1228,12 @@ static int radeon_modeset_create_props(struct radeon_device *rdev)
if (!rdev->mode_info.underscan_vborder_property)
return -ENOMEM;
+ sz = ARRAY_SIZE(radeon_audio_enum_list);
+ rdev->mode_info.audio_property =
+ drm_property_create_enum(rdev->ddev, 0,
+ "audio",
+ radeon_audio_enum_list, sz);
+
return 0;
}
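
The same drm_property_create_enum()/drm_object_attach_property() pairing can expose any tri-state knob; a minimal sketch using only core DRM APIs (the demo_* names are hypothetical):

static struct drm_prop_enum_list demo_enum_list[] =
{	{ 0, "off" },
	{ 1, "on" },
	{ 2, "auto" },
};

static void demo_attach_property(struct drm_device *dev,
				 struct drm_connector *connector)
{
	struct drm_property *prop;

	prop = drm_property_create_enum(dev, 0, "demo",
					demo_enum_list,
					ARRAY_SIZE(demo_enum_list));
	if (!prop)
		return;	/* property is optional; skip on allocation failure */

	drm_object_attach_property(&connector->base, prop, 0);
}

The connector hunks earlier in this patch attach the new "audio" property with RADEON_AUDIO_DISABLE as the initial value.
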
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index cb4445f..cdd12dc 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -153,7 +153,7 @@ int radeon_benchmarking = 0;
int radeon_testing = 0;
int radeon_connector_table = 0;
int radeon_tv = 1;
-int radeon_audio = 0;
+int radeon_audio = 1;
int radeon_disp_priority = 0;
int radeon_hw_i2c = 0;
int radeon_pcie_gen2 = -1;
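
The flipped default reaches userspace through the usual module-parameter plumbing; a sketch of the wiring (the MODULE_PARM_DESC text in radeon_drv.c may differ slightly):

MODULE_PARM_DESC(audio, "Audio enable (1 = enable)");
module_param_named(audio, radeon_audio, int, 0444);

With the per-connector "audio" property added elsewhere in this patch, the module parameter acts as the global default while the property allows per-connector control.
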
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index d908d8d..ef63d3f 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -247,6 +247,8 @@ struct radeon_mode_info {
struct drm_property *underscan_property;
struct drm_property *underscan_hborder_property;
struct drm_property *underscan_vborder_property;
+ /* audio */
+ struct drm_property *audio_property;
/* hardcoded DFP edid from BIOS */
struct edid *bios_hardcoded_edid;
int bios_hardcoded_edid_size;
@@ -471,6 +473,12 @@ struct radeon_router {
u8 cd_mux_state;
};
+enum radeon_connector_audio {
+ RADEON_AUDIO_DISABLE = 0,
+ RADEON_AUDIO_ENABLE = 1,
+ RADEON_AUDIO_AUTO = 2
+};
+
struct radeon_connector {
struct drm_connector base;
uint32_t connector_id;
@@ -489,6 +497,7 @@ struct radeon_connector {
struct radeon_hpd hpd;
struct radeon_router router;
struct radeon_i2c_chan *router_bus;
+ enum radeon_connector_audio audio;
};
struct radeon_framebuffer {
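
A sketch of how the stored value would be consumed; demo_connector_set_audio() is hypothetical, and the real property handler (not shown in this hunk) lives in radeon_connectors.c:

static int demo_connector_set_audio(struct drm_connector *connector,
				    struct drm_property *property,
				    uint64_t val)
{
	struct radeon_device *rdev = connector->dev->dev_private;
	struct radeon_connector *radeon_connector =
		to_radeon_connector(connector);

	if (property != rdev->mode_info.audio_property)
		return -EINVAL;

	radeon_connector->audio = val;
	/* a real handler would re-set the mode so the change takes effect */
	return 0;
}
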
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index d755536..87e1d69 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -67,7 +67,16 @@ int radeon_pm_get_type_index(struct radeon_device *rdev,
void radeon_pm_acpi_event_handler(struct radeon_device *rdev)
{
- if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
+ if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
+ mutex_lock(&rdev->pm.mutex);
+ if (power_supply_is_system_supplied() > 0)
+ rdev->pm.dpm.ac_power = true;
+ else
+ rdev->pm.dpm.ac_power = false;
+ if (rdev->asic->dpm.enable_bapm)
+ radeon_dpm_enable_bapm(rdev, rdev->pm.dpm.ac_power);
+ mutex_unlock(&rdev->pm.mutex);
+ } else if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
if (rdev->pm.profile == PM_PROFILE_AUTO) {
mutex_lock(&rdev->pm.mutex);
radeon_pm_update_profile(rdev);
@@ -333,7 +342,7 @@ static ssize_t radeon_get_pm_profile(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
+ struct drm_device *ddev = dev_get_drvdata(dev);
struct radeon_device *rdev = ddev->dev_private;
int cp = rdev->pm.profile;
@@ -349,7 +358,7 @@ static ssize_t radeon_set_pm_profile(struct device *dev,
const char *buf,
size_t count)
{
- struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
+ struct drm_device *ddev = dev_get_drvdata(dev);
struct radeon_device *rdev = ddev->dev_private;
mutex_lock(&rdev->pm.mutex);
@@ -383,7 +392,7 @@ static ssize_t radeon_get_pm_method(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
+ struct drm_device *ddev = dev_get_drvdata(dev);
struct radeon_device *rdev = ddev->dev_private;
int pm = rdev->pm.pm_method;
@@ -397,7 +406,7 @@ static ssize_t radeon_set_pm_method(struct device *dev,
const char *buf,
size_t count)
{
- struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
+ struct drm_device *ddev = dev_get_drvdata(dev);
struct radeon_device *rdev = ddev->dev_private;
/* we don't support the legacy modes with dpm */
@@ -433,7 +442,7 @@ static ssize_t radeon_get_dpm_state(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
+ struct drm_device *ddev = dev_get_drvdata(dev);
struct radeon_device *rdev = ddev->dev_private;
enum radeon_pm_state_type pm = rdev->pm.dpm.user_state;
@@ -447,7 +456,7 @@ static ssize_t radeon_set_dpm_state(struct device *dev,
const char *buf,
size_t count)
{
- struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
+ struct drm_device *ddev = dev_get_drvdata(dev);
struct radeon_device *rdev = ddev->dev_private;
mutex_lock(&rdev->pm.mutex);
@@ -472,7 +481,7 @@ static ssize_t radeon_get_dpm_forced_performance_level(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
+ struct drm_device *ddev = dev_get_drvdata(dev);
struct radeon_device *rdev = ddev->dev_private;
enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level;
@@ -486,7 +495,7 @@ static ssize_t radeon_set_dpm_forced_performance_level(struct device *dev,
const char *buf,
size_t count)
{
- struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
+ struct drm_device *ddev = dev_get_drvdata(dev);
struct radeon_device *rdev = ddev->dev_private;
enum radeon_dpm_forced_level level;
int ret = 0;
@@ -524,7 +533,7 @@ static ssize_t radeon_hwmon_show_temp(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
+ struct drm_device *ddev = dev_get_drvdata(dev);
struct radeon_device *rdev = ddev->dev_private;
int temp;
@@ -536,6 +545,23 @@ static ssize_t radeon_hwmon_show_temp(struct device *dev,
return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}
+static ssize_t radeon_hwmon_show_temp_thresh(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct radeon_device *rdev = ddev->dev_private;
+ int hyst = to_sensor_dev_attr(attr)->index;
+ int temp;
+
+ if (hyst)
+ temp = rdev->pm.dpm.thermal.min_temp;
+ else
+ temp = rdev->pm.dpm.thermal.max_temp;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", temp);
+}
+
static ssize_t radeon_hwmon_show_name(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -544,16 +570,37 @@ static ssize_t radeon_hwmon_show_name(struct device *dev,
}
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, radeon_hwmon_show_temp, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, radeon_hwmon_show_temp_thresh, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, radeon_hwmon_show_temp_thresh, NULL, 1);
static SENSOR_DEVICE_ATTR(name, S_IRUGO, radeon_hwmon_show_name, NULL, 0);
static struct attribute *hwmon_attributes[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr,
+ &sensor_dev_attr_temp1_crit.dev_attr.attr,
+ &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
&sensor_dev_attr_name.dev_attr.attr,
NULL
};
+static umode_t hwmon_attributes_visible(struct kobject *kobj,
+ struct attribute *attr, int index)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct radeon_device *rdev = ddev->dev_private;
+
+ /* Skip limit attributes if DPM is not enabled */
+ if (rdev->pm.pm_method != PM_METHOD_DPM &&
+ (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
+ attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr))
+ return 0;
+
+ return attr->mode;
+}
+
static const struct attribute_group hwmon_attrgroup = {
.attrs = hwmon_attributes,
+ .is_visible = hwmon_attributes_visible,
};
static int radeon_hwmon_init(struct radeon_device *rdev)
@@ -870,10 +917,13 @@ static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev)
radeon_dpm_post_set_power_state(rdev);
- /* force low perf level for thermal */
- if (rdev->pm.dpm.thermal_active &&
- rdev->asic->dpm.force_performance_level) {
- radeon_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_LOW);
+ if (rdev->asic->dpm.force_performance_level) {
+ if (rdev->pm.dpm.thermal_active)
+ /* force low perf level for thermal */
+ radeon_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_LOW);
+ else
+ /* otherwise, enable auto */
+ radeon_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_AUTO);
}
done:
@@ -1102,9 +1152,10 @@ static int radeon_pm_init_dpm(struct radeon_device *rdev)
{
int ret;
- /* default to performance state */
+ /* default to balanced state */
rdev->pm.dpm.state = POWER_STATE_TYPE_BALANCED;
rdev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
+ rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO;
rdev->pm.default_sclk = rdev->clock.default_sclk;
rdev->pm.default_mclk = rdev->clock.default_mclk;
rdev->pm.current_sclk = rdev->clock.default_sclk;
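
The DPM branch added to the ACPI handler above pairs with the trinity BAPM plumbing near the end of this patch; a condensed sketch of the event flow, assuming power_supply_is_system_supplied() from <linux/power_supply.h> (demo_acpi_power_event is hypothetical):

void demo_acpi_power_event(struct radeon_device *rdev)
{
	/* only meaningful once dpm is up */
	if (rdev->pm.pm_method != PM_METHOD_DPM || !rdev->pm.dpm_enabled)
		return;

	mutex_lock(&rdev->pm.mutex);
	/* treat anything but a positive answer as battery power */
	rdev->pm.dpm.ac_power = power_supply_is_system_supplied() > 0;
	if (rdev->asic->dpm.enable_bapm)
		radeon_dpm_enable_bapm(rdev, rdev->pm.dpm.ac_power);
	mutex_unlock(&rdev->pm.mutex);
}
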
diff --git a/drivers/gpu/drm/radeon/radeon_trace.h b/drivers/gpu/drm/radeon/radeon_trace.h
index eafd816..f7e3678 100644
--- a/drivers/gpu/drm/radeon/radeon_trace.h
+++ b/drivers/gpu/drm/radeon/radeon_trace.h
@@ -27,6 +27,26 @@ TRACE_EVENT(radeon_bo_create,
TP_printk("bo=%p, pages=%u", __entry->bo, __entry->pages)
);
+TRACE_EVENT(radeon_cs,
+ TP_PROTO(struct radeon_cs_parser *p),
+ TP_ARGS(p),
+ TP_STRUCT__entry(
+ __field(u32, ring)
+ __field(u32, dw)
+ __field(u32, fences)
+ ),
+
+ TP_fast_assign(
+ __entry->ring = p->ring;
+ __entry->dw = p->chunks[p->chunk_ib_idx].length_dw;
+ __entry->fences = radeon_fence_count_emitted(
+ p->rdev, p->ring);
+ ),
+ TP_printk("ring=%u, dw=%u, fences=%u",
+ __entry->ring, __entry->dw,
+ __entry->fences)
+);
+
DECLARE_EVENT_CLASS(radeon_fence_request,
TP_PROTO(struct drm_device *dev, u32 seqno),
@@ -53,13 +73,6 @@ DEFINE_EVENT(radeon_fence_request, radeon_fence_emit,
TP_ARGS(dev, seqno)
);
-DEFINE_EVENT(radeon_fence_request, radeon_fence_retire,
-
- TP_PROTO(struct drm_device *dev, u32 seqno),
-
- TP_ARGS(dev, seqno)
-);
-
DEFINE_EVENT(radeon_fence_request, radeon_fence_wait_begin,
TP_PROTO(struct drm_device *dev, u32 seqno),
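
For readers new to TRACE_EVENT(), a stripped-down sketch of the three parts every event needs, omitting the TRACE_SYSTEM/define_trace.h boilerplate a real trace header carries (demo_submit is hypothetical):

TRACE_EVENT(demo_submit,
	TP_PROTO(u32 ring, u32 dw),		/* call-site prototype */
	TP_ARGS(ring, dw),
	TP_STRUCT__entry(			/* ring-buffer record layout */
		__field(u32, ring)
		__field(u32, dw)
	),
	TP_fast_assign(				/* copy args into the record */
		__entry->ring = ring;
		__entry->dw = dw;
	),
	TP_printk("ring=%u, dw=%u", __entry->ring, __entry->dw)
);

The macro generates trace_demo_submit(), which is the same mechanism radeon_cs_ioctl() uses above as trace_radeon_cs(); output is read through the debugfs tracing interface.
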
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index b8074a8..9566b59 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -274,19 +274,26 @@ static void rs400_mc_init(struct radeon_device *rdev)
uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg)
{
+ unsigned long flags;
uint32_t r;
+ spin_lock_irqsave(&rdev->mc_idx_lock, flags);
WREG32(RS480_NB_MC_INDEX, reg & 0xff);
r = RREG32(RS480_NB_MC_DATA);
WREG32(RS480_NB_MC_INDEX, 0xff);
+ spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
return r;
}
void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&rdev->mc_idx_lock, flags);
WREG32(RS480_NB_MC_INDEX, ((reg) & 0xff) | RS480_NB_MC_IND_WR_EN);
WREG32(RS480_NB_MC_DATA, (v));
WREG32(RS480_NB_MC_INDEX, 0xff);
+ spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
}
#if defined(CONFIG_DEBUG_FS)
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 670b555..6acba80 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -847,16 +847,26 @@ void rs600_bandwidth_update(struct radeon_device *rdev)
uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg)
{
+ unsigned long flags;
+ u32 r;
+
+ spin_lock_irqsave(&rdev->mc_idx_lock, flags);
WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) |
S_000070_MC_IND_CITF_ARB0(1));
- return RREG32(R_000074_MC_IND_DATA);
+ r = RREG32(R_000074_MC_IND_DATA);
+ spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
+ return r;
}
void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&rdev->mc_idx_lock, flags);
WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) |
S_000070_MC_IND_CITF_ARB0(1) | S_000070_MC_IND_WR_EN(1));
WREG32(R_000074_MC_IND_DATA, v);
+ spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
}
static void rs600_debugfs(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index d8ddfb3..1447d79 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -631,20 +631,27 @@ void rs690_bandwidth_update(struct radeon_device *rdev)
uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg)
{
+ unsigned long flags;
uint32_t r;
+ spin_lock_irqsave(&rdev->mc_idx_lock, flags);
WREG32(R_000078_MC_INDEX, S_000078_MC_IND_ADDR(reg));
r = RREG32(R_00007C_MC_DATA);
WREG32(R_000078_MC_INDEX, ~C_000078_MC_IND_ADDR);
+ spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
return r;
}
void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&rdev->mc_idx_lock, flags);
WREG32(R_000078_MC_INDEX, S_000078_MC_IND_ADDR(reg) |
S_000078_MC_IND_WR_EN(1));
WREG32(R_00007C_MC_DATA, v);
WREG32(R_000078_MC_INDEX, 0x7F);
+ spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
}
static void rs690_mc_program(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/rs780_dpm.c b/drivers/gpu/drm/radeon/rs780_dpm.c
index d1a1ce7..6af8505 100644
--- a/drivers/gpu/drm/radeon/rs780_dpm.c
+++ b/drivers/gpu/drm/radeon/rs780_dpm.c
@@ -62,9 +62,7 @@ static void rs780_get_pm_mode_parameters(struct radeon_device *rdev)
radeon_crtc = to_radeon_crtc(crtc);
pi->crtc_id = radeon_crtc->crtc_id;
if (crtc->mode.htotal && crtc->mode.vtotal)
- pi->refresh_rate =
- (crtc->mode.clock * 1000) /
- (crtc->mode.htotal * crtc->mode.vtotal);
+ pi->refresh_rate = drm_mode_vrefresh(&crtc->mode);
break;
}
}
@@ -376,9 +374,8 @@ static void rs780_disable_vbios_powersaving(struct radeon_device *rdev)
WREG32_P(CG_INTGFX_MISC, 0, ~0xFFF00000);
}
-static void rs780_force_voltage_to_high(struct radeon_device *rdev)
+static void rs780_force_voltage(struct radeon_device *rdev, u16 voltage)
{
- struct igp_power_info *pi = rs780_get_pi(rdev);
struct igp_ps *current_state = rs780_get_ps(rdev->pm.dpm.current_ps);
if ((current_state->max_voltage == RS780_VDDC_LEVEL_HIGH) &&
@@ -390,7 +387,7 @@ static void rs780_force_voltage_to_high(struct radeon_device *rdev)
udelay(1);
WREG32_P(FVTHROT_PWM_CTRL_REG0,
- STARTING_PWM_HIGHTIME(pi->max_voltage),
+ STARTING_PWM_HIGHTIME(voltage),
~STARTING_PWM_HIGHTIME_MASK);
WREG32_P(FVTHROT_PWM_CTRL_REG0,
@@ -404,6 +401,26 @@ static void rs780_force_voltage_to_high(struct radeon_device *rdev)
WREG32_P(GFX_MACRO_BYPASS_CNTL, 0, ~SPLL_BYPASS_CNTL);
}
+static void rs780_force_fbdiv(struct radeon_device *rdev, u32 fb_div)
+{
+ struct igp_ps *current_state = rs780_get_ps(rdev->pm.dpm.current_ps);
+
+ if (current_state->sclk_low == current_state->sclk_high)
+ return;
+
+ WREG32_P(GFX_MACRO_BYPASS_CNTL, SPLL_BYPASS_CNTL, ~SPLL_BYPASS_CNTL);
+
+ WREG32_P(FVTHROT_FBDIV_REG2, FORCED_FEEDBACK_DIV(fb_div),
+ ~FORCED_FEEDBACK_DIV_MASK);
+ WREG32_P(FVTHROT_FBDIV_REG1, STARTING_FEEDBACK_DIV(fb_div),
+ ~STARTING_FEEDBACK_DIV_MASK);
+ WREG32_P(FVTHROT_FBDIV_REG1, FORCE_FEEDBACK_DIV, ~FORCE_FEEDBACK_DIV);
+
+ udelay(100);
+
+ WREG32_P(GFX_MACRO_BYPASS_CNTL, 0, ~SPLL_BYPASS_CNTL);
+}
+
static int rs780_set_engine_clock_scaling(struct radeon_device *rdev,
struct radeon_ps *new_ps,
struct radeon_ps *old_ps)
@@ -432,17 +449,13 @@ static int rs780_set_engine_clock_scaling(struct radeon_device *rdev,
if (ret)
return ret;
- WREG32_P(GFX_MACRO_BYPASS_CNTL, SPLL_BYPASS_CNTL, ~SPLL_BYPASS_CNTL);
-
- WREG32_P(FVTHROT_FBDIV_REG2, FORCED_FEEDBACK_DIV(max_dividers.fb_div),
- ~FORCED_FEEDBACK_DIV_MASK);
- WREG32_P(FVTHROT_FBDIV_REG1, STARTING_FEEDBACK_DIV(max_dividers.fb_div),
- ~STARTING_FEEDBACK_DIV_MASK);
- WREG32_P(FVTHROT_FBDIV_REG1, FORCE_FEEDBACK_DIV, ~FORCE_FEEDBACK_DIV);
-
- udelay(100);
+ if ((min_dividers.ref_div != max_dividers.ref_div) ||
+ (min_dividers.post_div != max_dividers.post_div) ||
+ (max_dividers.ref_div != current_max_dividers.ref_div) ||
+ (max_dividers.post_div != current_max_dividers.post_div))
+ return -EINVAL;
- WREG32_P(GFX_MACRO_BYPASS_CNTL, 0, ~SPLL_BYPASS_CNTL);
+ rs780_force_fbdiv(rdev, max_dividers.fb_div);
if (max_dividers.fb_div > min_dividers.fb_div) {
WREG32_P(FVTHROT_FBDIV_REG0,
@@ -486,6 +499,9 @@ static void rs780_activate_engine_clk_scaling(struct radeon_device *rdev,
(new_state->sclk_low == old_state->sclk_low))
return;
+ if (new_state->sclk_high == new_state->sclk_low)
+ return;
+
rs780_clk_scaling_enable(rdev, true);
}
@@ -649,7 +665,7 @@ int rs780_dpm_set_power_state(struct radeon_device *rdev)
rs780_set_uvd_clock_before_set_eng_clock(rdev, new_ps, old_ps);
if (pi->voltage_control) {
- rs780_force_voltage_to_high(rdev);
+ rs780_force_voltage(rdev, pi->max_voltage);
mdelay(5);
}
@@ -717,14 +733,18 @@ static void rs780_parse_pplib_non_clock_info(struct radeon_device *rdev,
if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
- } else if (r600_is_uvd_state(rps->class, rps->class2)) {
- rps->vclk = RS780_DEFAULT_VCLK_FREQ;
- rps->dclk = RS780_DEFAULT_DCLK_FREQ;
} else {
rps->vclk = 0;
rps->dclk = 0;
}
+ if (r600_is_uvd_state(rps->class, rps->class2)) {
+ if ((rps->vclk == 0) || (rps->dclk == 0)) {
+ rps->vclk = RS780_DEFAULT_VCLK_FREQ;
+ rps->dclk = RS780_DEFAULT_DCLK_FREQ;
+ }
+ }
+
if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT)
rdev->pm.dpm.boot_ps = rps;
if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
@@ -986,3 +1006,55 @@ void rs780_dpm_debugfs_print_current_performance_level(struct radeon_device *rde
seq_printf(m, "power level 1 sclk: %u vddc_index: %d\n",
ps->sclk_high, ps->max_voltage);
}
+
+int rs780_dpm_force_performance_level(struct radeon_device *rdev,
+ enum radeon_dpm_forced_level level)
+{
+ struct igp_power_info *pi = rs780_get_pi(rdev);
+ struct radeon_ps *rps = rdev->pm.dpm.current_ps;
+ struct igp_ps *ps = rs780_get_ps(rps);
+ struct atom_clock_dividers dividers;
+ int ret;
+
+ rs780_clk_scaling_enable(rdev, false);
+ rs780_voltage_scaling_enable(rdev, false);
+
+ if (level == RADEON_DPM_FORCED_LEVEL_HIGH) {
+ if (pi->voltage_control)
+ rs780_force_voltage(rdev, pi->max_voltage);
+
+ ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
+ ps->sclk_high, false, &dividers);
+ if (ret)
+ return ret;
+
+ rs780_force_fbdiv(rdev, dividers.fb_div);
+ } else if (level == RADEON_DPM_FORCED_LEVEL_LOW) {
+ ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
+ ps->sclk_low, false, &dividers);
+ if (ret)
+ return ret;
+
+ rs780_force_fbdiv(rdev, dividers.fb_div);
+
+ if (pi->voltage_control)
+ rs780_force_voltage(rdev, pi->min_voltage);
+ } else {
+ if (pi->voltage_control)
+ rs780_force_voltage(rdev, pi->max_voltage);
+
+ if (ps->sclk_high != ps->sclk_low) {
+ WREG32_P(FVTHROT_FBDIV_REG1, 0, ~FORCE_FEEDBACK_DIV);
+ rs780_clk_scaling_enable(rdev, true);
+ }
+
+ if (pi->voltage_control) {
+ rs780_voltage_scaling_enable(rdev, true);
+ rs780_enable_voltage_scaling(rdev, rps);
+ }
+ }
+
+ rdev->pm.dpm.forced_level = level;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 8ea1573..873eb4b 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -209,19 +209,27 @@ static void rv515_mc_init(struct radeon_device *rdev)
uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg)
{
+ unsigned long flags;
uint32_t r;
+ spin_lock_irqsave(&rdev->mc_idx_lock, flags);
WREG32(MC_IND_INDEX, 0x7f0000 | (reg & 0xffff));
r = RREG32(MC_IND_DATA);
WREG32(MC_IND_INDEX, 0);
+ spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
+
return r;
}
void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&rdev->mc_idx_lock, flags);
WREG32(MC_IND_INDEX, 0xff0000 | ((reg) & 0xffff));
WREG32(MC_IND_DATA, (v));
WREG32(MC_IND_INDEX, 0);
+ spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
}
#if defined(CONFIG_DEBUG_FS)
diff --git a/drivers/gpu/drm/radeon/rv6xx_dpm.c b/drivers/gpu/drm/radeon/rv6xx_dpm.c
index ab1f201..5811d27 100644
--- a/drivers/gpu/drm/radeon/rv6xx_dpm.c
+++ b/drivers/gpu/drm/radeon/rv6xx_dpm.c
@@ -1758,8 +1758,6 @@ int rv6xx_dpm_set_power_state(struct radeon_device *rdev)
rv6xx_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps);
- rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO;
-
return 0;
}
diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c
index 8cbb85d..913b025 100644
--- a/drivers/gpu/drm/radeon/rv770_dpm.c
+++ b/drivers/gpu/drm/radeon/rv770_dpm.c
@@ -2064,12 +2064,6 @@ int rv770_dpm_set_power_state(struct radeon_device *rdev)
rv770_program_dcodt_after_state_switch(rdev, new_ps, old_ps);
rv770_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps);
- ret = rv770_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_AUTO);
- if (ret) {
- DRM_ERROR("rv770_dpm_force_performance_level failed\n");
- return ret;
- }
-
return 0;
}
@@ -2147,14 +2141,18 @@ static void rv7xx_parse_pplib_non_clock_info(struct radeon_device *rdev,
if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
- } else if (r600_is_uvd_state(rps->class, rps->class2)) {
- rps->vclk = RV770_DEFAULT_VCLK_FREQ;
- rps->dclk = RV770_DEFAULT_DCLK_FREQ;
} else {
rps->vclk = 0;
rps->dclk = 0;
}
+ if (r600_is_uvd_state(rps->class, rps->class2)) {
+ if ((rps->vclk == 0) || (rps->dclk == 0)) {
+ rps->vclk = RV770_DEFAULT_VCLK_FREQ;
+ rps->dclk = RV770_DEFAULT_DCLK_FREQ;
+ }
+ }
+
if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT)
rdev->pm.dpm.boot_ps = rps;
if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
diff --git a/drivers/gpu/drm/radeon/rv770_smc.c b/drivers/gpu/drm/radeon/rv770_smc.c
index ab95da5..b2a2244 100644
--- a/drivers/gpu/drm/radeon/rv770_smc.c
+++ b/drivers/gpu/drm/radeon/rv770_smc.c
@@ -274,8 +274,8 @@ static const u8 cayman_smc_int_vectors[] =
0x08, 0x72, 0x08, 0x72
};
-int rv770_set_smc_sram_address(struct radeon_device *rdev,
- u16 smc_address, u16 limit)
+static int rv770_set_smc_sram_address(struct radeon_device *rdev,
+ u16 smc_address, u16 limit)
{
u32 addr;
@@ -296,9 +296,10 @@ int rv770_copy_bytes_to_smc(struct radeon_device *rdev,
u16 smc_start_address, const u8 *src,
u16 byte_count, u16 limit)
{
+ unsigned long flags;
u32 data, original_data, extra_shift;
u16 addr;
- int ret;
+ int ret = 0;
if (smc_start_address & 3)
return -EINVAL;
@@ -307,13 +308,14 @@ int rv770_copy_bytes_to_smc(struct radeon_device *rdev,
addr = smc_start_address;
+ spin_lock_irqsave(&rdev->smc_idx_lock, flags);
while (byte_count >= 4) {
/* SMC address space is BE */
data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];
ret = rv770_set_smc_sram_address(rdev, addr, limit);
if (ret)
- return ret;
+ goto done;
WREG32(SMC_SRAM_DATA, data);
@@ -328,7 +330,7 @@ int rv770_copy_bytes_to_smc(struct radeon_device *rdev,
ret = rv770_set_smc_sram_address(rdev, addr, limit);
if (ret)
- return ret;
+ goto done;
original_data = RREG32(SMC_SRAM_DATA);
@@ -346,12 +348,15 @@ int rv770_copy_bytes_to_smc(struct radeon_device *rdev,
ret = rv770_set_smc_sram_address(rdev, addr, limit);
if (ret)
- return ret;
+ goto done;
WREG32(SMC_SRAM_DATA, data);
}
- return 0;
+done:
+ spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
+
+ return ret;
}
static int rv770_program_interrupt_vectors(struct radeon_device *rdev,
@@ -461,12 +466,15 @@ PPSMC_Result rv770_wait_for_smc_inactive(struct radeon_device *rdev)
static void rv770_clear_smc_sram(struct radeon_device *rdev, u16 limit)
{
+ unsigned long flags;
u16 i;
+ spin_lock_irqsave(&rdev->smc_idx_lock, flags);
for (i = 0; i < limit; i += 4) {
rv770_set_smc_sram_address(rdev, i, limit);
WREG32(SMC_SRAM_DATA, 0);
}
+ spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
}
int rv770_load_smc_ucode(struct radeon_device *rdev,
@@ -595,27 +603,29 @@ int rv770_load_smc_ucode(struct radeon_device *rdev,
int rv770_read_smc_sram_dword(struct radeon_device *rdev,
u16 smc_address, u32 *value, u16 limit)
{
+ unsigned long flags;
int ret;
+ spin_lock_irqsave(&rdev->smc_idx_lock, flags);
ret = rv770_set_smc_sram_address(rdev, smc_address, limit);
- if (ret)
- return ret;
-
- *value = RREG32(SMC_SRAM_DATA);
+ if (ret == 0)
+ *value = RREG32(SMC_SRAM_DATA);
+ spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
- return 0;
+ return ret;
}
int rv770_write_smc_sram_dword(struct radeon_device *rdev,
u16 smc_address, u32 value, u16 limit)
{
+ unsigned long flags;
int ret;
+ spin_lock_irqsave(&rdev->smc_idx_lock, flags);
ret = rv770_set_smc_sram_address(rdev, smc_address, limit);
- if (ret)
- return ret;
+ if (ret == 0)
+ WREG32(SMC_SRAM_DATA, value);
+ spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
- WREG32(SMC_SRAM_DATA, value);
-
- return 0;
+ return ret;
}
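
Making rv770_set_smc_sram_address() static enforces that the INDEX/DATA sequence is only ever driven from callers holding smc_idx_lock. A hypothetical hardening sketch (SMC_SRAM_ADDR is assumed here; only SMC_SRAM_DATA appears in the hunks above) would document that invariant to lockdep:

static int demo_set_smc_sram_address(struct radeon_device *rdev,
				     u16 smc_address, u16 limit)
{
	/* every caller owns smc_idx_lock across address + data access */
	lockdep_assert_held(&rdev->smc_idx_lock);

	if (smc_address & 3)
		return -EINVAL;
	if ((smc_address + 3) > limit)
		return -EINVAL;

	WREG32(SMC_SRAM_ADDR, smc_address);
	return 0;
}
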
diff --git a/drivers/gpu/drm/radeon/rv770_smc.h b/drivers/gpu/drm/radeon/rv770_smc.h
index f78d92a..3b2c963 100644
--- a/drivers/gpu/drm/radeon/rv770_smc.h
+++ b/drivers/gpu/drm/radeon/rv770_smc.h
@@ -187,8 +187,6 @@ typedef struct RV770_SMC_STATETABLE RV770_SMC_STATETABLE;
#define RV770_SMC_SOFT_REGISTER_uvd_enabled 0x9C
#define RV770_SMC_SOFT_REGISTER_is_asic_lombok 0xA0
-int rv770_set_smc_sram_address(struct radeon_device *rdev,
- u16 smc_address, u16 limit);
int rv770_copy_bytes_to_smc(struct radeon_device *rdev,
u16 smc_start_address, const u8 *src,
u16 byte_count, u16 limit);
diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h
index 9fe60e5..1ae2771 100644
--- a/drivers/gpu/drm/radeon/rv770d.h
+++ b/drivers/gpu/drm/radeon/rv770d.h
@@ -852,7 +852,7 @@
#define AFMT_VBI_PACKET_CONTROL 0x7608
# define AFMT_GENERIC0_UPDATE (1 << 2)
#define AFMT_INFOFRAME_CONTROL0 0x760c
-# define AFMT_AUDIO_INFO_SOURCE (1 << 6) /* 0 - sound block; 1 - hmdi regs */
+# define AFMT_AUDIO_INFO_SOURCE (1 << 6) /* 0 - sound block; 1 - hdmi regs */
# define AFMT_AUDIO_INFO_UPDATE (1 << 7)
# define AFMT_MPEG_INFO_UPDATE (1 << 10)
#define AFMT_GENERIC0_7 0x7610
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 3e23b75..c354c10 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -83,6 +83,8 @@ extern void si_dma_vm_set_page(struct radeon_device *rdev,
uint64_t pe,
uint64_t addr, unsigned count,
uint32_t incr, uint32_t flags);
+static void si_enable_gui_idle_interrupt(struct radeon_device *rdev,
+ bool enable);
static const u32 verde_rlc_save_restore_register_list[] =
{
@@ -3386,6 +3388,8 @@ static int si_cp_resume(struct radeon_device *rdev)
u32 rb_bufsz;
int r;
+ si_enable_gui_idle_interrupt(rdev, false);
+
WREG32(CP_SEM_WAIT_TIMER, 0x0);
WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);
@@ -3501,6 +3505,8 @@ static int si_cp_resume(struct radeon_device *rdev)
rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
}
+ si_enable_gui_idle_interrupt(rdev, true);
+
return 0;
}
@@ -4888,7 +4894,7 @@ static void si_enable_gfx_cgpg(struct radeon_device *rdev,
{
u32 tmp;
- if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_CG)) {
+ if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG)) {
tmp = RLC_PUD(0x10) | RLC_PDD(0x10) | RLC_TTPD(0x10) | RLC_MSD(0x10);
WREG32(RLC_TTOP_D, tmp);
@@ -5250,6 +5256,7 @@ void si_update_cg(struct radeon_device *rdev,
u32 block, bool enable)
{
if (block & RADEON_CG_BLOCK_GFX) {
+ si_enable_gui_idle_interrupt(rdev, false);
/* order matters! */
if (enable) {
si_enable_mgcg(rdev, true);
@@ -5258,6 +5265,7 @@ void si_update_cg(struct radeon_device *rdev,
si_enable_cgcg(rdev, false);
si_enable_mgcg(rdev, false);
}
+ si_enable_gui_idle_interrupt(rdev, true);
}
if (block & RADEON_CG_BLOCK_MC) {
@@ -5408,7 +5416,7 @@ static void si_init_pg(struct radeon_device *rdev)
si_init_dma_pg(rdev);
}
si_init_ao_cu_mask(rdev);
- if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_CG) {
+ if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG) {
si_init_gfx_cgpg(rdev);
}
si_enable_dma_pg(rdev, true);
@@ -5560,7 +5568,9 @@ static void si_disable_interrupt_state(struct radeon_device *rdev)
{
u32 tmp;
- WREG32(CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+ tmp = RREG32(CP_INT_CNTL_RING0) &
+ (CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+ WREG32(CP_INT_CNTL_RING0, tmp);
WREG32(CP_INT_CNTL_RING1, 0);
WREG32(CP_INT_CNTL_RING2, 0);
tmp = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET) & ~TRAP_ENABLE;
@@ -5685,7 +5695,7 @@ static int si_irq_init(struct radeon_device *rdev)
int si_irq_set(struct radeon_device *rdev)
{
- u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
+ u32 cp_int_cntl;
u32 cp_int_cntl1 = 0, cp_int_cntl2 = 0;
u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
u32 hpd1 = 0, hpd2 = 0, hpd3 = 0, hpd4 = 0, hpd5 = 0, hpd6 = 0;
@@ -5706,6 +5716,9 @@ int si_irq_set(struct radeon_device *rdev)
return 0;
}
+ cp_int_cntl = RREG32(CP_INT_CNTL_RING0) &
+ (CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+
if (!ASIC_IS_NODCE(rdev)) {
hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 5be9b4e..cfe5d4d 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -6075,12 +6075,6 @@ int si_dpm_set_power_state(struct radeon_device *rdev)
return ret;
}
- ret = si_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_AUTO);
- if (ret) {
- DRM_ERROR("si_dpm_force_performance_level failed\n");
- return ret;
- }
-
si_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
RADEON_CG_BLOCK_MC |
RADEON_CG_BLOCK_SDMA |
diff --git a/drivers/gpu/drm/radeon/si_smc.c b/drivers/gpu/drm/radeon/si_smc.c
index 5f524c0..d422a1c 100644
--- a/drivers/gpu/drm/radeon/si_smc.c
+++ b/drivers/gpu/drm/radeon/si_smc.c
@@ -29,8 +29,8 @@
#include "ppsmc.h"
#include "radeon_ucode.h"
-int si_set_smc_sram_address(struct radeon_device *rdev,
- u32 smc_address, u32 limit)
+static int si_set_smc_sram_address(struct radeon_device *rdev,
+ u32 smc_address, u32 limit)
{
if (smc_address & 3)
return -EINVAL;
@@ -47,7 +47,8 @@ int si_copy_bytes_to_smc(struct radeon_device *rdev,
u32 smc_start_address,
const u8 *src, u32 byte_count, u32 limit)
{
- int ret;
+ unsigned long flags;
+ int ret = 0;
u32 data, original_data, addr, extra_shift;
if (smc_start_address & 3)
@@ -57,13 +58,14 @@ int si_copy_bytes_to_smc(struct radeon_device *rdev,
addr = smc_start_address;
+ spin_lock_irqsave(&rdev->smc_idx_lock, flags);
while (byte_count >= 4) {
/* SMC address space is BE */
data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];
ret = si_set_smc_sram_address(rdev, addr, limit);
if (ret)
- return ret;
+ goto done;
WREG32(SMC_IND_DATA_0, data);
@@ -78,7 +80,7 @@ int si_copy_bytes_to_smc(struct radeon_device *rdev,
ret = si_set_smc_sram_address(rdev, addr, limit);
if (ret)
- return ret;
+ goto done;
original_data = RREG32(SMC_IND_DATA_0);
@@ -96,11 +98,15 @@ int si_copy_bytes_to_smc(struct radeon_device *rdev,
ret = si_set_smc_sram_address(rdev, addr, limit);
if (ret)
- return ret;
+ goto done;
WREG32(SMC_IND_DATA_0, data);
}
- return 0;
+
+done:
+ spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
+
+ return ret;
}
void si_start_smc(struct radeon_device *rdev)
@@ -203,6 +209,7 @@ PPSMC_Result si_wait_for_smc_inactive(struct radeon_device *rdev)
int si_load_smc_ucode(struct radeon_device *rdev, u32 limit)
{
+ unsigned long flags;
u32 ucode_start_address;
u32 ucode_size;
const u8 *src;
@@ -241,6 +248,7 @@ int si_load_smc_ucode(struct radeon_device *rdev, u32 limit)
return -EINVAL;
src = (const u8 *)rdev->smc_fw->data;
+ spin_lock_irqsave(&rdev->smc_idx_lock, flags);
WREG32(SMC_IND_INDEX_0, ucode_start_address);
WREG32_P(SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, ~AUTO_INCREMENT_IND_0);
while (ucode_size >= 4) {
@@ -253,6 +261,7 @@ int si_load_smc_ucode(struct radeon_device *rdev, u32 limit)
ucode_size -= 4;
}
WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0);
+ spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
return 0;
}
@@ -260,25 +269,29 @@ int si_load_smc_ucode(struct radeon_device *rdev, u32 limit)
int si_read_smc_sram_dword(struct radeon_device *rdev, u32 smc_address,
u32 *value, u32 limit)
{
+ unsigned long flags;
int ret;
+ spin_lock_irqsave(&rdev->smc_idx_lock, flags);
ret = si_set_smc_sram_address(rdev, smc_address, limit);
- if (ret)
- return ret;
+ if (ret == 0)
+ *value = RREG32(SMC_IND_DATA_0);
+ spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
- *value = RREG32(SMC_IND_DATA_0);
- return 0;
+ return ret;
}
int si_write_smc_sram_dword(struct radeon_device *rdev, u32 smc_address,
u32 value, u32 limit)
{
+ unsigned long flags;
int ret;
+ spin_lock_irqsave(&rdev->smc_idx_lock, flags);
ret = si_set_smc_sram_address(rdev, smc_address, limit);
- if (ret)
- return ret;
+ if (ret == 0)
+ WREG32(SMC_IND_DATA_0, value);
+ spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
- WREG32(SMC_IND_DATA_0, value);
- return 0;
+ return ret;
}
diff --git a/drivers/gpu/drm/radeon/sumo_dpm.c b/drivers/gpu/drm/radeon/sumo_dpm.c
index 864761c..96ea6db8 100644
--- a/drivers/gpu/drm/radeon/sumo_dpm.c
+++ b/drivers/gpu/drm/radeon/sumo_dpm.c
@@ -1319,8 +1319,6 @@ int sumo_dpm_set_power_state(struct radeon_device *rdev)
if (pi->enable_dpm)
sumo_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps);
- rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO;
-
return 0;
}
diff --git a/drivers/gpu/drm/radeon/trinity_dpm.c b/drivers/gpu/drm/radeon/trinity_dpm.c
index b07b7b8..7f998bf 100644
--- a/drivers/gpu/drm/radeon/trinity_dpm.c
+++ b/drivers/gpu/drm/radeon/trinity_dpm.c
@@ -1068,6 +1068,17 @@ static void trinity_update_requested_ps(struct radeon_device *rdev,
pi->requested_rps.ps_priv = &pi->requested_ps;
}
+void trinity_dpm_enable_bapm(struct radeon_device *rdev, bool enable)
+{
+ struct trinity_power_info *pi = trinity_get_pi(rdev);
+
+ if (pi->enable_bapm) {
+ trinity_acquire_mutex(rdev);
+ trinity_dpm_bapm_enable(rdev, enable);
+ trinity_release_mutex(rdev);
+ }
+}
+
int trinity_dpm_enable(struct radeon_device *rdev)
{
struct trinity_power_info *pi = trinity_get_pi(rdev);
@@ -1091,6 +1102,7 @@ int trinity_dpm_enable(struct radeon_device *rdev)
trinity_program_sclk_dpm(rdev);
trinity_start_dpm(rdev);
trinity_wait_for_dpm_enabled(rdev);
+ trinity_dpm_bapm_enable(rdev, false);
trinity_release_mutex(rdev);
if (rdev->irq.installed &&
@@ -1116,6 +1128,7 @@ void trinity_dpm_disable(struct radeon_device *rdev)
trinity_release_mutex(rdev);
return;
}
+ trinity_dpm_bapm_enable(rdev, false);
trinity_disable_clock_power_gating(rdev);
sumo_clear_vc(rdev);
trinity_wait_for_level_0(rdev);
@@ -1212,6 +1225,8 @@ int trinity_dpm_set_power_state(struct radeon_device *rdev)
trinity_acquire_mutex(rdev);
if (pi->enable_dpm) {
+ if (pi->enable_bapm)
+ trinity_dpm_bapm_enable(rdev, rdev->pm.dpm.ac_power);
trinity_set_uvd_clock_before_set_eng_clock(rdev, new_ps, old_ps);
trinity_enable_power_level_0(rdev);
trinity_force_level_0(rdev);
@@ -1221,7 +1236,6 @@ int trinity_dpm_set_power_state(struct radeon_device *rdev)
trinity_force_level_0(rdev);
trinity_unforce_levels(rdev);
trinity_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps);
- rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO;
}
trinity_release_mutex(rdev);
@@ -1854,6 +1868,7 @@ int trinity_dpm_init(struct radeon_device *rdev)
for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++)
pi->at[i] = TRINITY_AT_DFLT;
+ pi->enable_bapm = true;
pi->enable_nbps_policy = true;
pi->enable_sclk_ds = true;
pi->enable_gfx_power_gating = true;
diff --git a/drivers/gpu/drm/radeon/trinity_dpm.h b/drivers/gpu/drm/radeon/trinity_dpm.h
index e82df07..c261657 100644
--- a/drivers/gpu/drm/radeon/trinity_dpm.h
+++ b/drivers/gpu/drm/radeon/trinity_dpm.h
@@ -108,6 +108,7 @@ struct trinity_power_info {
bool enable_auto_thermal_throttling;
bool enable_dpm;
bool enable_sclk_ds;
+ bool enable_bapm;
bool uvd_dpm;
struct radeon_ps current_rps;
struct trinity_ps current_ps;
@@ -118,6 +119,7 @@ struct trinity_power_info {
#define TRINITY_AT_DFLT 30
/* trinity_smc.c */
+int trinity_dpm_bapm_enable(struct radeon_device *rdev, bool enable);
int trinity_dpm_config(struct radeon_device *rdev, bool enable);
int trinity_uvd_dpm_config(struct radeon_device *rdev);
int trinity_dpm_force_state(struct radeon_device *rdev, u32 n);
diff --git a/drivers/gpu/drm/radeon/trinity_smc.c b/drivers/gpu/drm/radeon/trinity_smc.c
index a42d89f..9672bcb 100644
--- a/drivers/gpu/drm/radeon/trinity_smc.c
+++ b/drivers/gpu/drm/radeon/trinity_smc.c
@@ -56,6 +56,14 @@ static int trinity_notify_message_to_smu(struct radeon_device *rdev, u32 id)
return 0;
}
+int trinity_dpm_bapm_enable(struct radeon_device *rdev, bool enable)
+{
+ if (enable)
+ return trinity_notify_message_to_smu(rdev, PPSMC_MSG_EnableBAPM);
+ else
+ return trinity_notify_message_to_smu(rdev, PPSMC_MSG_DisableBAPM);
+}
+
int trinity_dpm_config(struct radeon_device *rdev, bool enable)
{
if (enable)
diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c
index 58a5f32..a868176 100644
--- a/drivers/gpu/drm/ttm/ttm_object.c
+++ b/drivers/gpu/drm/ttm/ttm_object.c
@@ -218,7 +218,7 @@ struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
uint32_t key)
{
struct ttm_object_device *tdev = tfile->tdev;
- struct ttm_base_object *base;
+ struct ttm_base_object *uninitialized_var(base);
struct drm_hash_item *hash;
int ret;
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index bd2a3b4..863bef9 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -377,28 +377,26 @@ out:
return nr_free;
}
-/* Get good estimation how many pages are free in pools */
-static int ttm_pool_get_num_unused_pages(void)
-{
- unsigned i;
- int total = 0;
- for (i = 0; i < NUM_POOLS; ++i)
- total += _manager->pools[i].npages;
-
- return total;
-}
-
/**
* Callback for mm to request pool to reduce number of page held.
+ *
+ * XXX: (dchinner) Deadlock warning!
+ *
+ * ttm_page_pool_free() does memory allocation using GFP_KERNEL. That means
+ * this can deadlock when called with a sc->gfp_mask that is not equal to
+ * GFP_KERNEL.
+ *
+ * This code is crying out for a shrinker per pool....
*/
-static int ttm_pool_mm_shrink(struct shrinker *shrink,
- struct shrink_control *sc)
+static unsigned long
+ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
static atomic_t start_pool = ATOMIC_INIT(0);
unsigned i;
unsigned pool_offset = atomic_add_return(1, &start_pool);
struct ttm_page_pool *pool;
int shrink_pages = sc->nr_to_scan;
+ unsigned long freed = 0;
pool_offset = pool_offset % NUM_POOLS;
/* select start pool in round robin fashion */
@@ -408,14 +406,28 @@ static int ttm_pool_mm_shrink(struct shrinker *shrink,
break;
pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
shrink_pages = ttm_page_pool_free(pool, nr_free);
+ freed += nr_free - shrink_pages;
}
- /* return estimated number of unused pages in pool */
- return ttm_pool_get_num_unused_pages();
+ return freed;
+}
+
+
+static unsigned long
+ttm_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
+{
+ unsigned i;
+ unsigned long count = 0;
+
+ for (i = 0; i < NUM_POOLS; ++i)
+ count += _manager->pools[i].npages;
+
+ return count;
}
static void ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager)
{
- manager->mm_shrink.shrink = &ttm_pool_mm_shrink;
+ manager->mm_shrink.count_objects = ttm_pool_shrink_count;
+ manager->mm_shrink.scan_objects = ttm_pool_shrink_scan;
manager->mm_shrink.seeks = 1;
register_shrinker(&manager->mm_shrink);
}
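
This conversion targets the shrinker API split: ->count_objects() gives a cheap population estimate, and ->scan_objects() does the actual freeing and reports how much went (or SHRINK_STOP when it cannot make progress, as the DMA pool variant below does). A self-contained sketch with hypothetical demo_* names:

static unsigned long demo_cached;	/* hypothetical pool population */

static unsigned long demo_count(struct shrinker *shrink,
				struct shrink_control *sc)
{
	return demo_cached;		/* estimate only, no freeing here */
}

static unsigned long demo_scan(struct shrinker *shrink,
			       struct shrink_control *sc)
{
	unsigned long nr = min(demo_cached, sc->nr_to_scan);

	demo_cached -= nr;		/* pretend we released nr objects */
	return nr;			/* report what was actually freed */
}

static struct shrinker demo_shrinker = {
	.count_objects	= demo_count,
	.scan_objects	= demo_scan,
	.seeks		= DEFAULT_SEEKS,
};
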
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
index b8b3943..7957bee 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -918,19 +918,6 @@ int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev)
}
EXPORT_SYMBOL_GPL(ttm_dma_populate);
-/* Get good estimation how many pages are free in pools */
-static int ttm_dma_pool_get_num_unused_pages(void)
-{
- struct device_pools *p;
- unsigned total = 0;
-
- mutex_lock(&_manager->lock);
- list_for_each_entry(p, &_manager->pools, pools)
- total += p->pool->npages_free;
- mutex_unlock(&_manager->lock);
- return total;
-}
-
/* Put all pages in pages list to correct pool to wait for reuse */
void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
{
@@ -1002,18 +989,29 @@ EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
/**
* Callback for mm to request pool to reduce number of page held.
+ *
+ * XXX: (dchinner) Deadlock warning!
+ *
+ * ttm_dma_page_pool_free() does GFP_KERNEL memory allocation, and so attention
+ * needs to be paid to sc->gfp_mask to determine if this can be done or not.
+ * GFP_KERNEL memory allocation in a GFP_ATOMIC reclaim context would be
+ * really bad.
+ *
+ * I'm getting sadder as I hear more pathetic whimpers about needing
+ * per-pool shrinkers.
*/
-static int ttm_dma_pool_mm_shrink(struct shrinker *shrink,
- struct shrink_control *sc)
+static unsigned long
+ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
static atomic_t start_pool = ATOMIC_INIT(0);
unsigned idx = 0;
unsigned pool_offset = atomic_add_return(1, &start_pool);
unsigned shrink_pages = sc->nr_to_scan;
struct device_pools *p;
+ unsigned long freed = 0;
if (list_empty(&_manager->pools))
- return 0;
+ return SHRINK_STOP;
mutex_lock(&_manager->lock);
pool_offset = pool_offset % _manager->npools;
@@ -1029,18 +1027,33 @@ static int ttm_dma_pool_mm_shrink(struct shrinker *shrink,
continue;
nr_free = shrink_pages;
shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free);
+ freed += nr_free - shrink_pages;
+
pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n",
p->pool->dev_name, p->pool->name, current->pid,
nr_free, shrink_pages);
}
mutex_unlock(&_manager->lock);
- /* return estimated number of unused pages in pool */
- return ttm_dma_pool_get_num_unused_pages();
+ return freed;
+}
+
+static unsigned long
+ttm_dma_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
+{
+ struct device_pools *p;
+ unsigned long count = 0;
+
+ mutex_lock(&_manager->lock);
+ list_for_each_entry(p, &_manager->pools, pools)
+ count += p->pool->npages_free;
+ mutex_unlock(&_manager->lock);
+ return count;
}
static void ttm_dma_pool_mm_shrink_init(struct ttm_pool_manager *manager)
{
- manager->mm_shrink.shrink = &ttm_dma_pool_mm_shrink;
+ manager->mm_shrink.count_objects = ttm_dma_pool_shrink_count;
+ manager->mm_shrink.scan_objects = &ttm_dma_pool_shrink_scan;
manager->mm_shrink.seeks = 1;
register_shrinker(&manager->mm_shrink);
}
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 5e93a52..210d503 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -170,7 +170,7 @@ void ttm_tt_destroy(struct ttm_tt *ttm)
ttm_tt_unbind(ttm);
}
- if (likely(ttm->pages != NULL)) {
+ if (ttm->state == tt_unbound) {
ttm->bdev->driver->ttm_tt_unpopulate(ttm);
}
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index 8dbe9d0..8bf6461 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -97,7 +97,6 @@ int udl_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address, page);
switch (ret) {
case -EAGAIN:
- set_need_resched();
case 0:
case -ERESTARTSYS:
return VM_FAULT_NOPAGE;
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
index e893f6e..af02597 100644
--- a/drivers/gpu/vga/vgaarb.c
+++ b/drivers/gpu/vga/vgaarb.c
@@ -257,9 +257,9 @@ static struct vga_device *__vga_tryget(struct vga_device *vgadev,
if (!conflict->bridge_has_one_vga) {
vga_irq_set_state(conflict, false);
flags |= PCI_VGA_STATE_CHANGE_DECODES;
- if (lwants & (VGA_RSRC_LEGACY_MEM|VGA_RSRC_NORMAL_MEM))
+ if (match & (VGA_RSRC_LEGACY_MEM|VGA_RSRC_NORMAL_MEM))
pci_bits |= PCI_COMMAND_MEMORY;
- if (lwants & (VGA_RSRC_LEGACY_IO|VGA_RSRC_NORMAL_IO))
+ if (match & (VGA_RSRC_LEGACY_IO|VGA_RSRC_NORMAL_IO))
pci_bits |= PCI_COMMAND_IO;
}
@@ -267,11 +267,11 @@ static struct vga_device *__vga_tryget(struct vga_device *vgadev,
flags |= PCI_VGA_STATE_CHANGE_BRIDGE;
pci_set_vga_state(conflict->pdev, false, pci_bits, flags);
- conflict->owns &= ~lwants;
+ conflict->owns &= ~match;
/* If he also owned non-legacy, that is no longer the case */
- if (lwants & VGA_RSRC_LEGACY_MEM)
+ if (match & VGA_RSRC_LEGACY_MEM)
conflict->owns &= ~VGA_RSRC_NORMAL_MEM;
- if (lwants & VGA_RSRC_LEGACY_IO)
+ if (match & VGA_RSRC_LEGACY_IO)
conflict->owns &= ~VGA_RSRC_NORMAL_IO;
}
@@ -644,10 +644,12 @@ bail:
static inline void vga_update_device_decodes(struct vga_device *vgadev,
int new_decodes)
{
- int old_decodes;
- struct vga_device *new_vgadev, *conflict;
+ int old_decodes, decodes_removed, decodes_unlocked;
old_decodes = vgadev->decodes;
+ decodes_removed = ~new_decodes & old_decodes;
+ decodes_unlocked = vgadev->locks & decodes_removed;
+ vgadev->owns &= ~decodes_removed;
vgadev->decodes = new_decodes;
pr_info("vgaarb: device changed decodes: PCI:%s,olddecodes=%s,decodes=%s:owns=%s\n",
@@ -656,31 +658,22 @@ static inline void vga_update_device_decodes(struct vga_device *vgadev,
vga_iostate_to_str(vgadev->decodes),
vga_iostate_to_str(vgadev->owns));
-
- /* if we own the decodes we should move them along to
- another card */
- if ((vgadev->owns & old_decodes) && (vga_count > 1)) {
- /* set us to own nothing */
- vgadev->owns &= ~old_decodes;
- list_for_each_entry(new_vgadev, &vga_list, list) {
- if ((new_vgadev != vgadev) &&
- (new_vgadev->decodes & VGA_RSRC_LEGACY_MASK)) {
- pr_info("vgaarb: transferring owner from PCI:%s to PCI:%s\n", pci_name(vgadev->pdev), pci_name(new_vgadev->pdev));
- conflict = __vga_tryget(new_vgadev, VGA_RSRC_LEGACY_MASK);
- if (!conflict)
- __vga_put(new_vgadev, VGA_RSRC_LEGACY_MASK);
- break;
- }
- }
+ /* if we removed locked decodes, the lock count drops to zero and we release them */
+ if (decodes_unlocked) {
+ if (decodes_unlocked & VGA_RSRC_LEGACY_IO)
+ vgadev->io_lock_cnt = 0;
+ if (decodes_unlocked & VGA_RSRC_LEGACY_MEM)
+ vgadev->mem_lock_cnt = 0;
+ __vga_put(vgadev, decodes_unlocked);
}
/* change decodes counter */
- if (old_decodes != new_decodes) {
- if (new_decodes & (VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM))
- vga_decode_count++;
- else
- vga_decode_count--;
- }
+ if (old_decodes & VGA_RSRC_LEGACY_MASK &&
+ !(new_decodes & VGA_RSRC_LEGACY_MASK))
+ vga_decode_count--;
+ if (!(old_decodes & VGA_RSRC_LEGACY_MASK) &&
+ new_decodes & VGA_RSRC_LEGACY_MASK)
+ vga_decode_count++;
pr_debug("vgaarb: decoding count now is: %d\n", vga_decode_count);
}
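
A worked example of the new bitmask bookkeeping, with hypothetical values (the VGA_RSRC_* flags come from <linux/vgaarb.h>):

static void demo_decode_update(void)
{
	unsigned int old_decodes = VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM;
	unsigned int new_decodes = VGA_RSRC_LEGACY_MEM;
	unsigned int locks       = VGA_RSRC_LEGACY_IO;

	unsigned int decodes_removed  = ~new_decodes & old_decodes;
	unsigned int decodes_unlocked = locks & decodes_removed;

	/* decodes_removed == VGA_RSRC_LEGACY_IO: the device stops decoding
	 * legacy IO and loses ownership of it; since it also held a lock
	 * on it, io_lock_cnt is reset and __vga_put() releases it. */
	(void)decodes_unlocked;
}
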
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 3d7c9f6..71b70e3 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -773,7 +773,7 @@ config HID_ZYDACRON
config HID_SENSOR_HUB
tristate "HID Sensors framework support"
- depends on HID && GENERIC_HARDIRQS
+ depends on HID
select MFD_CORE
default n
---help---
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index ae88a97..b8470b1 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -94,7 +94,6 @@ EXPORT_SYMBOL_GPL(hid_register_report);
static struct hid_field *hid_register_field(struct hid_report *report, unsigned usages, unsigned values)
{
struct hid_field *field;
- int i;
if (report->maxfield == HID_MAX_FIELDS) {
hid_err(report->device, "too many fields in report\n");
@@ -113,9 +112,6 @@ static struct hid_field *hid_register_field(struct hid_report *report, unsigned
field->value = (s32 *)(field->usage + usages);
field->report = report;
- for (i = 0; i < usages; i++)
- field->usage[i].usage_index = i;
-
return field;
}
@@ -226,9 +222,9 @@ static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsign
{
struct hid_report *report;
struct hid_field *field;
- int usages;
+ unsigned usages;
unsigned offset;
- int i;
+ unsigned i;
report = hid_register_report(parser->device, report_type, parser->global.report_id);
if (!report) {
@@ -255,7 +251,8 @@ static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsign
if (!parser->local.usage_index) /* Ignore padding fields */
return 0;
- usages = max_t(int, parser->local.usage_index, parser->global.report_count);
+ usages = max_t(unsigned, parser->local.usage_index,
+ parser->global.report_count);
field = hid_register_field(report, usages, parser->global.report_count);
if (!field)
@@ -266,13 +263,14 @@ static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsign
field->application = hid_lookup_collection(parser, HID_COLLECTION_APPLICATION);
for (i = 0; i < usages; i++) {
- int j = i;
+ unsigned j = i;
/* Duplicate the last usage we parsed if we have excess values */
if (i >= parser->local.usage_index)
j = parser->local.usage_index - 1;
field->usage[i].hid = parser->local.usage[j];
field->usage[i].collection_index =
parser->local.collection_index[j];
+ field->usage[i].usage_index = i;
}
field->maxusage = usages;
@@ -801,6 +799,64 @@ int hid_parse_report(struct hid_device *hid, __u8 *start, unsigned size)
}
EXPORT_SYMBOL_GPL(hid_parse_report);
+static const char * const hid_report_names[] = {
+ "HID_INPUT_REPORT",
+ "HID_OUTPUT_REPORT",
+ "HID_FEATURE_REPORT",
+};
+/**
+ * hid_validate_values - validate existing device report's value indexes
+ *
+ * @hid: hid device
+ * @type: which report type to examine
+ * @id: which report ID to examine (0 for first)
+ * @field_index: which report field to examine
+ * @report_counts: expected number of values
+ *
+ * Validate the number of values in a given field of a given report, after
+ * parsing.
+ */
+struct hid_report *hid_validate_values(struct hid_device *hid,
+ unsigned int type, unsigned int id,
+ unsigned int field_index,
+ unsigned int report_counts)
+{
+ struct hid_report *report;
+
+ if (type > HID_FEATURE_REPORT) {
+ hid_err(hid, "invalid HID report type %u\n", type);
+ return NULL;
+ }
+
+ if (id >= HID_MAX_IDS) {
+ hid_err(hid, "invalid HID report id %u\n", id);
+ return NULL;
+ }
+
+ /*
+ * Explicitly not using hid_get_report() here since it depends on
+ * ->numbered being checked, which may not always be the case when
+ * drivers go to access report values.
+ */
+ report = hid->report_enum[type].report_id_hash[id];
+ if (!report) {
+ hid_err(hid, "missing %s %u\n", hid_report_names[type], id);
+ return NULL;
+ }
+ if (report->maxfield <= field_index) {
+ hid_err(hid, "not enough fields in %s %u\n",
+ hid_report_names[type], id);
+ return NULL;
+ }
+ if (report->field[field_index]->report_count < report_counts) {
+ hid_err(hid, "not enough values in %s %u field %u\n",
+ hid_report_names[type], id, field_index);
+ return NULL;
+ }
+ return report;
+}
+EXPORT_SYMBOL_GPL(hid_validate_values);
+
/**
* hid_open_report - open a driver-specific device report
*
@@ -1296,7 +1352,7 @@ int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, int size,
goto out;
}
- if (hid->claimed != HID_CLAIMED_HIDRAW) {
+ if (hid->claimed != HID_CLAIMED_HIDRAW && report->maxfield) {
for (a = 0; a < report->maxfield; a++)
hid_input_field(hid, report->field[a], cdata, interrupt);
hdrv = hid->driver;
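
The follow-on hunks convert the force-feedback drivers below to this helper; the intended call pattern, condensed (demo_ff_init is hypothetical):

static int demo_ff_init(struct hid_device *hid)
{
	/* first output report (id 0), field 0, at least 7 values */
	if (!hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 7))
		return -ENODEV;

	/* report->field[0]->value[0..6] is now safe to dereference */
	return 0;
}
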
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index b420f4a..8741d95 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -485,6 +485,10 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
if (field->flags & HID_MAIN_ITEM_CONSTANT)
goto ignore;
+ /* Ignore if report count is out of bounds. */
+ if (field->report_count < 1)
+ goto ignore;
+
/* only LED usages are supported in output fields */
if (field->report_type == HID_OUTPUT_REPORT &&
(usage->hid & HID_USAGE_PAGE) != HID_UP_LED) {
@@ -1236,7 +1240,11 @@ static void report_features(struct hid_device *hid)
rep_enum = &hid->report_enum[HID_FEATURE_REPORT];
list_for_each_entry(rep, &rep_enum->report_list, list)
- for (i = 0; i < rep->maxfield; i++)
+ for (i = 0; i < rep->maxfield; i++) {
+ /* Ignore if report count is out of bounds. */
+ if (rep->field[i]->report_count < 1)
+ continue;
+
for (j = 0; j < rep->field[i]->maxusage; j++) {
/* Verify if Battery Strength feature is available */
hidinput_setup_battery(hid, HID_FEATURE_REPORT, rep->field[i]);
@@ -1245,6 +1253,7 @@ static void report_features(struct hid_device *hid)
drv->feature_mapping(hid, rep->field[i],
rep->field[i]->usage + j);
}
+ }
}
static struct hid_input *hidinput_allocate(struct hid_device *hid)
diff --git a/drivers/hid/hid-lenovo-tpkbd.c b/drivers/hid/hid-lenovo-tpkbd.c
index 07837f5..31cf29a 100644
--- a/drivers/hid/hid-lenovo-tpkbd.c
+++ b/drivers/hid/hid-lenovo-tpkbd.c
@@ -339,7 +339,15 @@ static int tpkbd_probe_tp(struct hid_device *hdev)
struct tpkbd_data_pointer *data_pointer;
size_t name_sz = strlen(dev_name(dev)) + 16;
char *name_mute, *name_micmute;
- int ret;
+ int i, ret;
+
+ /* Validate required reports. */
+ for (i = 0; i < 4; i++) {
+ if (!hid_validate_values(hdev, HID_FEATURE_REPORT, 4, i, 1))
+ return -ENODEV;
+ }
+ if (!hid_validate_values(hdev, HID_OUTPUT_REPORT, 3, 0, 2))
+ return -ENODEV;
if (sysfs_create_group(&hdev->dev.kobj,
&tpkbd_attr_group_pointer)) {
@@ -406,22 +414,27 @@ static int tpkbd_probe(struct hid_device *hdev,
ret = hid_parse(hdev);
if (ret) {
hid_err(hdev, "hid_parse failed\n");
- goto err_free;
+ goto err;
}
ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
if (ret) {
hid_err(hdev, "hid_hw_start failed\n");
- goto err_free;
+ goto err;
}
uhdev = (struct usbhid_device *) hdev->driver_data;
- if (uhdev->ifnum == 1)
- return tpkbd_probe_tp(hdev);
+ if (uhdev->ifnum == 1) {
+ ret = tpkbd_probe_tp(hdev);
+ if (ret)
+ goto err_hid;
+ }
return 0;
-err_free:
+err_hid:
+ hid_hw_stop(hdev);
+err:
return ret;
}
diff --git a/drivers/hid/hid-lg2ff.c b/drivers/hid/hid-lg2ff.c
index b3cd150..1a42eaa 100644
--- a/drivers/hid/hid-lg2ff.c
+++ b/drivers/hid/hid-lg2ff.c
@@ -64,26 +64,13 @@ int lg2ff_init(struct hid_device *hid)
struct hid_report *report;
struct hid_input *hidinput = list_entry(hid->inputs.next,
struct hid_input, list);
- struct list_head *report_list =
- &hid->report_enum[HID_OUTPUT_REPORT].report_list;
struct input_dev *dev = hidinput->input;
int error;
- if (list_empty(report_list)) {
- hid_err(hid, "no output report found\n");
+ /* Check that the report looks ok */
+ report = hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 7);
+ if (!report)
return -ENODEV;
- }
-
- report = list_entry(report_list->next, struct hid_report, list);
-
- if (report->maxfield < 1) {
- hid_err(hid, "output report is empty\n");
- return -ENODEV;
- }
- if (report->field[0]->report_count < 7) {
- hid_err(hid, "not enough values in the field\n");
- return -ENODEV;
- }
lg2ff = kmalloc(sizeof(struct lg2ff_device), GFP_KERNEL);
if (!lg2ff)
diff --git a/drivers/hid/hid-lg3ff.c b/drivers/hid/hid-lg3ff.c
index e52f181..8c2da18 100644
--- a/drivers/hid/hid-lg3ff.c
+++ b/drivers/hid/hid-lg3ff.c
@@ -66,10 +66,11 @@ static int hid_lg3ff_play(struct input_dev *dev, void *data,
int x, y;
/*
- * Maxusage should always be 63 (maximum fields)
- * likely a better way to ensure this data is clean
+ * Available values in the field should always be 63, but we only use up to
+ * 35. Rather than relying on that, clear the entire area, however big it is.
*/
- memset(report->field[0]->value, 0, sizeof(__s32)*report->field[0]->maxusage);
+ memset(report->field[0]->value, 0,
+ sizeof(__s32) * report->field[0]->report_count);
switch (effect->type) {
case FF_CONSTANT:
@@ -129,32 +130,14 @@ static const signed short ff3_joystick_ac[] = {
int lg3ff_init(struct hid_device *hid)
{
struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list);
- struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
struct input_dev *dev = hidinput->input;
- struct hid_report *report;
- struct hid_field *field;
const signed short *ff_bits = ff3_joystick_ac;
int error;
int i;
- /* Find the report to use */
- if (list_empty(report_list)) {
- hid_err(hid, "No output report found\n");
- return -1;
- }
-
/* Check that the report looks ok */
- report = list_entry(report_list->next, struct hid_report, list);
- if (!report) {
- hid_err(hid, "NULL output report\n");
- return -1;
- }
-
- field = report->field[0];
- if (!field) {
- hid_err(hid, "NULL field\n");
- return -1;
- }
+ if (!hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 35))
+ return -ENODEV;
/* Assume single fixed device G940 */
for (i = 0; ff_bits[i] >= 0; i++)
diff --git a/drivers/hid/hid-lg4ff.c b/drivers/hid/hid-lg4ff.c
index 0ddae2a..8782fe1 100644
--- a/drivers/hid/hid-lg4ff.c
+++ b/drivers/hid/hid-lg4ff.c
@@ -484,34 +484,16 @@ static enum led_brightness lg4ff_led_get_brightness(struct led_classdev *led_cde
int lg4ff_init(struct hid_device *hid)
{
struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list);
- struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
struct input_dev *dev = hidinput->input;
- struct hid_report *report;
- struct hid_field *field;
struct lg4ff_device_entry *entry;
struct lg_drv_data *drv_data;
struct usb_device_descriptor *udesc;
int error, i, j;
__u16 bcdDevice, rev_maj, rev_min;
- /* Find the report to use */
- if (list_empty(report_list)) {
- hid_err(hid, "No output report found\n");
- return -1;
- }
-
/* Check that the report looks ok */
- report = list_entry(report_list->next, struct hid_report, list);
- if (!report) {
- hid_err(hid, "NULL output report\n");
+ if (!hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 7))
return -1;
- }
-
- field = report->field[0];
- if (!field) {
- hid_err(hid, "NULL field\n");
- return -1;
- }
/* Check what wheel has been connected */
for (i = 0; i < ARRAY_SIZE(lg4ff_devices); i++) {
diff --git a/drivers/hid/hid-lgff.c b/drivers/hid/hid-lgff.c
index d7ea8c8..e1394af 100644
--- a/drivers/hid/hid-lgff.c
+++ b/drivers/hid/hid-lgff.c
@@ -128,27 +128,14 @@ static void hid_lgff_set_autocenter(struct input_dev *dev, u16 magnitude)
int lgff_init(struct hid_device* hid)
{
struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list);
- struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
struct input_dev *dev = hidinput->input;
- struct hid_report *report;
- struct hid_field *field;
const signed short *ff_bits = ff_joystick;
int error;
int i;
- /* Find the report to use */
- if (list_empty(report_list)) {
- hid_err(hid, "No output report found\n");
- return -1;
- }
-
/* Check that the report looks ok */
- report = list_entry(report_list->next, struct hid_report, list);
- field = report->field[0];
- if (!field) {
- hid_err(hid, "NULL field\n");
- return -1;
- }
+ if (!hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 7))
+ return -ENODEV;
for (i = 0; i < ARRAY_SIZE(devices); i++) {
if (dev->id.vendor == devices[i].idVendor &&
diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
index 7800b14..2e53024 100644
--- a/drivers/hid/hid-logitech-dj.c
+++ b/drivers/hid/hid-logitech-dj.c
@@ -461,7 +461,7 @@ static int logi_dj_recv_send_report(struct dj_receiver_dev *djrcv_dev,
struct hid_report *report;
struct hid_report_enum *output_report_enum;
u8 *data = (u8 *)(&dj_report->device_index);
- int i;
+ unsigned int i;
output_report_enum = &hdev->report_enum[HID_OUTPUT_REPORT];
report = output_report_enum->report_id_hash[REPORT_ID_DJ_SHORT];
@@ -471,7 +471,7 @@ static int logi_dj_recv_send_report(struct dj_receiver_dev *djrcv_dev,
return -ENODEV;
}
- for (i = 0; i < report->field[0]->report_count; i++)
+ for (i = 0; i < DJREPORT_SHORT_LENGTH - 1; i++)
report->field[0]->value[i] = data[i];
hid_hw_request(hdev, report, HID_REQ_SET_REPORT);
@@ -791,6 +791,12 @@ static int logi_dj_probe(struct hid_device *hdev,
goto hid_parse_fail;
}
+ if (!hid_validate_values(hdev, HID_OUTPUT_REPORT, REPORT_ID_DJ_SHORT,
+ 0, DJREPORT_SHORT_LENGTH - 1)) {
+ retval = -ENODEV;
+ goto hid_parse_fail;
+ }
+
/* Starts the usb device and connects to upper interfaces hiddev and
* hidraw */
retval = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
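
Two changes cooperate here: probe now verifies that REPORT_ID_DJ_SHORT carries at least DJREPORT_SHORT_LENGTH - 1 values, and the copy loop is bounded by that same constant instead of the device-supplied report_count. A sketch of the overrun the old loop allowed, assuming a hostile descriptor:

/* Hypothetical illustration: dj_report is a fixed-size structure, so
 * "data" points at DJREPORT_SHORT_LENGTH - 1 usable bytes.  A crafted
 * descriptor could previously declare report_count much larger than
 * that, making the old loop read past the end of dj_report:
 *
 *	u8 *data = (u8 *)(&dj_report->device_index);
 *	for (i = 0; i < report->field[0]->report_count; i++)	// device-controlled
 *		report->field[0]->value[i] = data[i];		// reads off the end
 *
 * Bounding the loop by the structure size, after validating at probe
 * time that the report really has that many values, removes both the
 * out-of-bounds read and the out-of-bounds write. */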
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index ac28f08..5e5fe1b 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -101,9 +101,9 @@ struct mt_device {
unsigned last_slot_field; /* the last field of a slot */
unsigned mt_report_id; /* the report ID of the multitouch device */
unsigned pen_report_id; /* the report ID of the pen device */
- __s8 inputmode; /* InputMode HID feature, -1 if non-existent */
- __s8 inputmode_index; /* InputMode HID feature index in the report */
- __s8 maxcontact_report_id; /* Maximum Contact Number HID feature,
+ __s16 inputmode; /* InputMode HID feature, -1 if non-existent */
+ __s16 inputmode_index; /* InputMode HID feature index in the report */
+ __s16 maxcontact_report_id; /* Maximum Contact Number HID feature,
-1 if non-existent */
__u8 num_received; /* how many contacts we received */
__u8 num_expected; /* expected last contact index */
@@ -312,20 +312,18 @@ static void mt_feature_mapping(struct hid_device *hdev,
struct hid_field *field, struct hid_usage *usage)
{
struct mt_device *td = hid_get_drvdata(hdev);
- int i;
switch (usage->hid) {
case HID_DG_INPUTMODE:
- td->inputmode = field->report->id;
- td->inputmode_index = 0; /* has to be updated below */
-
- for (i=0; i < field->maxusage; i++) {
- if (field->usage[i].hid == usage->hid) {
- td->inputmode_index = i;
- break;
- }
+ /* Ignore if value index is out of bounds. */
+ if (usage->usage_index >= field->report_count) {
+ dev_err(&hdev->dev, "HID_DG_INPUTMODE out of range\n");
+ break;
}
+ td->inputmode = field->report->id;
+ td->inputmode_index = usage->usage_index;
+
break;
case HID_DG_CONTACTMAX:
td->maxcontact_report_id = field->report->id;
@@ -511,6 +509,10 @@ static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
mt_store_field(usage, td, hi);
return 1;
case HID_DG_CONTACTCOUNT:
+ /* Ignore if indexes are out of bounds. */
+ if (field->index >= field->report->maxfield ||
+ usage->usage_index >= field->report_count)
+ return 1;
td->cc_index = field->index;
td->cc_value_index = usage->usage_index;
return 1;
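
The widening to __s16 and the new bounds checks attack the same problem from two sides: HID report IDs span 0..255 plus a -1 "not present" sentinel, which does not fit in __s8, and indexes parsed from the descriptor must be range-checked before they are cached for later use. A minimal sketch of the caching pattern, with a hypothetical helper name:

/* Hypothetical helper illustrating the pattern applied above: never
 * cache a descriptor-derived index without checking it against the
 * report's declared bounds first. */
static bool mt_cache_usage_index(struct hid_field *field,
				 struct hid_usage *usage, __s16 *cached)
{
	if (usage->usage_index >= field->report_count)
		return false;	/* descriptor lied; ignore this usage */

	*cached = usage->usage_index;	/* fits: __s16 holds 0..255 and -1 */
	return true;
}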
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
index 30dbb6b..b18320d 100644
--- a/drivers/hid/hid-sony.c
+++ b/drivers/hid/hid-sony.c
@@ -537,6 +537,10 @@ static int buzz_init(struct hid_device *hdev)
drv_data = hid_get_drvdata(hdev);
BUG_ON(!(drv_data->quirks & BUZZ_CONTROLLER));
+ /* Validate expected report characteristics. */
+ if (!hid_validate_values(hdev, HID_OUTPUT_REPORT, 0, 0, 7))
+ return -ENODEV;
+
buzz = kzalloc(sizeof(*buzz), GFP_KERNEL);
if (!buzz) {
hid_err(hdev, "Insufficient memory, cannot allocate driver data\n");
diff --git a/drivers/hid/hid-steelseries.c b/drivers/hid/hid-steelseries.c
index d164911..29f328f 100644
--- a/drivers/hid/hid-steelseries.c
+++ b/drivers/hid/hid-steelseries.c
@@ -249,6 +249,11 @@ static int steelseries_srws1_probe(struct hid_device *hdev,
goto err_free;
}
+ if (!hid_validate_values(hdev, HID_OUTPUT_REPORT, 0, 0, 16)) {
+ ret = -ENODEV;
+ goto err_free;
+ }
+
ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
if (ret) {
hid_err(hdev, "hw start failed\n");
diff --git a/drivers/hid/hid-zpff.c b/drivers/hid/hid-zpff.c
index 6ec28a3..a29756c 100644
--- a/drivers/hid/hid-zpff.c
+++ b/drivers/hid/hid-zpff.c
@@ -68,21 +68,13 @@ static int zpff_init(struct hid_device *hid)
struct hid_report *report;
struct hid_input *hidinput = list_entry(hid->inputs.next,
struct hid_input, list);
- struct list_head *report_list =
- &hid->report_enum[HID_OUTPUT_REPORT].report_list;
struct input_dev *dev = hidinput->input;
- int error;
+ int i, error;
- if (list_empty(report_list)) {
- hid_err(hid, "no output report found\n");
- return -ENODEV;
- }
-
- report = list_entry(report_list->next, struct hid_report, list);
-
- if (report->maxfield < 4) {
- hid_err(hid, "not enough fields in report\n");
- return -ENODEV;
+ for (i = 0; i < 4; i++) {
+ report = hid_validate_values(hid, HID_OUTPUT_REPORT, 0, i, 1);
+ if (!report)
+ return -ENODEV;
}
zpff = kzalloc(sizeof(struct zpff_device), GFP_KERNEL);
diff --git a/drivers/hwmon/amc6821.c b/drivers/hwmon/amc6821.c
index 4fe49d2..eea8172 100644
--- a/drivers/hwmon/amc6821.c
+++ b/drivers/hwmon/amc6821.c
@@ -364,7 +364,7 @@ static ssize_t set_pwm1_enable(
if (config < 0) {
dev_err(&client->dev,
"Error reading configuration register, aborting.\n");
- return -EIO;
+ return config;
}
switch (val) {
@@ -416,11 +416,9 @@ static ssize_t get_temp_auto_point_temp(
case 1:
return sprintf(buf, "%d\n",
data->temp1_auto_point_temp[ix] * 1000);
- break;
case 2:
return sprintf(buf, "%d\n",
data->temp2_auto_point_temp[ix] * 1000);
- break;
default:
dev_dbg(dev, "Unknown attr->nr (%d).\n", nr);
return -EINVAL;
@@ -513,7 +511,6 @@ static ssize_t set_temp_auto_point_temp(
count = -EIO;
}
goto EXIT;
- break;
case 1:
ptemp[1] = clamp_val(val / 1000, (ptemp[0] & 0x7C) + 4, 124);
ptemp[1] &= 0x7C;
@@ -665,7 +662,7 @@ static ssize_t set_fan1_div(
if (config < 0) {
dev_err(&client->dev,
"Error reading configuration register, aborting.\n");
- return -EIO;
+ return config;
}
mutex_lock(&data->update_lock);
switch (val) {
diff --git a/drivers/hwmon/emc2103.c b/drivers/hwmon/emc2103.c
index b073056..2c137b2 100644
--- a/drivers/hwmon/emc2103.c
+++ b/drivers/hwmon/emc2103.c
@@ -248,7 +248,7 @@ static ssize_t set_temp_min(struct device *dev, struct device_attribute *da,
int result = kstrtol(buf, 10, &val);
if (result < 0)
- return -EINVAL;
+ return result;
val = DIV_ROUND_CLOSEST(val, 1000);
if ((val < -63) || (val > 127))
@@ -272,7 +272,7 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute *da,
int result = kstrtol(buf, 10, &val);
if (result < 0)
- return -EINVAL;
+ return result;
val = DIV_ROUND_CLOSEST(val, 1000);
if ((val < -63) || (val > 127))
@@ -320,7 +320,7 @@ static ssize_t set_fan_div(struct device *dev, struct device_attribute *da,
int status = kstrtol(buf, 10, &new_div);
if (status < 0)
- return -EINVAL;
+ return status;
if (new_div == old_div) /* No change */
return count;
@@ -394,7 +394,7 @@ static ssize_t set_fan_target(struct device *dev, struct device_attribute *da,
int result = kstrtol(buf, 10, &rpm_target);
if (result < 0)
- return -EINVAL;
+ return result;
/* Datasheet states 16384 as maximum RPM target (table 3.2) */
if ((rpm_target < 0) || (rpm_target > 16384))
@@ -440,7 +440,7 @@ static ssize_t set_pwm_enable(struct device *dev, struct device_attribute *da,
int result = kstrtol(buf, 10, &new_value);
if (result < 0)
- return -EINVAL;
+ return result;
mutex_lock(&data->update_lock);
switch (new_value) {
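
These hwmon hunks all apply one pattern: return the errno produced by the failing call instead of flattening it to -EIO or -EINVAL, so userspace can distinguish, say, -ERANGE from a malformed string or a bus error. A minimal sketch of the corrected store-attribute shape (hypothetical attribute name):

static ssize_t set_example(struct device *dev, struct device_attribute *da,
			   const char *buf, size_t count)
{
	long val;
	int result = kstrtol(buf, 10, &val);

	if (result < 0)
		return result;	/* e.g. -ERANGE, not a generic -EINVAL */

	/* ... range-check and apply val ... */
	return count;
}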
diff --git a/drivers/hwmon/hwmon-vid.c b/drivers/hwmon/hwmon-vid.c
index 89cfd64..ef91b8a 100644
--- a/drivers/hwmon/hwmon-vid.c
+++ b/drivers/hwmon/hwmon-vid.c
@@ -246,7 +246,7 @@ static struct vrm_model vrm_models[] = {
*/
static u8 get_via_model_d_vrm(void)
{
- unsigned int vid, brand, dummy;
+ unsigned int vid, brand, __maybe_unused dummy;
static const char *brands[4] = {
"C7-M", "C7", "Eden", "C7-D"
};
diff --git a/drivers/hwmon/ibmaem.c b/drivers/hwmon/ibmaem.c
index e2b56a2..632f1dc 100644
--- a/drivers/hwmon/ibmaem.c
+++ b/drivers/hwmon/ibmaem.c
@@ -292,7 +292,7 @@ static int aem_init_ipmi_data(struct aem_ipmi_data *data, int iface,
dev_err(bmc,
"Unable to register user with IPMI interface %d\n",
data->interface);
- return -EACCES;
+ return err;
}
return 0;
diff --git a/drivers/hwmon/ina2xx.c b/drivers/hwmon/ina2xx.c
index 18c0623..70a39a8 100644
--- a/drivers/hwmon/ina2xx.c
+++ b/drivers/hwmon/ina2xx.c
@@ -233,8 +233,7 @@ static int ina2xx_probe(struct i2c_client *client,
return -ENOMEM;
if (dev_get_platdata(&client->dev)) {
- pdata =
- (struct ina2xx_platform_data *)dev_get_platdata(&client->dev);
+ pdata = dev_get_platdata(&client->dev);
shunt = pdata->shunt_uohms;
} else if (!of_property_read_u32(client->dev.of_node,
"shunt-resistor", &val)) {
diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
index e633856..d65f3fd 100644
--- a/drivers/hwmon/k10temp.c
+++ b/drivers/hwmon/k10temp.c
@@ -202,7 +202,6 @@ static void k10temp_remove(struct pci_dev *pdev)
&sensor_dev_attr_temp1_crit.dev_attr);
device_remove_file(&pdev->dev,
&sensor_dev_attr_temp1_crit_hyst.dev_attr);
- pci_set_drvdata(pdev, NULL);
}
static DEFINE_PCI_DEVICE_TABLE(k10temp_id_table) = {
diff --git a/drivers/hwmon/tmp421.c b/drivers/hwmon/tmp421.c
index 964c1d6..ae26b06 100644
--- a/drivers/hwmon/tmp421.c
+++ b/drivers/hwmon/tmp421.c
@@ -210,7 +210,7 @@ static int tmp421_init_client(struct i2c_client *client)
if (config < 0) {
dev_err(&client->dev,
"Could not read configuration register (%d)\n", config);
- return -ENODEV;
+ return config;
}
config_orig = config;
diff --git a/drivers/i2c/Kconfig b/drivers/i2c/Kconfig
index e380c6e..7b7ea32 100644
--- a/drivers/i2c/Kconfig
+++ b/drivers/i2c/Kconfig
@@ -75,7 +75,6 @@ config I2C_HELPER_AUTO
config I2C_SMBUS
tristate "SMBus-specific protocols" if !I2C_HELPER_AUTO
- depends on GENERIC_HARDIRQS
help
Say Y here if you want support for SMBus extensions to the I2C
specification. At the moment, the only supported extension is
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index fcdd321..cdcbd83 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -115,7 +115,7 @@ config I2C_I801
config I2C_ISCH
tristate "Intel SCH SMBus 1.0"
- depends on PCI && GENERIC_HARDIRQS
+ depends on PCI
select LPC_SCH
help
Say Y here if you want to use SMBus controller on the Intel SCH
@@ -546,7 +546,6 @@ config I2C_NUC900
config I2C_OCORES
tristate "OpenCores I2C Controller"
- depends on GENERIC_HARDIRQS
help
If you say yes to this option, support will be included for the
OpenCores I2C controller. For details see
@@ -791,7 +790,7 @@ config I2C_DIOLAN_U2C
config I2C_PARPORT
tristate "Parallel port adapter"
- depends on PARPORT && GENERIC_HARDIRQS
+ depends on PARPORT
select I2C_ALGOBIT
select I2C_SMBUS
help
@@ -816,7 +815,6 @@ config I2C_PARPORT
config I2C_PARPORT_LIGHT
tristate "Parallel port adapter (light)"
- depends on GENERIC_HARDIRQS
select I2C_ALGOBIT
select I2C_SMBUS
help
diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c
index 5747341..132369f 100644
--- a/drivers/i2c/busses/i2c-davinci.c
+++ b/drivers/i2c/busses/i2c-davinci.c
@@ -662,7 +662,7 @@ static int davinci_i2c_probe(struct platform_device *pdev)
#endif
dev->dev = &pdev->dev;
dev->irq = irq->start;
- dev->pdata = dev_get_platdata(&dev->dev);
+ dev->pdata = dev_get_platdata(&pdev->dev);
platform_set_drvdata(pdev, dev);
if (!dev->pdata && pdev->dev.of_node) {
diff --git a/drivers/iio/Kconfig b/drivers/iio/Kconfig
index cbea327..90cf0cd 100644
--- a/drivers/iio/Kconfig
+++ b/drivers/iio/Kconfig
@@ -4,7 +4,6 @@
menuconfig IIO
tristate "Industrial I/O support"
- depends on GENERIC_HARDIRQS
help
The industrial I/O subsystem provides a unified framework for
drivers for many different types of embedded sensors using a
diff --git a/drivers/infiniband/hw/qib/Kconfig b/drivers/infiniband/hw/qib/Kconfig
index d03ca4c..495be09 100644
--- a/drivers/infiniband/hw/qib/Kconfig
+++ b/drivers/infiniband/hw/qib/Kconfig
@@ -8,7 +8,7 @@ config INFINIBAND_QIB
config INFINIBAND_QIB_DCA
bool "QIB DCA support"
- depends on INFINIBAND_QIB && DCA && SMP && GENERIC_HARDIRQS && !(INFINIBAND_QIB=y && DCA=m)
+ depends on INFINIBAND_QIB && DCA && SMP && !(INFINIBAND_QIB=y && DCA=m)
default y
---help---
Setting this enables DCA support on some Intel chip sets
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 3f62041..3591855 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -1,7 +1,7 @@
/*******************************************************************************
* This file contains iSCSI extensions for RDMA (iSER) Verbs
*
- * (c) Copyright 2013 RisingTide Systems LLC.
+ * (c) Copyright 2013 Datera, Inc.
*
* Nicholas A. Bellinger <nab@linux-iscsi.org>
*
@@ -39,7 +39,17 @@ static DEFINE_MUTEX(device_list_mutex);
static LIST_HEAD(device_list);
static struct workqueue_struct *isert_rx_wq;
static struct workqueue_struct *isert_comp_wq;
-static struct kmem_cache *isert_cmd_cache;
+
+static void
+isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
+static int
+isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+ struct isert_rdma_wr *wr);
+static void
+isert_unreg_rdma_frwr(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
+static int
+isert_reg_rdma_frwr(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+ struct isert_rdma_wr *wr);
static void
isert_qp_event_callback(struct ib_event *e, void *context)
@@ -80,14 +90,8 @@ isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id)
{
struct isert_device *device = isert_conn->conn_device;
struct ib_qp_init_attr attr;
- struct ib_device_attr devattr;
int ret, index, min_index = 0;
- memset(&devattr, 0, sizeof(struct ib_device_attr));
- ret = isert_query_device(cma_id->device, &devattr);
- if (ret)
- return ret;
-
mutex_lock(&device_list_mutex);
for (index = 0; index < device->cqs_used; index++)
if (device->cq_active_qps[index] <
@@ -108,7 +112,7 @@ isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id)
* FIXME: Use devattr.max_sge - 2 for max_send_sge as
* work-around for RDMA_READ..
*/
- attr.cap.max_send_sge = devattr.max_sge - 2;
+ attr.cap.max_send_sge = device->dev_attr.max_sge - 2;
isert_conn->max_sge = attr.cap.max_send_sge;
attr.cap.max_recv_sge = 1;
@@ -210,14 +214,31 @@ isert_create_device_ib_res(struct isert_device *device)
{
struct ib_device *ib_dev = device->ib_device;
struct isert_cq_desc *cq_desc;
+ struct ib_device_attr *dev_attr;
int ret = 0, i, j;
+ dev_attr = &device->dev_attr;
+ ret = isert_query_device(ib_dev, dev_attr);
+ if (ret)
+ return ret;
+
+ /* assign function handlers */
+ if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
+ device->use_frwr = 1;
+ device->reg_rdma_mem = isert_reg_rdma_frwr;
+ device->unreg_rdma_mem = isert_unreg_rdma_frwr;
+ } else {
+ device->use_frwr = 0;
+ device->reg_rdma_mem = isert_map_rdma;
+ device->unreg_rdma_mem = isert_unmap_cmd;
+ }
+
device->cqs_used = min_t(int, num_online_cpus(),
device->ib_device->num_comp_vectors);
device->cqs_used = min(ISERT_MAX_CQ, device->cqs_used);
- pr_debug("Using %d CQs, device %s supports %d vectors\n",
+ pr_debug("Using %d CQs, device %s supports %d vectors support FRWR %d\n",
device->cqs_used, device->ib_device->name,
- device->ib_device->num_comp_vectors);
+ device->ib_device->num_comp_vectors, device->use_frwr);
device->cq_desc = kzalloc(sizeof(struct isert_cq_desc) *
device->cqs_used, GFP_KERNEL);
if (!device->cq_desc) {
@@ -363,6 +384,85 @@ isert_device_find_by_ib_dev(struct rdma_cm_id *cma_id)
return device;
}
+static void
+isert_conn_free_frwr_pool(struct isert_conn *isert_conn)
+{
+ struct fast_reg_descriptor *fr_desc, *tmp;
+ int i = 0;
+
+ if (list_empty(&isert_conn->conn_frwr_pool))
+ return;
+
+ pr_debug("Freeing conn %p frwr pool", isert_conn);
+
+ list_for_each_entry_safe(fr_desc, tmp,
+ &isert_conn->conn_frwr_pool, list) {
+ list_del(&fr_desc->list);
+ ib_free_fast_reg_page_list(fr_desc->data_frpl);
+ ib_dereg_mr(fr_desc->data_mr);
+ kfree(fr_desc);
+ ++i;
+ }
+
+ if (i < isert_conn->conn_frwr_pool_size)
+ pr_warn("Pool still has %d regions registered\n",
+ isert_conn->conn_frwr_pool_size - i);
+}
+
+static int
+isert_conn_create_frwr_pool(struct isert_conn *isert_conn)
+{
+ struct fast_reg_descriptor *fr_desc;
+ struct isert_device *device = isert_conn->conn_device;
+ int i, ret;
+
+ INIT_LIST_HEAD(&isert_conn->conn_frwr_pool);
+ isert_conn->conn_frwr_pool_size = 0;
+ for (i = 0; i < ISCSI_DEF_XMIT_CMDS_MAX; i++) {
+ fr_desc = kzalloc(sizeof(*fr_desc), GFP_KERNEL);
+ if (!fr_desc) {
+ pr_err("Failed to allocate fast_reg descriptor\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ fr_desc->data_frpl =
+ ib_alloc_fast_reg_page_list(device->ib_device,
+ ISCSI_ISER_SG_TABLESIZE);
+ if (IS_ERR(fr_desc->data_frpl)) {
+ pr_err("Failed to allocate fr_pg_list err=%ld\n",
+ PTR_ERR(fr_desc->data_frpl));
+ ret = PTR_ERR(fr_desc->data_frpl);
+ goto err;
+ }
+
+ fr_desc->data_mr = ib_alloc_fast_reg_mr(device->dev_pd,
+ ISCSI_ISER_SG_TABLESIZE);
+ if (IS_ERR(fr_desc->data_mr)) {
+ pr_err("Failed to allocate frmr err=%ld\n",
+ PTR_ERR(fr_desc->data_mr));
+ ret = PTR_ERR(fr_desc->data_mr);
+ ib_free_fast_reg_page_list(fr_desc->data_frpl);
+ goto err;
+ }
+ pr_debug("Create fr_desc %p page_list %p\n",
+ fr_desc, fr_desc->data_frpl->page_list);
+
+ fr_desc->valid = true;
+ list_add_tail(&fr_desc->list, &isert_conn->conn_frwr_pool);
+ isert_conn->conn_frwr_pool_size++;
+ }
+
+ pr_debug("Creating conn %p frwr pool size=%d",
+ isert_conn, isert_conn->conn_frwr_pool_size);
+
+ return 0;
+
+err:
+ isert_conn_free_frwr_pool(isert_conn);
+ return ret;
+}
+
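
The pool is sized to ISCSI_DEF_XMIT_CMDS_MAX descriptors, each carrying a fast-registration MR and a page list of up to ISCSI_ISER_SG_TABLESIZE pages. The checkout/return discipline, as it appears later in this patch:

/* Checkout on the data path (from isert_reg_rdma_frwr below): */
spin_lock_irqsave(&isert_conn->conn_lock, flags);
fr_desc = list_first_entry(&isert_conn->conn_frwr_pool,
			   struct fast_reg_descriptor, list);
list_del(&fr_desc->list);
spin_unlock_irqrestore(&isert_conn->conn_lock, flags);

/* Return on command teardown (from isert_unreg_rdma_frwr below): */
spin_lock_bh(&isert_conn->conn_lock);
list_add_tail(&wr->fr_desc->list, &isert_conn->conn_frwr_pool);
spin_unlock_bh(&isert_conn->conn_lock);

Note that list_first_entry() assumes the pool is never empty; that appears to hold only as long as concurrently registered commands never exceed ISCSI_DEF_XMIT_CMDS_MAX.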
static int
isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
@@ -389,6 +489,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
kref_init(&isert_conn->conn_kref);
kref_get(&isert_conn->conn_kref);
mutex_init(&isert_conn->conn_mutex);
+ spin_lock_init(&isert_conn->conn_lock);
cma_id->context = isert_conn;
isert_conn->conn_cm_id = cma_id;
@@ -446,6 +547,14 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
isert_conn->conn_pd = device->dev_pd;
isert_conn->conn_mr = device->dev_mr;
+ if (device->use_frwr) {
+ ret = isert_conn_create_frwr_pool(isert_conn);
+ if (ret) {
+ pr_err("Conn: %p failed to create frwr_pool\n", isert_conn);
+ goto out_frwr;
+ }
+ }
+
ret = isert_conn_setup_qp(isert_conn, cma_id);
if (ret)
goto out_conn_dev;
@@ -459,6 +568,9 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
return 0;
out_conn_dev:
+ if (device->use_frwr)
+ isert_conn_free_frwr_pool(isert_conn);
+out_frwr:
isert_device_try_release(device);
out_rsp_dma_map:
ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
@@ -482,6 +594,9 @@ isert_connect_release(struct isert_conn *isert_conn)
pr_debug("Entering isert_connect_release(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
+ if (device->use_frwr)
+ isert_conn_free_frwr_pool(isert_conn);
+
if (isert_conn->conn_qp) {
cq_index = ((struct isert_cq_desc *)
isert_conn->conn_qp->recv_cq->cq_context)->cq_index;
@@ -869,46 +984,37 @@ isert_rx_login_req(struct iser_rx_desc *rx_desc, int rx_buflen,
size, rx_buflen, MAX_KEY_VALUE_PAIRS);
memcpy(login->req_buf, &rx_desc->data[0], size);
- complete(&isert_conn->conn_login_comp);
-}
-
-static void
-isert_release_cmd(struct iscsi_cmd *cmd)
-{
- struct isert_cmd *isert_cmd = container_of(cmd, struct isert_cmd,
- iscsi_cmd);
-
- pr_debug("Entering isert_release_cmd %p >>>>>>>>>>>>>>>.\n", isert_cmd);
-
- kfree(cmd->buf_ptr);
- kfree(cmd->tmr_req);
-
- kmem_cache_free(isert_cmd_cache, isert_cmd);
+ if (login->first_request) {
+ complete(&isert_conn->conn_login_comp);
+ return;
+ }
+ schedule_delayed_work(&conn->login_work, 0);
}
static struct iscsi_cmd
-*isert_alloc_cmd(struct iscsi_conn *conn, gfp_t gfp)
+*isert_allocate_cmd(struct iscsi_conn *conn, gfp_t gfp)
{
struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
struct isert_cmd *isert_cmd;
+ struct iscsi_cmd *cmd;
- isert_cmd = kmem_cache_zalloc(isert_cmd_cache, gfp);
- if (!isert_cmd) {
- pr_err("Unable to allocate isert_cmd\n");
+ cmd = iscsit_allocate_cmd(conn, gfp);
+ if (!cmd) {
+ pr_err("Unable to allocate iscsi_cmd + isert_cmd\n");
return NULL;
}
+ isert_cmd = iscsit_priv_cmd(cmd);
isert_cmd->conn = isert_conn;
- isert_cmd->iscsi_cmd.release_cmd = &isert_release_cmd;
+ isert_cmd->iscsi_cmd = cmd;
- return &isert_cmd->iscsi_cmd;
+ return cmd;
}
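
isert_allocate_cmd() works because the transport now advertises .priv_size (see the iscsit_transport table at the end of this file), letting the iSCSI core co-allocate the generic command and the transport-private area in one object. The assumed layout, sketched:

/* Assumed core-side layout behind .priv_size / iscsit_priv_cmd(); the
 * actual allocation path lives in the iSCSI target core, not here.
 *
 *	cmd  = kzalloc(sizeof(struct iscsi_cmd) + transport->priv_size, gfp);
 *	priv = (void *)(cmd + 1);	// what iscsit_priv_cmd(cmd) returns
 *
 * This is why struct isert_cmd now stores a pointer back to its
 * iscsi_cmd instead of embedding it, and why the private kmem_cache,
 * isert_release_cmd(), and the container_of() conversions all
 * disappear in this patch. */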
static int
isert_handle_scsi_cmd(struct isert_conn *isert_conn,
- struct isert_cmd *isert_cmd, struct iser_rx_desc *rx_desc,
- unsigned char *buf)
+ struct isert_cmd *isert_cmd, struct iscsi_cmd *cmd,
+ struct iser_rx_desc *rx_desc, unsigned char *buf)
{
- struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd;
struct iscsi_conn *conn = isert_conn->conn;
struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)buf;
struct scatterlist *sg;
@@ -1015,9 +1121,9 @@ isert_handle_iscsi_dataout(struct isert_conn *isert_conn,
static int
isert_handle_nop_out(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
- struct iser_rx_desc *rx_desc, unsigned char *buf)
+ struct iscsi_cmd *cmd, struct iser_rx_desc *rx_desc,
+ unsigned char *buf)
{
- struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd;
struct iscsi_conn *conn = isert_conn->conn;
struct iscsi_nopout *hdr = (struct iscsi_nopout *)buf;
int rc;
@@ -1034,9 +1140,9 @@ isert_handle_nop_out(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
static int
isert_handle_text_cmd(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
- struct iser_rx_desc *rx_desc, struct iscsi_text *hdr)
+ struct iscsi_cmd *cmd, struct iser_rx_desc *rx_desc,
+ struct iscsi_text *hdr)
{
- struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd;
struct iscsi_conn *conn = isert_conn->conn;
u32 payload_length = ntoh24(hdr->dlength);
int rc;
@@ -1081,26 +1187,26 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
switch (opcode) {
case ISCSI_OP_SCSI_CMD:
- cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
+ cmd = isert_allocate_cmd(conn, GFP_KERNEL);
if (!cmd)
break;
- isert_cmd = container_of(cmd, struct isert_cmd, iscsi_cmd);
+ isert_cmd = iscsit_priv_cmd(cmd);
isert_cmd->read_stag = read_stag;
isert_cmd->read_va = read_va;
isert_cmd->write_stag = write_stag;
isert_cmd->write_va = write_va;
- ret = isert_handle_scsi_cmd(isert_conn, isert_cmd,
+ ret = isert_handle_scsi_cmd(isert_conn, isert_cmd, cmd,
rx_desc, (unsigned char *)hdr);
break;
case ISCSI_OP_NOOP_OUT:
- cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
+ cmd = isert_allocate_cmd(conn, GFP_KERNEL);
if (!cmd)
break;
- isert_cmd = container_of(cmd, struct isert_cmd, iscsi_cmd);
- ret = isert_handle_nop_out(isert_conn, isert_cmd,
+ isert_cmd = iscsit_priv_cmd(cmd);
+ ret = isert_handle_nop_out(isert_conn, isert_cmd, cmd,
rx_desc, (unsigned char *)hdr);
break;
case ISCSI_OP_SCSI_DATA_OUT:
@@ -1108,7 +1214,7 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
(unsigned char *)hdr);
break;
case ISCSI_OP_SCSI_TMFUNC:
- cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
+ cmd = isert_allocate_cmd(conn, GFP_KERNEL);
if (!cmd)
break;
@@ -1116,7 +1222,7 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
(unsigned char *)hdr);
break;
case ISCSI_OP_LOGOUT:
- cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
+ cmd = isert_allocate_cmd(conn, GFP_KERNEL);
if (!cmd)
break;
@@ -1127,12 +1233,12 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
HZ);
break;
case ISCSI_OP_TEXT:
- cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
+ cmd = isert_allocate_cmd(conn, GFP_KERNEL);
if (!cmd)
break;
- isert_cmd = container_of(cmd, struct isert_cmd, iscsi_cmd);
- ret = isert_handle_text_cmd(isert_conn, isert_cmd,
+ isert_cmd = iscsit_priv_cmd(cmd);
+ ret = isert_handle_text_cmd(isert_conn, isert_cmd, cmd,
rx_desc, (struct iscsi_text *)hdr);
break;
default:
@@ -1243,26 +1349,65 @@ isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
- pr_debug("isert_unmap_cmd >>>>>>>>>>>>>>>>>>>>>>>\n");
+ pr_debug("isert_unmap_cmd: %p\n", isert_cmd);
+ if (wr->sge) {
+ pr_debug("isert_unmap_cmd: %p unmap_sg op\n", isert_cmd);
+ ib_dma_unmap_sg(ib_dev, wr->sge, wr->num_sge,
+ (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
+ DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ wr->sge = NULL;
+ }
+
+ if (wr->send_wr) {
+ pr_debug("isert_unmap_cmd: %p free send_wr\n", isert_cmd);
+ kfree(wr->send_wr);
+ wr->send_wr = NULL;
+ }
+
+ if (wr->ib_sge) {
+ pr_debug("isert_unmap_cmd: %p free ib_sge\n", isert_cmd);
+ kfree(wr->ib_sge);
+ wr->ib_sge = NULL;
+ }
+}
+
+static void
+isert_unreg_rdma_frwr(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
+{
+ struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
+ struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+ LIST_HEAD(unmap_list);
+
+ pr_debug("unreg_frwr_cmd: %p\n", isert_cmd);
+
+ if (wr->fr_desc) {
+ pr_debug("unreg_frwr_cmd: %p free fr_desc %p\n",
+ isert_cmd, wr->fr_desc);
+ spin_lock_bh(&isert_conn->conn_lock);
+ list_add_tail(&wr->fr_desc->list, &isert_conn->conn_frwr_pool);
+ spin_unlock_bh(&isert_conn->conn_lock);
+ wr->fr_desc = NULL;
+ }
if (wr->sge) {
- ib_dma_unmap_sg(ib_dev, wr->sge, wr->num_sge, DMA_TO_DEVICE);
+ pr_debug("unreg_frwr_cmd: %p unmap_sg op\n", isert_cmd);
+ ib_dma_unmap_sg(ib_dev, wr->sge, wr->num_sge,
+ (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
+ DMA_TO_DEVICE : DMA_FROM_DEVICE);
wr->sge = NULL;
}
- kfree(wr->send_wr);
+ wr->ib_sge = NULL;
wr->send_wr = NULL;
-
- kfree(isert_cmd->ib_sge);
- isert_cmd->ib_sge = NULL;
}
static void
isert_put_cmd(struct isert_cmd *isert_cmd)
{
- struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd;
+ struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
struct isert_conn *isert_conn = isert_cmd->conn;
struct iscsi_conn *conn = isert_conn->conn;
+ struct isert_device *device = isert_conn->conn_device;
pr_debug("Entering isert_put_cmd: %p\n", isert_cmd);
@@ -1276,7 +1421,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd)
if (cmd->data_direction == DMA_TO_DEVICE)
iscsit_stop_dataout_timer(cmd);
- isert_unmap_cmd(isert_cmd, isert_conn);
+ device->unreg_rdma_mem(isert_cmd, isert_conn);
transport_generic_free_cmd(&cmd->se_cmd, 0);
break;
case ISCSI_OP_SCSI_TMFUNC:
@@ -1311,7 +1456,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd)
* Fall-through
*/
default:
- isert_release_cmd(cmd);
+ iscsit_release_cmd(cmd);
break;
}
}
@@ -1347,27 +1492,16 @@ isert_completion_rdma_read(struct iser_tx_desc *tx_desc,
struct isert_cmd *isert_cmd)
{
struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
- struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd;
+ struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
struct se_cmd *se_cmd = &cmd->se_cmd;
- struct ib_device *ib_dev = isert_cmd->conn->conn_cm_id->device;
+ struct isert_conn *isert_conn = isert_cmd->conn;
+ struct isert_device *device = isert_conn->conn_device;
iscsit_stop_dataout_timer(cmd);
+ device->unreg_rdma_mem(isert_cmd, isert_conn);
+ cmd->write_data_done = wr->cur_rdma_length;
- if (wr->sge) {
- pr_debug("isert_do_rdma_read_comp: Unmapping wr->sge from t_data_sg\n");
- ib_dma_unmap_sg(ib_dev, wr->sge, wr->num_sge, DMA_TO_DEVICE);
- wr->sge = NULL;
- }
-
- if (isert_cmd->ib_sge) {
- pr_debug("isert_do_rdma_read_comp: Freeing isert_cmd->ib_sge\n");
- kfree(isert_cmd->ib_sge);
- isert_cmd->ib_sge = NULL;
- }
-
- cmd->write_data_done = se_cmd->data_length;
-
- pr_debug("isert_do_rdma_read_comp, calling target_execute_cmd\n");
+ pr_debug("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd);
spin_lock_bh(&cmd->istate_lock);
cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
@@ -1383,7 +1517,7 @@ isert_do_control_comp(struct work_struct *work)
struct isert_cmd, comp_work);
struct isert_conn *isert_conn = isert_cmd->conn;
struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
- struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd;
+ struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
switch (cmd->i_state) {
case ISTATE_SEND_TASKMGTRSP:
@@ -1429,7 +1563,7 @@ isert_response_completion(struct iser_tx_desc *tx_desc,
struct isert_conn *isert_conn,
struct ib_device *ib_dev)
{
- struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd;
+ struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
if (cmd->i_state == ISTATE_SEND_TASKMGTRSP ||
cmd->i_state == ISTATE_SEND_LOGOUTRSP ||
@@ -1621,8 +1755,7 @@ isert_post_response(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd)
static int
isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
{
- struct isert_cmd *isert_cmd = container_of(cmd,
- struct isert_cmd, iscsi_cmd);
+ struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
struct iscsi_scsi_rsp *hdr = (struct iscsi_scsi_rsp *)
@@ -1671,8 +1804,7 @@ static int
isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
bool nopout_response)
{
- struct isert_cmd *isert_cmd = container_of(cmd,
- struct isert_cmd, iscsi_cmd);
+ struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
@@ -1691,8 +1823,7 @@ isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
static int
isert_put_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
- struct isert_cmd *isert_cmd = container_of(cmd,
- struct isert_cmd, iscsi_cmd);
+ struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
@@ -1710,8 +1841,7 @@ isert_put_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
static int
isert_put_tm_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
- struct isert_cmd *isert_cmd = container_of(cmd,
- struct isert_cmd, iscsi_cmd);
+ struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
@@ -1729,8 +1859,7 @@ isert_put_tm_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
static int
isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
- struct isert_cmd *isert_cmd = container_of(cmd,
- struct isert_cmd, iscsi_cmd);
+ struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
@@ -1762,8 +1891,7 @@ isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
static int
isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
- struct isert_cmd *isert_cmd = container_of(cmd,
- struct isert_cmd, iscsi_cmd);
+ struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
struct iscsi_text_rsp *hdr =
@@ -1805,7 +1933,7 @@ isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
struct ib_sge *ib_sge, struct ib_send_wr *send_wr,
u32 data_left, u32 offset)
{
- struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd;
+ struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
struct scatterlist *sg_start, *tmp_sg;
struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
u32 sg_off, page_off;
@@ -1832,8 +1960,8 @@ isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
ib_sg_dma_len(ib_dev, tmp_sg) - page_off);
ib_sge->lkey = isert_conn->conn_mr->lkey;
- pr_debug("RDMA ib_sge: addr: 0x%16llx length: %u\n",
- ib_sge->addr, ib_sge->length);
+ pr_debug("RDMA ib_sge: addr: 0x%16llx length: %u lkey: %08x\n",
+ ib_sge->addr, ib_sge->length, ib_sge->lkey);
page_off = 0;
data_left -= ib_sge->length;
ib_sge++;
@@ -1847,200 +1975,373 @@ isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
}
static int
-isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
+isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+ struct isert_rdma_wr *wr)
{
struct se_cmd *se_cmd = &cmd->se_cmd;
- struct isert_cmd *isert_cmd = container_of(cmd,
- struct isert_cmd, iscsi_cmd);
- struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
+ struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
- struct ib_send_wr *wr_failed, *send_wr;
struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+ struct ib_send_wr *send_wr;
struct ib_sge *ib_sge;
- struct scatterlist *sg;
- u32 offset = 0, data_len, data_left, rdma_write_max;
- int rc, ret = 0, count, sg_nents, i, ib_sge_cnt;
-
- pr_debug("RDMA_WRITE: data_length: %u\n", se_cmd->data_length);
+ struct scatterlist *sg_start;
+ u32 sg_off = 0, sg_nents;
+ u32 offset = 0, data_len, data_left, rdma_write_max, va_offset = 0;
+ int ret = 0, count, i, ib_sge_cnt;
+
+ if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
+ data_left = se_cmd->data_length;
+ iscsit_increment_maxcmdsn(cmd, conn->sess);
+ cmd->stat_sn = conn->stat_sn++;
+ } else {
+ sg_off = cmd->write_data_done / PAGE_SIZE;
+ data_left = se_cmd->data_length - cmd->write_data_done;
+ offset = cmd->write_data_done;
+ isert_cmd->tx_desc.isert_cmd = isert_cmd;
+ }
- sg = &se_cmd->t_data_sg[0];
- sg_nents = se_cmd->t_data_nents;
+ sg_start = &cmd->se_cmd.t_data_sg[sg_off];
+ sg_nents = se_cmd->t_data_nents - sg_off;
- count = ib_dma_map_sg(ib_dev, sg, sg_nents, DMA_TO_DEVICE);
+ count = ib_dma_map_sg(ib_dev, sg_start, sg_nents,
+ (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
+ DMA_TO_DEVICE : DMA_FROM_DEVICE);
if (unlikely(!count)) {
- pr_err("Unable to map put_datain SGs\n");
+ pr_err("Cmd: %p unrable to map SGs\n", isert_cmd);
return -EINVAL;
}
- wr->sge = sg;
+ wr->sge = sg_start;
wr->num_sge = sg_nents;
- pr_debug("Mapped IB count: %u sg: %p sg_nents: %u for RDMA_WRITE\n",
- count, sg, sg_nents);
+ wr->cur_rdma_length = data_left;
+ pr_debug("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n",
+ isert_cmd, count, sg_start, sg_nents, data_left);
ib_sge = kzalloc(sizeof(struct ib_sge) * sg_nents, GFP_KERNEL);
if (!ib_sge) {
- pr_warn("Unable to allocate datain ib_sge\n");
+ pr_warn("Unable to allocate ib_sge\n");
ret = -ENOMEM;
goto unmap_sg;
}
- isert_cmd->ib_sge = ib_sge;
-
- pr_debug("Allocated ib_sge: %p from t_data_ents: %d for RDMA_WRITE\n",
- ib_sge, se_cmd->t_data_nents);
+ wr->ib_sge = ib_sge;
wr->send_wr_num = DIV_ROUND_UP(sg_nents, isert_conn->max_sge);
wr->send_wr = kzalloc(sizeof(struct ib_send_wr) * wr->send_wr_num,
GFP_KERNEL);
if (!wr->send_wr) {
- pr_err("Unable to allocate wr->send_wr\n");
+ pr_debug("Unable to allocate wr->send_wr\n");
ret = -ENOMEM;
goto unmap_sg;
}
- pr_debug("Allocated wr->send_wr: %p wr->send_wr_num: %u\n",
- wr->send_wr, wr->send_wr_num);
-
- iscsit_increment_maxcmdsn(cmd, conn->sess);
- cmd->stat_sn = conn->stat_sn++;
wr->isert_cmd = isert_cmd;
rdma_write_max = isert_conn->max_sge * PAGE_SIZE;
- data_left = se_cmd->data_length;
for (i = 0; i < wr->send_wr_num; i++) {
send_wr = &isert_cmd->rdma_wr.send_wr[i];
data_len = min(data_left, rdma_write_max);
- send_wr->opcode = IB_WR_RDMA_WRITE;
send_wr->send_flags = 0;
- send_wr->wr.rdma.remote_addr = isert_cmd->read_va + offset;
- send_wr->wr.rdma.rkey = isert_cmd->read_stag;
+ if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
+ send_wr->opcode = IB_WR_RDMA_WRITE;
+ send_wr->wr.rdma.remote_addr = isert_cmd->read_va + offset;
+ send_wr->wr.rdma.rkey = isert_cmd->read_stag;
+ if (i + 1 == wr->send_wr_num)
+ send_wr->next = &isert_cmd->tx_desc.send_wr;
+ else
+ send_wr->next = &wr->send_wr[i + 1];
+ } else {
+ send_wr->opcode = IB_WR_RDMA_READ;
+ send_wr->wr.rdma.remote_addr = isert_cmd->write_va + va_offset;
+ send_wr->wr.rdma.rkey = isert_cmd->write_stag;
+ if (i + 1 == wr->send_wr_num)
+ send_wr->send_flags = IB_SEND_SIGNALED;
+ else
+ send_wr->next = &wr->send_wr[i + 1];
+ }
ib_sge_cnt = isert_build_rdma_wr(isert_conn, isert_cmd, ib_sge,
send_wr, data_len, offset);
ib_sge += ib_sge_cnt;
- if (i + 1 == wr->send_wr_num)
- send_wr->next = &isert_cmd->tx_desc.send_wr;
- else
- send_wr->next = &wr->send_wr[i + 1];
-
offset += data_len;
+ va_offset += data_len;
data_left -= data_len;
}
- /*
- * Build isert_conn->tx_desc for iSCSI response PDU and attach
- */
- isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
- iscsit_build_rsp_pdu(cmd, conn, false, (struct iscsi_scsi_rsp *)
- &isert_cmd->tx_desc.iscsi_header);
- isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
- isert_init_send_wr(isert_cmd, &isert_cmd->tx_desc.send_wr);
- atomic_inc(&isert_conn->post_send_buf_count);
+ return 0;
+unmap_sg:
+ ib_dma_unmap_sg(ib_dev, sg_start, sg_nents,
+ (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
+ DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ return ret;
+}
- rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
- if (rc) {
- pr_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n");
- atomic_dec(&isert_conn->post_send_buf_count);
+static int
+isert_map_fr_pagelist(struct ib_device *ib_dev,
+ struct scatterlist *sg_start, int sg_nents, u64 *fr_pl)
+{
+ u64 start_addr, end_addr, page, chunk_start = 0;
+ struct scatterlist *tmp_sg;
+ int i = 0, new_chunk, last_ent, n_pages;
+
+ n_pages = 0;
+ new_chunk = 1;
+ last_ent = sg_nents - 1;
+ for_each_sg(sg_start, tmp_sg, sg_nents, i) {
+ start_addr = ib_sg_dma_address(ib_dev, tmp_sg);
+ if (new_chunk)
+ chunk_start = start_addr;
+ end_addr = start_addr + ib_sg_dma_len(ib_dev, tmp_sg);
+
+ pr_debug("SGL[%d] dma_addr: 0x%16llx len: %u\n",
+ i, (unsigned long long)tmp_sg->dma_address,
+ tmp_sg->length);
+
+ if ((end_addr & ~PAGE_MASK) && i < last_ent) {
+ new_chunk = 0;
+ continue;
+ }
+ new_chunk = 1;
+
+ page = chunk_start & PAGE_MASK;
+ do {
+ fr_pl[n_pages++] = page;
+ pr_debug("Mapped page_list[%d] page_addr: 0x%16llx\n",
+ n_pages - 1, page);
+ page += PAGE_SIZE;
+ } while (page < end_addr);
}
- pr_debug("Posted RDMA_WRITE + Response for iSER Data READ\n");
- return 1;
-unmap_sg:
- ib_dma_unmap_sg(ib_dev, sg, sg_nents, DMA_TO_DEVICE);
+ return n_pages;
+}
+
+static int
+isert_fast_reg_mr(struct fast_reg_descriptor *fr_desc,
+ struct isert_cmd *isert_cmd, struct isert_conn *isert_conn,
+ struct ib_sge *ib_sge, u32 offset, unsigned int data_len)
+{
+ struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
+ struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+ struct scatterlist *sg_start;
+ u32 sg_off, page_off;
+ struct ib_send_wr fr_wr, inv_wr;
+ struct ib_send_wr *bad_wr, *wr = NULL;
+ u8 key;
+ int ret, sg_nents, pagelist_len;
+
+ sg_off = offset / PAGE_SIZE;
+ sg_start = &cmd->se_cmd.t_data_sg[sg_off];
+ sg_nents = min_t(unsigned int, cmd->se_cmd.t_data_nents - sg_off,
+ ISCSI_ISER_SG_TABLESIZE);
+ page_off = offset % PAGE_SIZE;
+
+ pr_debug("Cmd: %p use fr_desc %p sg_nents %d sg_off %d offset %u\n",
+ isert_cmd, fr_desc, sg_nents, sg_off, offset);
+
+ pagelist_len = isert_map_fr_pagelist(ib_dev, sg_start, sg_nents,
+ &fr_desc->data_frpl->page_list[0]);
+
+ if (!fr_desc->valid) {
+ memset(&inv_wr, 0, sizeof(inv_wr));
+ inv_wr.opcode = IB_WR_LOCAL_INV;
+ inv_wr.ex.invalidate_rkey = fr_desc->data_mr->rkey;
+ wr = &inv_wr;
+ /* Bump the key */
+ key = (u8)(fr_desc->data_mr->rkey & 0x000000FF);
+ ib_update_fast_reg_key(fr_desc->data_mr, ++key);
+ }
+
+ /* Prepare FASTREG WR */
+ memset(&fr_wr, 0, sizeof(fr_wr));
+ fr_wr.opcode = IB_WR_FAST_REG_MR;
+ fr_wr.wr.fast_reg.iova_start =
+ fr_desc->data_frpl->page_list[0] + page_off;
+ fr_wr.wr.fast_reg.page_list = fr_desc->data_frpl;
+ fr_wr.wr.fast_reg.page_list_len = pagelist_len;
+ fr_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
+ fr_wr.wr.fast_reg.length = data_len;
+ fr_wr.wr.fast_reg.rkey = fr_desc->data_mr->rkey;
+ fr_wr.wr.fast_reg.access_flags = IB_ACCESS_LOCAL_WRITE;
+
+ if (!wr)
+ wr = &fr_wr;
+ else
+ wr->next = &fr_wr;
+
+ ret = ib_post_send(isert_conn->conn_qp, wr, &bad_wr);
+ if (ret) {
+ pr_err("fast registration failed, ret:%d\n", ret);
+ return ret;
+ }
+ fr_desc->valid = false;
+
+ ib_sge->lkey = fr_desc->data_mr->lkey;
+ ib_sge->addr = fr_desc->data_frpl->page_list[0] + page_off;
+ ib_sge->length = data_len;
+
+ pr_debug("RDMA ib_sge: addr: 0x%16llx length: %u lkey: %08x\n",
+ ib_sge->addr, ib_sge->length, ib_sge->lkey);
+
return ret;
}
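
isert_fast_reg_mr() chains up to two work requests: a LOCAL_INV to retire the previous registration whenever the descriptor is not in the clean state, then the FAST_REG_MR itself under a bumped key. In outline:

/* WR chain posted above when fr_desc->valid is false:
 *
 *	IB_WR_LOCAL_INV (old rkey)  -->  IB_WR_FAST_REG_MR (new rkey)
 *
 * Bumping the low byte of the rkey before re-registering:
 *
 *	key = (u8)(fr_desc->data_mr->rkey & 0x000000FF);
 *	ib_update_fast_reg_key(fr_desc->data_mr, ++key);
 *
 * gives the new registration a distinct key, so any stale reference
 * to the previous mapping is rejected by the HCA rather than silently
 * hitting recycled memory. */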
static int
-isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
+isert_reg_rdma_frwr(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+ struct isert_rdma_wr *wr)
{
struct se_cmd *se_cmd = &cmd->se_cmd;
- struct isert_cmd *isert_cmd = container_of(cmd,
- struct isert_cmd, iscsi_cmd);
- struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
+ struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
- struct ib_send_wr *wr_failed, *send_wr;
- struct ib_sge *ib_sge;
struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+ struct ib_send_wr *send_wr;
+ struct ib_sge *ib_sge;
struct scatterlist *sg_start;
- u32 sg_off, sg_nents, page_off, va_offset = 0;
+ struct fast_reg_descriptor *fr_desc;
+ u32 sg_off = 0, sg_nents;
u32 offset = 0, data_len, data_left, rdma_write_max;
- int rc, ret = 0, count, i, ib_sge_cnt;
+ int ret = 0, count;
+ unsigned long flags;
- pr_debug("RDMA_READ: data_length: %u write_data_done: %u\n",
- se_cmd->data_length, cmd->write_data_done);
+ if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
+ data_left = se_cmd->data_length;
+ iscsit_increment_maxcmdsn(cmd, conn->sess);
+ cmd->stat_sn = conn->stat_sn++;
+ } else {
+ sg_off = cmd->write_data_done / PAGE_SIZE;
+ data_left = se_cmd->data_length - cmd->write_data_done;
+ offset = cmd->write_data_done;
+ isert_cmd->tx_desc.isert_cmd = isert_cmd;
+ }
- sg_off = cmd->write_data_done / PAGE_SIZE;
sg_start = &cmd->se_cmd.t_data_sg[sg_off];
- page_off = cmd->write_data_done % PAGE_SIZE;
-
- pr_debug("RDMA_READ: sg_off: %d, sg_start: %p page_off: %d\n",
- sg_off, sg_start, page_off);
-
- data_left = se_cmd->data_length - cmd->write_data_done;
sg_nents = se_cmd->t_data_nents - sg_off;
- pr_debug("RDMA_READ: data_left: %d, sg_nents: %d\n",
- data_left, sg_nents);
-
- count = ib_dma_map_sg(ib_dev, sg_start, sg_nents, DMA_FROM_DEVICE);
+ count = ib_dma_map_sg(ib_dev, sg_start, sg_nents,
+ (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
+ DMA_TO_DEVICE : DMA_FROM_DEVICE);
if (unlikely(!count)) {
- pr_err("Unable to map get_dataout SGs\n");
+ pr_err("Cmd: %p unrable to map SGs\n", isert_cmd);
return -EINVAL;
}
wr->sge = sg_start;
wr->num_sge = sg_nents;
- pr_debug("Mapped IB count: %u sg_start: %p sg_nents: %u for RDMA_READ\n",
- count, sg_start, sg_nents);
+ pr_debug("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n",
+ isert_cmd, count, sg_start, sg_nents, data_left);
- ib_sge = kzalloc(sizeof(struct ib_sge) * sg_nents, GFP_KERNEL);
- if (!ib_sge) {
- pr_warn("Unable to allocate dataout ib_sge\n");
- ret = -ENOMEM;
- goto unmap_sg;
+ memset(&wr->s_ib_sge, 0, sizeof(*ib_sge));
+ ib_sge = &wr->s_ib_sge;
+ wr->ib_sge = ib_sge;
+
+ wr->send_wr_num = 1;
+ memset(&wr->s_send_wr, 0, sizeof(*send_wr));
+ wr->send_wr = &wr->s_send_wr;
+
+ wr->isert_cmd = isert_cmd;
+ rdma_write_max = ISCSI_ISER_SG_TABLESIZE * PAGE_SIZE;
+
+ send_wr = &isert_cmd->rdma_wr.s_send_wr;
+ send_wr->sg_list = ib_sge;
+ send_wr->num_sge = 1;
+ send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc;
+ if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
+ send_wr->opcode = IB_WR_RDMA_WRITE;
+ send_wr->wr.rdma.remote_addr = isert_cmd->read_va;
+ send_wr->wr.rdma.rkey = isert_cmd->read_stag;
+ send_wr->send_flags = 0;
+ send_wr->next = &isert_cmd->tx_desc.send_wr;
+ } else {
+ send_wr->opcode = IB_WR_RDMA_READ;
+ send_wr->wr.rdma.remote_addr = isert_cmd->write_va;
+ send_wr->wr.rdma.rkey = isert_cmd->write_stag;
+ send_wr->send_flags = IB_SEND_SIGNALED;
}
- isert_cmd->ib_sge = ib_sge;
- pr_debug("Using ib_sge: %p from sg_ents: %d for RDMA_READ\n",
- ib_sge, sg_nents);
+ data_len = min(data_left, rdma_write_max);
+ wr->cur_rdma_length = data_len;
- wr->send_wr_num = DIV_ROUND_UP(sg_nents, isert_conn->max_sge);
- wr->send_wr = kzalloc(sizeof(struct ib_send_wr) * wr->send_wr_num,
- GFP_KERNEL);
- if (!wr->send_wr) {
- pr_debug("Unable to allocate wr->send_wr\n");
- ret = -ENOMEM;
+ spin_lock_irqsave(&isert_conn->conn_lock, flags);
+ fr_desc = list_first_entry(&isert_conn->conn_frwr_pool,
+ struct fast_reg_descriptor, list);
+ list_del(&fr_desc->list);
+ spin_unlock_irqrestore(&isert_conn->conn_lock, flags);
+ wr->fr_desc = fr_desc;
+
+ ret = isert_fast_reg_mr(fr_desc, isert_cmd, isert_conn,
+ ib_sge, offset, data_len);
+ if (ret) {
+ list_add_tail(&fr_desc->list, &isert_conn->conn_frwr_pool);
goto unmap_sg;
}
- pr_debug("Allocated wr->send_wr: %p wr->send_wr_num: %u\n",
- wr->send_wr, wr->send_wr_num);
- isert_cmd->tx_desc.isert_cmd = isert_cmd;
+ return 0;
- wr->iser_ib_op = ISER_IB_RDMA_READ;
- wr->isert_cmd = isert_cmd;
- rdma_write_max = isert_conn->max_sge * PAGE_SIZE;
- offset = cmd->write_data_done;
+unmap_sg:
+ ib_dma_unmap_sg(ib_dev, sg_start, sg_nents,
+ (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
+ DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ return ret;
+}
- for (i = 0; i < wr->send_wr_num; i++) {
- send_wr = &isert_cmd->rdma_wr.send_wr[i];
- data_len = min(data_left, rdma_write_max);
+static int
+isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
+{
+ struct se_cmd *se_cmd = &cmd->se_cmd;
+ struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
+ struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
+ struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
+ struct isert_device *device = isert_conn->conn_device;
+ struct ib_send_wr *wr_failed;
+ int rc;
- send_wr->opcode = IB_WR_RDMA_READ;
- send_wr->wr.rdma.remote_addr = isert_cmd->write_va + va_offset;
- send_wr->wr.rdma.rkey = isert_cmd->write_stag;
+ pr_debug("Cmd: %p RDMA_WRITE data_length: %u\n",
+ isert_cmd, se_cmd->data_length);
+ wr->iser_ib_op = ISER_IB_RDMA_WRITE;
+ rc = device->reg_rdma_mem(conn, cmd, wr);
+ if (rc) {
+ pr_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
+ return rc;
+ }
- ib_sge_cnt = isert_build_rdma_wr(isert_conn, isert_cmd, ib_sge,
- send_wr, data_len, offset);
- ib_sge += ib_sge_cnt;
+ /*
+ * Build isert_conn->tx_desc for iSCSI response PDU and attach
+ */
+ isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
+ iscsit_build_rsp_pdu(cmd, conn, false, (struct iscsi_scsi_rsp *)
+ &isert_cmd->tx_desc.iscsi_header);
+ isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
+ isert_init_send_wr(isert_cmd, &isert_cmd->tx_desc.send_wr);
- if (i + 1 == wr->send_wr_num)
- send_wr->send_flags = IB_SEND_SIGNALED;
- else
- send_wr->next = &wr->send_wr[i + 1];
+ atomic_inc(&isert_conn->post_send_buf_count);
- offset += data_len;
- va_offset += data_len;
- data_left -= data_len;
+ rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
+ if (rc) {
+ pr_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n");
+ atomic_dec(&isert_conn->post_send_buf_count);
+ }
+ pr_debug("Cmd: %p posted RDMA_WRITE + Response for iSER Data READ\n",
+ isert_cmd);
+
+ return 1;
+}
+
+static int
+isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
+{
+ struct se_cmd *se_cmd = &cmd->se_cmd;
+ struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
+ struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
+ struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
+ struct isert_device *device = isert_conn->conn_device;
+ struct ib_send_wr *wr_failed;
+ int rc;
+
+ pr_debug("Cmd: %p RDMA_READ data_length: %u write_data_done: %u\n",
+ isert_cmd, se_cmd->data_length, cmd->write_data_done);
+ wr->iser_ib_op = ISER_IB_RDMA_READ;
+ rc = device->reg_rdma_mem(conn, cmd, wr);
+ if (rc) {
+ pr_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
+ return rc;
}
atomic_inc(&isert_conn->post_send_buf_count);
@@ -2050,12 +2351,10 @@ isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
pr_warn("ib_post_send() failed for IB_WR_RDMA_READ\n");
atomic_dec(&isert_conn->post_send_buf_count);
}
- pr_debug("Posted RDMA_READ memory for ISER Data WRITE\n");
- return 0;
+ pr_debug("Cmd: %p posted RDMA_READ memory for ISER Data WRITE\n",
+ isert_cmd);
-unmap_sg:
- ib_dma_unmap_sg(ib_dev, sg_start, sg_nents, DMA_FROM_DEVICE);
- return ret;
+ return 0;
}
static int
@@ -2224,6 +2523,14 @@ isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)
int ret;
pr_debug("isert_get_login_rx before conn_login_comp conn: %p\n", conn);
+ /*
+ * For login requests after the first PDU, isert_rx_login_req() will
+ * kick schedule_delayed_work(&conn->login_work) as the packet is
+ * received, which turns this callback from iscsi_target_do_login_rx()
+ * into a NOP.
+ */
+ if (!login->first_request)
+ return 0;
ret = wait_for_completion_interruptible(&isert_conn->conn_login_comp);
if (ret)
@@ -2393,12 +2700,12 @@ static void isert_free_conn(struct iscsi_conn *conn)
static struct iscsit_transport iser_target_transport = {
.name = "IB/iSER",
.transport_type = ISCSI_INFINIBAND,
+ .priv_size = sizeof(struct isert_cmd),
.owner = THIS_MODULE,
.iscsit_setup_np = isert_setup_np,
.iscsit_accept_np = isert_accept_np,
.iscsit_free_np = isert_free_np,
.iscsit_free_conn = isert_free_conn,
- .iscsit_alloc_cmd = isert_alloc_cmd,
.iscsit_get_login_rx = isert_get_login_rx,
.iscsit_put_login_tx = isert_put_login_tx,
.iscsit_immediate_queue = isert_immediate_queue,
@@ -2425,21 +2732,10 @@ static int __init isert_init(void)
goto destroy_rx_wq;
}
- isert_cmd_cache = kmem_cache_create("isert_cmd_cache",
- sizeof(struct isert_cmd), __alignof__(struct isert_cmd),
- 0, NULL);
- if (!isert_cmd_cache) {
- pr_err("Unable to create isert_cmd_cache\n");
- ret = -ENOMEM;
- goto destroy_tx_cq;
- }
-
iscsit_register_transport(&iser_target_transport);
pr_debug("iSER_TARGET[0] - Loaded iser_target_transport\n");
return 0;
-destroy_tx_cq:
- destroy_workqueue(isert_comp_wq);
destroy_rx_wq:
destroy_workqueue(isert_rx_wq);
return ret;
@@ -2447,7 +2743,6 @@ destroy_rx_wq:
static void __exit isert_exit(void)
{
- kmem_cache_destroy(isert_cmd_cache);
destroy_workqueue(isert_comp_wq);
destroy_workqueue(isert_rx_wq);
iscsit_unregister_transport(&iser_target_transport);
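
Taken together, the ib_isert.c changes build a per-device method table: isert_create_device_ib_res() selects the registration scheme once, and the data path calls through the pointers without knowing which scheme is active. In outline (names as in the hunks above):

/* setup, once per device:
 *	if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
 *		device->reg_rdma_mem   = isert_reg_rdma_frwr;
 *		device->unreg_rdma_mem = isert_unreg_rdma_frwr;
 *	} else {
 *		device->reg_rdma_mem   = isert_map_rdma;
 *		device->unreg_rdma_mem = isert_unmap_cmd;
 *	}
 *
 * data path, per command:
 *	wr->iser_ib_op = ISER_IB_RDMA_WRITE;	// or ISER_IB_RDMA_READ
 *	rc = device->reg_rdma_mem(conn, cmd, wr);
 *	...
 *	device->unreg_rdma_mem(isert_cmd, isert_conn);
 */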
diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
index 191117b..631f209 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.h
+++ b/drivers/infiniband/ulp/isert/ib_isert.h
@@ -5,6 +5,7 @@
#include <rdma/rdma_cm.h>
#define ISERT_RDMA_LISTEN_BACKLOG 10
+#define ISCSI_ISER_SG_TABLESIZE 256
enum isert_desc_type {
ISCSI_TX_CONTROL,
@@ -45,15 +46,26 @@ struct iser_tx_desc {
struct ib_send_wr send_wr;
} __packed;
+struct fast_reg_descriptor {
+ struct list_head list;
+ struct ib_mr *data_mr;
+ struct ib_fast_reg_page_list *data_frpl;
+ bool valid;
+};
+
struct isert_rdma_wr {
struct list_head wr_list;
struct isert_cmd *isert_cmd;
enum iser_ib_op_code iser_ib_op;
struct ib_sge *ib_sge;
+ struct ib_sge s_ib_sge;
int num_sge;
struct scatterlist *sge;
int send_wr_num;
struct ib_send_wr *send_wr;
+ struct ib_send_wr s_send_wr;
+ u32 cur_rdma_length;
+ struct fast_reg_descriptor *fr_desc;
};
struct isert_cmd {
@@ -67,8 +79,7 @@ struct isert_cmd {
u32 write_va_off;
u32 rdma_wr_num;
struct isert_conn *conn;
- struct iscsi_cmd iscsi_cmd;
- struct ib_sge *ib_sge;
+ struct iscsi_cmd *iscsi_cmd;
struct iser_tx_desc tx_desc;
struct isert_rdma_wr rdma_wr;
struct work_struct comp_work;
@@ -106,6 +117,10 @@ struct isert_conn {
wait_queue_head_t conn_wait;
wait_queue_head_t conn_wait_comp_err;
struct kref conn_kref;
+ struct list_head conn_frwr_pool;
+ int conn_frwr_pool_size;
+ /* lock to protect frwr_pool */
+ spinlock_t conn_lock;
};
#define ISERT_MAX_CQ 64
@@ -118,6 +133,7 @@ struct isert_cq_desc {
};
struct isert_device {
+ int use_frwr;
int cqs_used;
int refcount;
int cq_active_qps[ISERT_MAX_CQ];
@@ -128,6 +144,12 @@ struct isert_device {
struct ib_cq *dev_tx_cq[ISERT_MAX_CQ];
struct isert_cq_desc *cq_desc;
struct list_head dev_node;
+ struct ib_device_attr dev_attr;
+ int (*reg_rdma_mem)(struct iscsi_conn *conn,
+ struct iscsi_cmd *cmd,
+ struct isert_rdma_wr *wr);
+ void (*unreg_rdma_mem)(struct isert_cmd *isert_cmd,
+ struct isert_conn *isert_conn);
};
struct isert_np {
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index d2b34fb..b6ded17 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -48,6 +48,7 @@ struct evdev_client {
struct evdev *evdev;
struct list_head node;
int clkid;
+ bool revoked;
unsigned int bufsize;
struct input_event buffer[];
};
@@ -164,6 +165,9 @@ static void evdev_pass_values(struct evdev_client *client,
struct input_event event;
bool wakeup = false;
+ if (client->revoked)
+ return;
+
event.time = ktime_to_timeval(client->clkid == CLOCK_MONOTONIC ?
mono : real);
@@ -240,7 +244,7 @@ static int evdev_flush(struct file *file, fl_owner_t id)
if (retval)
return retval;
- if (!evdev->exist)
+ if (!evdev->exist || client->revoked)
retval = -ENODEV;
else
retval = input_flush_device(&evdev->handle, file);
@@ -429,7 +433,7 @@ static ssize_t evdev_write(struct file *file, const char __user *buffer,
if (retval)
return retval;
- if (!evdev->exist) {
+ if (!evdev->exist || client->revoked) {
retval = -ENODEV;
goto out;
}
@@ -482,7 +486,7 @@ static ssize_t evdev_read(struct file *file, char __user *buffer,
return -EINVAL;
for (;;) {
- if (!evdev->exist)
+ if (!evdev->exist || client->revoked)
return -ENODEV;
if (client->packet_head == client->tail &&
@@ -511,7 +515,7 @@ static ssize_t evdev_read(struct file *file, char __user *buffer,
if (!(file->f_flags & O_NONBLOCK)) {
error = wait_event_interruptible(evdev->wait,
client->packet_head != client->tail ||
- !evdev->exist);
+ !evdev->exist || client->revoked);
if (error)
return error;
}
@@ -529,7 +533,11 @@ static unsigned int evdev_poll(struct file *file, poll_table *wait)
poll_wait(file, &evdev->wait, wait);
- mask = evdev->exist ? POLLOUT | POLLWRNORM : POLLHUP | POLLERR;
+ if (evdev->exist && !client->revoked)
+ mask = POLLOUT | POLLWRNORM;
+ else
+ mask = POLLHUP | POLLERR;
+
if (client->packet_head != client->tail)
mask |= POLLIN | POLLRDNORM;
@@ -795,6 +803,17 @@ static int evdev_handle_mt_request(struct input_dev *dev,
return 0;
}
+static int evdev_revoke(struct evdev *evdev, struct evdev_client *client,
+ struct file *file)
+{
+ client->revoked = true;
+ evdev_ungrab(evdev, client);
+ input_flush_device(&evdev->handle, file);
+ wake_up_interruptible(&evdev->wait);
+
+ return 0;
+}
+
static long evdev_do_ioctl(struct file *file, unsigned int cmd,
void __user *p, int compat_mode)
{
@@ -857,6 +876,12 @@ static long evdev_do_ioctl(struct file *file, unsigned int cmd,
else
return evdev_ungrab(evdev, client);
+ case EVIOCREVOKE:
+ if (p)
+ return -EINVAL;
+ else
+ return evdev_revoke(evdev, client, file);
+
case EVIOCSCLOCKID:
if (copy_from_user(&i, p, sizeof(unsigned int)))
return -EFAULT;
@@ -1002,7 +1027,7 @@ static long evdev_ioctl_handler(struct file *file, unsigned int cmd,
if (retval)
return retval;
- if (!evdev->exist) {
+ if (!evdev->exist || client->revoked) {
retval = -ENODEV;
goto out;
}
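The revoke path added above is driven entirely from userspace: once EVIOCREVOKE succeeds, every read, write, poll and ioctl on that file description behaves as if the device were unplugged, without affecting other clients. A minimal usage sketch follows; note the kernel rejects any non-NULL argument with -EINVAL, and the ioctl number comes from the uapi change elsewhere in this series (the fallback define below is an assumption, not part of this hunk):

    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/input.h>

    #ifndef EVIOCREVOKE
    #define EVIOCREVOKE _IOW('E', 0x91, int)  /* assumed value from the uapi patch */
    #endif

    int main(void)
    {
            int fd = open("/dev/input/event0", O_RDWR);

            if (fd < 0)
                    return 1;

            /* The argument must be NULL; anything else yields -EINVAL. */
            if (ioctl(fd, EVIOCREVOKE, NULL))
                    perror("EVIOCREVOKE");

            /* From here on, read()/write() on fd fail with -ENODEV. */
            close(fd);
            return 0;
    }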
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index 269d4c3..c1edd39 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -224,7 +224,7 @@ config KEYBOARD_TCA6416
config KEYBOARD_TCA8418
tristate "TCA8418 Keypad Support"
- depends on I2C && GENERIC_HARDIRQS
+ depends on I2C
select INPUT_MATRIXKMAP
help
This driver implements basic keypad functionality
@@ -303,7 +303,7 @@ config KEYBOARD_HP7XX
config KEYBOARD_LM8323
tristate "LM8323 keypad chip"
- depends on I2C && GENERIC_HARDIRQS
+ depends on I2C
depends on LEDS_CLASS
help
If you say yes here you get support for the National Semiconductor
diff --git a/drivers/input/serio/Kconfig b/drivers/input/serio/Kconfig
index 1e691a3..33b3e88 100644
--- a/drivers/input/serio/Kconfig
+++ b/drivers/input/serio/Kconfig
@@ -239,7 +239,6 @@ config SERIO_PS2MULT
config SERIO_ARC_PS2
tristate "ARC PS/2 support"
- depends on GENERIC_HARDIRQS
help
Say Y here if you have an ARC FPGA platform with a PS/2
controller in it.
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index 3b9758b..e09ec67 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -389,7 +389,7 @@ config TOUCHSCREEN_MCS5000
config TOUCHSCREEN_MMS114
tristate "MELFAS MMS114 touchscreen"
- depends on I2C && GENERIC_HARDIRQS
+ depends on I2C
help
Say Y here if you have the MELFAS MMS114 touchscreen controller
chip in your system.
@@ -845,7 +845,7 @@ config TOUCHSCREEN_TSC_SERIO
config TOUCHSCREEN_TSC2005
tristate "TSC2005 based touchscreens"
- depends on SPI_MASTER && GENERIC_HARDIRQS
+ depends on SPI_MASTER
help
Say Y here if you have a TSC2005 based touchscreen.
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 820d85c..fe302e3 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -17,6 +17,16 @@ config OF_IOMMU
def_bool y
depends on OF
+config FSL_PAMU
+ bool "Freescale IOMMU support"
+ depends on PPC_E500MC
+ select IOMMU_API
+ select GENERIC_ALLOCATOR
+ help
+ Freescale PAMU support. PAMU is the IOMMU present on Freescale QorIQ platforms.
+ PAMU can authorize memory accesses, remap memory addresses, and remap I/O
+ transaction types.
+
# MSM IOMMU support
config MSM_IOMMU
bool "MSM IOMMU Support"
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index bbe7041..14c1f47 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -16,3 +16,4 @@ obj-$(CONFIG_TEGRA_IOMMU_SMMU) += tegra-smmu.o
obj-$(CONFIG_EXYNOS_IOMMU) += exynos-iommu.o
obj-$(CONFIG_SHMOBILE_IOMMU) += shmobile-iommu.o
obj-$(CONFIG_SHMOBILE_IPMMU) += shmobile-ipmmu.o
+obj-$(CONFIG_FSL_PAMU) += fsl_pamu.o fsl_pamu_domain.o
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 6dc6594..72531f0 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -456,8 +456,10 @@ static int iommu_init_device(struct device *dev)
}
ret = init_iommu_group(dev);
- if (ret)
+ if (ret) {
+ free_dev_data(dev_data);
return ret;
+ }
if (pci_iommuv2_capable(pdev)) {
struct amd_iommu *iommu;
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index 7acbf35..8f798be 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -1384,7 +1384,7 @@ static int iommu_init_msi(struct amd_iommu *iommu)
if (iommu->int_enabled)
goto enable_faults;
- if (pci_find_capability(iommu->dev, PCI_CAP_ID_MSI))
+ if (iommu->dev->msi_cap)
ret = iommu_setup_msi(iommu);
else
ret = -ENODEV;
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index ebd0a4c..f417e89 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -56,9 +56,6 @@
/* Maximum number of mapping groups per SMMU */
#define ARM_SMMU_MAX_SMRS 128
-/* Number of VMIDs per SMMU */
-#define ARM_SMMU_NUM_VMIDS 256
-
/* SMMU global address space */
#define ARM_SMMU_GR0(smmu) ((smmu)->base)
#define ARM_SMMU_GR1(smmu) ((smmu)->base + (smmu)->pagesize)
@@ -87,6 +84,7 @@
#define ARM_SMMU_PTE_AP_UNPRIV (((pteval_t)1) << 6)
#define ARM_SMMU_PTE_AP_RDONLY (((pteval_t)2) << 6)
#define ARM_SMMU_PTE_ATTRINDX_SHIFT 2
+#define ARM_SMMU_PTE_nG (((pteval_t)1) << 11)
/* Stage-2 PTE */
#define ARM_SMMU_PTE_HAP_FAULT (((pteval_t)0) << 6)
@@ -223,6 +221,7 @@
#define ARM_SMMU_CB_FAR_LO 0x60
#define ARM_SMMU_CB_FAR_HI 0x64
#define ARM_SMMU_CB_FSYNR0 0x68
+#define ARM_SMMU_CB_S1_TLBIASID 0x610
#define SCTLR_S1_ASIDPNE (1 << 12)
#define SCTLR_CFCFG (1 << 7)
@@ -282,6 +281,8 @@
#define TTBCR2_ADDR_44 4
#define TTBCR2_ADDR_48 5
+#define TTBRn_HI_ASID_SHIFT 16
+
#define MAIR_ATTR_SHIFT(n) ((n) << 3)
#define MAIR_ATTR_MASK 0xff
#define MAIR_ATTR_DEVICE 0x04
@@ -305,7 +306,7 @@
#define FSR_IGN (FSR_AFF | FSR_ASF | FSR_TLBMCF | \
FSR_TLBLKF)
#define FSR_FAULT (FSR_MULTI | FSR_SS | FSR_UUT | \
- FSR_EF | FSR_PF | FSR_TF)
+ FSR_EF | FSR_PF | FSR_TF | FSR_IGN)
#define FSYNR0_WNR (1 << 4)
@@ -365,21 +366,21 @@ struct arm_smmu_device {
u32 num_context_irqs;
unsigned int *irqs;
- DECLARE_BITMAP(vmid_map, ARM_SMMU_NUM_VMIDS);
-
struct list_head list;
struct rb_root masters;
};
struct arm_smmu_cfg {
struct arm_smmu_device *smmu;
- u8 vmid;
u8 cbndx;
u8 irptndx;
u32 cbar;
pgd_t *pgd;
};
+#define ARM_SMMU_CB_ASID(cfg) ((cfg)->cbndx)
+#define ARM_SMMU_CB_VMID(cfg) ((cfg)->cbndx + 1)
+
struct arm_smmu_domain {
/*
* A domain can span across multiple, chained SMMUs and requires
@@ -533,6 +534,25 @@ static void arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
}
}
+static void arm_smmu_tlb_inv_context(struct arm_smmu_cfg *cfg)
+{
+ struct arm_smmu_device *smmu = cfg->smmu;
+ void __iomem *base = ARM_SMMU_GR0(smmu);
+ bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
+
+ if (stage1) {
+ base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
+ writel_relaxed(ARM_SMMU_CB_ASID(cfg),
+ base + ARM_SMMU_CB_S1_TLBIASID);
+ } else {
+ base = ARM_SMMU_GR0(smmu);
+ writel_relaxed(ARM_SMMU_CB_VMID(cfg),
+ base + ARM_SMMU_GR0_TLBIVMID);
+ }
+
+ arm_smmu_tlb_sync(smmu);
+}
+
static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
{
int flags, ret;
@@ -590,6 +610,9 @@ static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
+ if (!gfsr)
+ return IRQ_NONE;
+
gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);
@@ -601,7 +624,7 @@ static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
gfsr, gfsynr0, gfsynr1, gfsynr2);
writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
- return IRQ_NONE;
+ return IRQ_HANDLED;
}
static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
@@ -618,14 +641,15 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, root_cfg->cbndx);
/* CBAR */
- reg = root_cfg->cbar |
- (root_cfg->vmid << CBAR_VMID_SHIFT);
+ reg = root_cfg->cbar;
if (smmu->version == 1)
reg |= root_cfg->irptndx << CBAR_IRPTNDX_SHIFT;
/* Use the weakest memory type, so it is overridden by the pte */
if (stage1)
reg |= (CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
+ else
+ reg |= ARM_SMMU_CB_VMID(root_cfg) << CBAR_VMID_SHIFT;
writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(root_cfg->cbndx));
if (smmu->version > 1) {
@@ -687,15 +711,11 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
/* TTBR0 */
reg = __pa(root_cfg->pgd);
-#ifndef __BIG_ENDIAN
writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_LO);
reg = (phys_addr_t)__pa(root_cfg->pgd) >> 32;
+ if (stage1)
+ reg |= ARM_SMMU_CB_ASID(root_cfg) << TTBRn_HI_ASID_SHIFT;
writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_HI);
-#else
- writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_HI);
- reg = (phys_addr_t)__pa(root_cfg->pgd) >> 32;
- writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_LO);
-#endif
/*
* TTBCR
@@ -750,10 +770,6 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0);
}
- /* Nuke the TLB */
- writel_relaxed(root_cfg->vmid, gr0_base + ARM_SMMU_GR0_TLBIVMID);
- arm_smmu_tlb_sync(smmu);
-
/* SCTLR */
reg = SCTLR_CFCFG | SCTLR_CFIE | SCTLR_CFRE | SCTLR_M | SCTLR_EAE_SBOP;
if (stage1)
@@ -790,11 +806,6 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
return -ENODEV;
}
- ret = __arm_smmu_alloc_bitmap(smmu->vmid_map, 0, ARM_SMMU_NUM_VMIDS);
- if (IS_ERR_VALUE(ret))
- return ret;
-
- root_cfg->vmid = ret;
if (smmu->features & ARM_SMMU_FEAT_TRANS_NESTED) {
/*
* We will likely want to change this if/when KVM gets
@@ -813,10 +824,9 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
smmu->num_context_banks);
if (IS_ERR_VALUE(ret))
- goto out_free_vmid;
+ return ret;
root_cfg->cbndx = ret;
-
if (smmu->version == 1) {
root_cfg->irptndx = atomic_inc_return(&smmu->irptndx);
root_cfg->irptndx %= smmu->num_context_irqs;
@@ -840,8 +850,6 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
out_free_context:
__arm_smmu_free_bitmap(smmu->context_map, root_cfg->cbndx);
-out_free_vmid:
- __arm_smmu_free_bitmap(smmu->vmid_map, root_cfg->vmid);
return ret;
}
@@ -850,17 +858,22 @@ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
struct arm_smmu_domain *smmu_domain = domain->priv;
struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg;
struct arm_smmu_device *smmu = root_cfg->smmu;
+ void __iomem *cb_base;
int irq;
if (!smmu)
return;
+ /* Disable the context bank and nuke the TLB before freeing it. */
+ cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, root_cfg->cbndx);
+ writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
+ arm_smmu_tlb_inv_context(root_cfg);
+
if (root_cfg->irptndx != -1) {
irq = smmu->irqs[smmu->num_global_irqs + root_cfg->irptndx];
free_irq(irq, domain);
}
- __arm_smmu_free_bitmap(smmu->vmid_map, root_cfg->vmid);
__arm_smmu_free_bitmap(smmu->context_map, root_cfg->cbndx);
}
@@ -959,6 +972,11 @@ static void arm_smmu_free_pgtables(struct arm_smmu_domain *smmu_domain)
static void arm_smmu_domain_destroy(struct iommu_domain *domain)
{
struct arm_smmu_domain *smmu_domain = domain->priv;
+
+ /*
+ * Free the domain resources. We assume that all devices have
+ * already been detached.
+ */
arm_smmu_destroy_domain_context(domain);
arm_smmu_free_pgtables(smmu_domain);
kfree(smmu_domain);
@@ -1199,7 +1217,7 @@ static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd,
}
if (stage == 1) {
- pteval |= ARM_SMMU_PTE_AP_UNPRIV;
+ pteval |= ARM_SMMU_PTE_AP_UNPRIV | ARM_SMMU_PTE_nG;
if (!(flags & IOMMU_WRITE) && (flags & IOMMU_READ))
pteval |= ARM_SMMU_PTE_AP_RDONLY;
@@ -1415,13 +1433,9 @@ static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
{
int ret;
struct arm_smmu_domain *smmu_domain = domain->priv;
- struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg;
- struct arm_smmu_device *smmu = root_cfg->smmu;
- void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
ret = arm_smmu_handle_mapping(smmu_domain, iova, 0, size, 0);
- writel_relaxed(root_cfg->vmid, gr0_base + ARM_SMMU_GR0_TLBIVMID);
- arm_smmu_tlb_sync(smmu);
+ arm_smmu_tlb_inv_context(&smmu_domain->root_cfg);
return ret ? ret : size;
}
@@ -1544,6 +1558,7 @@ static struct iommu_ops arm_smmu_ops = {
static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
{
void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
+ void __iomem *sctlr_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB_SCTLR;
int i = 0;
u32 scr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sCR0);
@@ -1553,6 +1568,10 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
writel_relaxed(S2CR_TYPE_BYPASS, gr0_base + ARM_SMMU_GR0_S2CR(i));
}
+ /* Make sure all context banks are disabled */
+ for (i = 0; i < smmu->num_context_banks; ++i)
+ writel_relaxed(0, sctlr_base + ARM_SMMU_CB(smmu, i));
+
/* Invalidate the TLB, just in case */
writel_relaxed(0, gr0_base + ARM_SMMU_GR0_STLBIALL);
writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
@@ -1906,7 +1925,7 @@ static int arm_smmu_device_remove(struct platform_device *pdev)
of_node_put(master->of_node);
}
- if (!bitmap_empty(smmu->vmid_map, ARM_SMMU_NUM_VMIDS))
+ if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
dev_err(dev, "removing device with active domains!\n");
for (i = 0; i < smmu->num_global_irqs; ++i)
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index 3f32d64..0740189 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -247,50 +247,6 @@ static void __sysmmu_set_prefbuf(void __iomem *sfrbase, unsigned long base,
__raw_writel(size - 1 + base, sfrbase + REG_PB0_EADDR + idx * 8);
}
-void exynos_sysmmu_set_prefbuf(struct device *dev,
- unsigned long base0, unsigned long size0,
- unsigned long base1, unsigned long size1)
-{
- struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
- unsigned long flags;
- int i;
-
- BUG_ON((base0 + size0) <= base0);
- BUG_ON((size1 > 0) && ((base1 + size1) <= base1));
-
- read_lock_irqsave(&data->lock, flags);
- if (!is_sysmmu_active(data))
- goto finish;
-
- for (i = 0; i < data->nsfrs; i++) {
- if ((readl(data->sfrbases[i] + REG_MMU_VERSION) >> 28) == 3) {
- if (!sysmmu_block(data->sfrbases[i]))
- continue;
-
- if (size1 == 0) {
- if (size0 <= SZ_128K) {
- base1 = base0;
- size1 = size0;
- } else {
- size1 = size0 -
- ALIGN(size0 / 2, SZ_64K);
- size0 = size0 - size1;
- base1 = base0 + size0;
- }
- }
-
- __sysmmu_set_prefbuf(
- data->sfrbases[i], base0, size0, 0);
- __sysmmu_set_prefbuf(
- data->sfrbases[i], base1, size1, 1);
-
- sysmmu_unblock(data->sfrbases[i]);
- }
- }
-finish:
- read_unlock_irqrestore(&data->lock, flags);
-}
-
static void __set_fault_handler(struct sysmmu_drvdata *data,
sysmmu_fault_handler_t handler)
{
diff --git a/drivers/iommu/fsl_pamu.c b/drivers/iommu/fsl_pamu.c
new file mode 100644
index 0000000..cba0498
--- /dev/null
+++ b/drivers/iommu/fsl_pamu.c
@@ -0,0 +1,1309 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) 2013 Freescale Semiconductor, Inc.
+ *
+ */
+
+#define pr_fmt(fmt) "fsl-pamu: %s: " fmt, __func__
+
+#include <linux/init.h>
+#include <linux/iommu.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/of_platform.h>
+#include <linux/bootmem.h>
+#include <linux/genalloc.h>
+#include <asm/io.h>
+#include <asm/bitops.h>
+#include <asm/fsl_guts.h>
+
+#include "fsl_pamu.h"
+
+/* define indexes for each operation mapping scenario */
+#define OMI_QMAN 0x00
+#define OMI_FMAN 0x01
+#define OMI_QMAN_PRIV 0x02
+#define OMI_CAAM 0x03
+
+#define make64(high, low) (((u64)(high) << 32) | (low))
+
+struct pamu_isr_data {
+ void __iomem *pamu_reg_base; /* Base address of PAMU regs */
+ unsigned int count; /* The number of PAMUs */
+};
+
+static struct paace *ppaact;
+static struct paace *spaact;
+static struct ome *omt;
+
+/*
+ * Table of compatible strings used to match the device-tree
+ * "guts" node on QorIQ SOCs.
+ * "fsl,qoriq-device-config-2.0" corresponds to the T4 and B4
+ * SOCs; the older SOCs use the
+ * "fsl,qoriq-device-config-1.0" string.
+ */
+static const struct of_device_id guts_device_ids[] = {
+ { .compatible = "fsl,qoriq-device-config-1.0", },
+ { .compatible = "fsl,qoriq-device-config-2.0", },
+ {}
+};
+
+
+/*
+ * Table of compatible strings used to match the device-tree
+ * L3 cache controller node.
+ * "fsl,t4240-l3-cache-controller" corresponds to T4,
+ * "fsl,b4860-l3-cache-controller" corresponds to B4 and
+ * "fsl,p4080-l3-cache-controller" corresponds to the other
+ * SOCs.
+ */
+static const struct of_device_id l3_device_ids[] = {
+ { .compatible = "fsl,t4240-l3-cache-controller", },
+ { .compatible = "fsl,b4860-l3-cache-controller", },
+ { .compatible = "fsl,p4080-l3-cache-controller", },
+ {}
+};
+
+/* maximum subwindows permitted per liodn */
+static u32 max_subwindow_count;
+
+/* Pool for fspi allocation */
+struct gen_pool *spaace_pool;
+
+/**
+ * pamu_get_max_subwin_cnt() - Return the maximum supported
+ * subwindow count per liodn.
+ *
+ */
+u32 pamu_get_max_subwin_cnt(void)
+{
+ return max_subwindow_count;
+}
+
+/**
+ * pamu_get_ppaace() - Return the primary PAACE
+ * @liodn: liodn PAACT index for desired PAACE
+ *
+ * Returns the ppaace pointer upon success, otherwise returns
+ * null.
+ */
+static struct paace *pamu_get_ppaace(int liodn)
+{
+ if (!ppaact || liodn >= PAACE_NUMBER_ENTRIES) {
+ pr_debug("PPAACT doesn't exist\n");
+ return NULL;
+ }
+
+ return &ppaact[liodn];
+}
+
+/**
+ * pamu_enable_liodn() - Set the valid bit of the PAACE
+ * @liodn: liodn PAACT index for desired PAACE
+ *
+ * Returns 0 upon success, otherwise an error code < 0 is returned
+ */
+int pamu_enable_liodn(int liodn)
+{
+ struct paace *ppaace;
+
+ ppaace = pamu_get_ppaace(liodn);
+ if (!ppaace) {
+ pr_debug("Invalid primary paace entry\n");
+ return -ENOENT;
+ }
+
+ if (!get_bf(ppaace->addr_bitfields, PPAACE_AF_WSE)) {
+ pr_debug("liodn %d not configured\n", liodn);
+ return -EINVAL;
+ }
+
+ /* Ensure that all other stores to the ppaace complete first */
+ mb();
+
+ set_bf(ppaace->addr_bitfields, PAACE_AF_V, PAACE_V_VALID);
+ mb();
+
+ return 0;
+}
+
+/**
+ * pamu_disable_liodn() - Clear the valid bit of the PAACE
+ * @liodn: liodn PAACT index for desired PAACE
+ *
+ * Returns 0 upon success, otherwise an error code < 0 is returned
+ */
+int pamu_disable_liodn(int liodn)
+{
+ struct paace *ppaace;
+
+ ppaace = pamu_get_ppaace(liodn);
+ if (!ppaace) {
+ pr_debug("Invalid primary paace entry\n");
+ return -ENOENT;
+ }
+
+ set_bf(ppaace->addr_bitfields, PAACE_AF_V, PAACE_V_INVALID);
+ mb();
+
+ return 0;
+}
+
+/* Derive the window size encoding for a particular PAACE entry */
+static unsigned int map_addrspace_size_to_wse(phys_addr_t addrspace_size)
+{
+ /* Bug if not a power of 2 */
+ BUG_ON(!is_power_of_2(addrspace_size));
+
+ /* window size is 2^(WSE+1) bytes */
+ return __ffs(addrspace_size) - 1;
+}
+
+/* Derive the PAACE window count encoding for the subwindow count */
+static unsigned int map_subwindow_cnt_to_wce(u32 subwindow_cnt)
+{
+ /* subwindow count is 2^(WCE+1) */
+ return __ffs(subwindow_cnt) - 1;
+}
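To make the two encodings concrete: a 4 KiB window gives __ffs(4096) - 1 = 11 = PAACE_WSE_4K, and the WSE value of 35 programmed later in setup_liodns() describes a 2^36-byte (64 GiB) window spanning the full 36-bit physical address space. A small host-side sanity check of the same arithmetic (a sketch, using the compiler builtin in place of the kernel's __ffs):

    /* Sketch: mirrors map_addrspace_size_to_wse(); window size is 2^(WSE+1). */
    #include <assert.h>

    static unsigned int wse(unsigned long long size)   /* size: a power of 2 */
    {
            return __builtin_ctzll(size) - 1;
    }

    int main(void)
    {
            assert(wse(4096) == 0xB);          /* PAACE_WSE_4K */
            assert(wse(1ULL << 36) == 35);     /* value used in setup_liodns() */
            return 0;
    }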
+
+/*
+ * Set the PAACE type as primary and set the coherency required domain
+ * attribute
+ */
+static void pamu_init_ppaace(struct paace *ppaace)
+{
+ set_bf(ppaace->addr_bitfields, PAACE_AF_PT, PAACE_PT_PRIMARY);
+
+ set_bf(ppaace->domain_attr.to_host.coherency_required, PAACE_DA_HOST_CR,
+ PAACE_M_COHERENCE_REQ);
+}
+
+/*
+ * Set the PAACE type as secondary and set the coherency required domain
+ * attribute.
+ */
+static void pamu_init_spaace(struct paace *spaace)
+{
+ set_bf(spaace->addr_bitfields, PAACE_AF_PT, PAACE_PT_SECONDARY);
+ set_bf(spaace->domain_attr.to_host.coherency_required, PAACE_DA_HOST_CR,
+ PAACE_M_COHERENCE_REQ);
+}
+
+/*
+ * Return the spaace (corresponding to the secondary window index)
+ * for a particular ppaace.
+ */
+static struct paace *pamu_get_spaace(struct paace *paace, u32 wnum)
+{
+ u32 subwin_cnt;
+ struct paace *spaace = NULL;
+
+ subwin_cnt = 1UL << (get_bf(paace->impl_attr, PAACE_IA_WCE) + 1);
+
+ if (wnum < subwin_cnt)
+ spaace = &spaact[paace->fspi + wnum];
+ else
+ pr_debug("secondary paace out of bounds\n");
+
+ return spaace;
+}
+
+/**
+ * pamu_get_fspi_and_allocate() - Allocates fspi index and reserves subwindows
+ * required for primary PAACE in the secondary
+ * PAACE table.
+ * @subwin_cnt: Number of subwindows to be reserved.
+ *
+ * A PPAACE entry may have a number of associated subwindows. A subwindow
+ * corresponds to a SPAACE entry in the SPAACT table. Each PAACE entry stores
+ * the index (fspi) of the first SPAACE entry in the SPAACT table. This
+ * function returns the index of the first SPAACE entry. The remaining
+ * SPAACE entries are reserved contiguously from that index.
+ *
+ * Returns a valid fspi index in the range of 0 - SPAACE_NUMBER_ENTRIES on success.
+ * If no SPAACE entry is available or the allocator cannot reserve the required
+ * number of contiguous entries, the function returns ULONG_MAX to indicate failure.
+ */
+static unsigned long pamu_get_fspi_and_allocate(u32 subwin_cnt)
+{
+ unsigned long spaace_addr;
+
+ spaace_addr = gen_pool_alloc(spaace_pool, subwin_cnt * sizeof(struct paace));
+ if (!spaace_addr)
+ return ULONG_MAX;
+
+ return (spaace_addr - (unsigned long)spaact) / (sizeof(struct paace));
+}
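The address-to-index division above is exact because the pool is created with gen_pool_create(ilog2(sizeof(struct paace)), -1) over the SPAACT region (see fsl_pamu_probe() below), so every allocation is a whole number of paace-aligned entries. A condensed sketch of the same round trip (kernel context, <linux/genalloc.h> assumed; the helper name is illustrative):

    /* Sketch: build the SPAACE pool, reserve subwindows, derive the fspi index. */
    static int example_spaace_pool(struct paace *spaact_base)
    {
            struct gen_pool *pool;
            unsigned long addr, fspi;

            /* Minimum allocation granule = one SPAACE entry. */
            pool = gen_pool_create(ilog2(sizeof(struct paace)), -1);
            if (!pool)
                    return -ENOMEM;

            /* Hand the whole SPAACT region to the allocator. */
            if (gen_pool_add(pool, (unsigned long)spaact_base, SPAACT_SIZE, -1)) {
                    gen_pool_destroy(pool);
                    return -ENOMEM;
            }

            /* Reserve 7 contiguous entries (an 8-subwindow primary window). */
            addr = gen_pool_alloc(pool, 7 * sizeof(struct paace));
            if (addr) {
                    fspi = (addr - (unsigned long)spaact_base) / sizeof(struct paace);
                    pr_debug("first secondary index: %lu\n", fspi);
                    /* Release them again, as pamu_free_subwins() does. */
                    gen_pool_free(pool, addr, 7 * sizeof(struct paace));
            }

            gen_pool_destroy(pool);
            return 0;
    }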
+
+/* Release the subwindows reserved for a particular LIODN */
+void pamu_free_subwins(int liodn)
+{
+ struct paace *ppaace;
+ u32 subwin_cnt, size;
+
+ ppaace = pamu_get_ppaace(liodn);
+ if (!ppaace) {
+ pr_debug("Invalid liodn entry\n");
+ return;
+ }
+
+ if (get_bf(ppaace->addr_bitfields, PPAACE_AF_MW)) {
+ subwin_cnt = 1UL << (get_bf(ppaace->impl_attr, PAACE_IA_WCE) + 1);
+ size = (subwin_cnt - 1) * sizeof(struct paace);
+ gen_pool_free(spaace_pool, (unsigned long)&spaact[ppaace->fspi], size);
+ set_bf(ppaace->addr_bitfields, PPAACE_AF_MW, 0);
+ }
+}
+
+/*
+ * Update the stash destination for the corresponding
+ * LIODN.
+ */
+int pamu_update_paace_stash(int liodn, u32 subwin, u32 value)
+{
+ struct paace *paace;
+
+ paace = pamu_get_ppaace(liodn);
+ if (!paace) {
+ pr_debug("Invalid liodn entry\n");
+ return -ENOENT;
+ }
+ if (subwin) {
+ paace = pamu_get_spaace(paace, subwin - 1);
+ if (!paace)
+ return -ENOENT;
+ }
+ set_bf(paace->impl_attr, PAACE_IA_CID, value);
+
+ mb();
+
+ return 0;
+}
+
+/* Disable a subwindow corresponding to the LIODN */
+int pamu_disable_spaace(int liodn, u32 subwin)
+{
+ struct paace *paace;
+
+ paace = pamu_get_ppaace(liodn);
+ if (!paace) {
+ pr_debug("Invalid liodn entry\n");
+ return -ENOENT;
+ }
+ if (subwin) {
+ paace = pamu_get_spaace(paace, subwin - 1);
+ if (!paace)
+ return -ENOENT;
+ set_bf(paace->addr_bitfields, PAACE_AF_V,
+ PAACE_V_INVALID);
+ } else {
+ set_bf(paace->addr_bitfields, PAACE_AF_AP,
+ PAACE_AP_PERMS_DENIED);
+ }
+
+ mb();
+
+ return 0;
+}
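Both helpers above share one indexing convention: window 0 is the primary PAACE itself, and window n (n > 0) lives at SPAACT entry fspi + (n - 1), hence the subwin - 1 passed to pamu_get_spaace(). A compact restatement (hypothetical helper, same arithmetic as the code above):

    /* Hypothetical helper: resolve a window index to its PAACE/SPAACE entry. */
    static struct paace *pamu_get_win_entry(int liodn, u32 subwin)
    {
            struct paace *paace = pamu_get_ppaace(liodn);

            if (!paace)
                    return NULL;
            if (subwin == 0)
                    return paace;                      /* window 0: primary PAACE */

            return pamu_get_spaace(paace, subwin - 1); /* window n: SPAACT[fspi + n - 1] */
    }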
+
+
+/**
+ * pamu_config_ppaace() - Sets up PPAACE entry for specified liodn
+ *
+ * @liodn: Logical IO device number
+ * @win_addr: starting address of DSA window
+ * @win_size: size of DSA window
+ * @omi: Operation mapping index -- if ~omi == 0 then omi not defined
+ * @rpn: real (true physical) page number
+ * @stashid: cache stash id for associated cpu -- if ~stashid == 0 then
+ * stashid not defined
+ * @snoopid: snoop id for hardware coherency -- if ~snoopid == 0 then
+ * snoopid not defined
+ * @subwin_cnt: number of sub-windows
+ * @prot: window permissions
+ *
+ * Returns 0 upon success, otherwise an error code < 0 is returned
+ */
+int pamu_config_ppaace(int liodn, phys_addr_t win_addr, phys_addr_t win_size,
+ u32 omi, unsigned long rpn, u32 snoopid, u32 stashid,
+ u32 subwin_cnt, int prot)
+{
+ struct paace *ppaace;
+ unsigned long fspi;
+
+ if (!is_power_of_2(win_size) || win_size < PAMU_PAGE_SIZE) {
+ pr_debug("window size too small or not a power of two %llx\n", win_size);
+ return -EINVAL;
+ }
+
+ if (win_addr & (win_size - 1)) {
+ pr_debug("window address is not aligned with window size\n");
+ return -EINVAL;
+ }
+
+ ppaace = pamu_get_ppaace(liodn);
+ if (!ppaace)
+ return -ENOENT;
+
+ /* window size is 2^(WSE+1) bytes */
+ set_bf(ppaace->addr_bitfields, PPAACE_AF_WSE,
+ map_addrspace_size_to_wse(win_size));
+
+ pamu_init_ppaace(ppaace);
+
+ ppaace->wbah = win_addr >> (PAMU_PAGE_SHIFT + 20);
+ set_bf(ppaace->addr_bitfields, PPAACE_AF_WBAL,
+ (win_addr >> PAMU_PAGE_SHIFT));
+
+ /* set up operation mapping if it's configured */
+ if (omi < OME_NUMBER_ENTRIES) {
+ set_bf(ppaace->impl_attr, PAACE_IA_OTM, PAACE_OTM_INDEXED);
+ ppaace->op_encode.index_ot.omi = omi;
+ } else if (~omi != 0) {
+ pr_debug("bad operation mapping index: %d\n", omi);
+ return -EINVAL;
+ }
+
+ /* configure stash id */
+ if (~stashid != 0)
+ set_bf(ppaace->impl_attr, PAACE_IA_CID, stashid);
+
+ /* configure snoop id */
+ if (~snoopid != 0)
+ ppaace->domain_attr.to_host.snpid = snoopid;
+
+ if (subwin_cnt) {
+ /* The first entry is in the primary PAACE instead */
+ fspi = pamu_get_fspi_and_allocate(subwin_cnt - 1);
+ if (fspi == ULONG_MAX) {
+ pr_debug("spaace indexes exhausted\n");
+ return -EINVAL;
+ }
+
+ /* subwindow count is 2^(WCE+1) */
+ set_bf(ppaace->impl_attr, PAACE_IA_WCE,
+ map_subwindow_cnt_to_wce(subwin_cnt));
+ set_bf(ppaace->addr_bitfields, PPAACE_AF_MW, 0x1);
+ ppaace->fspi = fspi;
+ } else {
+ set_bf(ppaace->impl_attr, PAACE_IA_ATM, PAACE_ATM_WINDOW_XLATE);
+ ppaace->twbah = rpn >> 20;
+ set_bf(ppaace->win_bitfields, PAACE_WIN_TWBAL, rpn);
+ set_bf(ppaace->addr_bitfields, PAACE_AF_AP, prot);
+ set_bf(ppaace->impl_attr, PAACE_IA_WCE, 0);
+ set_bf(ppaace->addr_bitfields, PPAACE_AF_MW, 0);
+ }
+ mb();
+
+ return 0;
+}
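A note on the sentinel convention in the kernel-doc above: "~x == 0" means x was passed as ~(u32)0 (all ones), i.e. the attribute is not specified. A hedged caller sketch of a translation-only window with none of the optional attributes set (the NO_* names and the helper are illustrative, not from this patch):

    /* Sketch: map a 64 MiB DSA window 1:1, no OMT/stash/snoop configuration. */
    #define NO_OMI     (~(u32)0)
    #define NO_STASH   (~(u32)0)
    #define NO_SNOOP   (~(u32)0)

    static int example_map_window(int liodn, phys_addr_t base)
    {
            return pamu_config_ppaace(liodn, base, SZ_64M /* <linux/sizes.h> */,
                                      NO_OMI,
                                      base >> PAMU_PAGE_SHIFT,   /* rpn, 1:1 */
                                      NO_SNOOP, NO_STASH,
                                      0 /* no subwindows */,
                                      PAACE_AP_PERMS_ALL);
    }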
+
+/**
+ * pamu_config_spaace() - Sets up SPAACE entry for specified subwindow
+ *
+ * @liodn: Logical IO device number
+ * @subwin_cnt: number of sub-windows associated with dma-window
+ * @subwin: subwindow index
+ * @subwin_size: size of subwindow
+ * @omi: Operation mapping index
+ * @rpn: real (true physical) page number
+ * @snoopid: snoop id for hardware coherency -- if ~snoopid == 0 then
+ * snoopid not defined
+ * @stashid: cache stash id for associated cpu
+ * @enable: enable/disable subwindow after reconfiguration
+ * @prot: sub window permissions
+ *
+ * Returns 0 upon success, otherwise an error code < 0 is returned
+ */
+int pamu_config_spaace(int liodn, u32 subwin_cnt, u32 subwin,
+ phys_addr_t subwin_size, u32 omi, unsigned long rpn,
+ u32 snoopid, u32 stashid, int enable, int prot)
+{
+ struct paace *paace;
+
+
+ /* setup sub-windows */
+ if (!subwin_cnt) {
+ pr_debug("Invalid subwindow count\n");
+ return -EINVAL;
+ }
+
+ paace = pamu_get_ppaace(liodn);
+ if (subwin > 0 && subwin < subwin_cnt && paace) {
+ paace = pamu_get_spaace(paace, subwin - 1);
+
+ if (paace && !(paace->addr_bitfields & PAACE_V_VALID)) {
+ pamu_init_spaace(paace);
+ set_bf(paace->addr_bitfields, SPAACE_AF_LIODN, liodn);
+ }
+ }
+
+ if (!paace) {
+ pr_debug("Invalid liodn entry\n");
+ return -ENOENT;
+ }
+
+ if (!is_power_of_2(subwin_size) || subwin_size < PAMU_PAGE_SIZE) {
+ pr_debug("subwindow size out of range, or not a power of 2\n");
+ return -EINVAL;
+ }
+
+ if (rpn == ULONG_MAX) {
+ pr_debug("real page number out of range\n");
+ return -EINVAL;
+ }
+
+ /* window size is 2^(WSE+1) bytes */
+ set_bf(paace->win_bitfields, PAACE_WIN_SWSE,
+ map_addrspace_size_to_wse(subwin_size));
+
+ set_bf(paace->impl_attr, PAACE_IA_ATM, PAACE_ATM_WINDOW_XLATE);
+ paace->twbah = rpn >> 20;
+ set_bf(paace->win_bitfields, PAACE_WIN_TWBAL, rpn);
+ set_bf(paace->addr_bitfields, PAACE_AF_AP, prot);
+
+ /* configure snoop id */
+ if (~snoopid != 0)
+ paace->domain_attr.to_host.snpid = snoopid;
+
+ /* set up operation mapping if it's configured */
+ if (omi < OME_NUMBER_ENTRIES) {
+ set_bf(paace->impl_attr, PAACE_IA_OTM, PAACE_OTM_INDEXED);
+ paace->op_encode.index_ot.omi = omi;
+ } else if (~omi != 0) {
+ pr_debug("bad operation mapping index: %d\n", omi);
+ return -EINVAL;
+ }
+
+ if (~stashid != 0)
+ set_bf(paace->impl_attr, PAACE_IA_CID, stashid);
+
+ smp_wmb();
+
+ if (enable)
+ set_bf(paace->addr_bitfields, PAACE_AF_V, PAACE_V_VALID);
+
+ mb();
+
+ return 0;
+}
+
+/**
+ * get_ome_index() - Returns the index in the operation mapping table
+ * for the device.
+ * @omi_index: pointer for storing the index value
+ * @dev: target device
+ */
+void get_ome_index(u32 *omi_index, struct device *dev)
+{
+ if (of_device_is_compatible(dev->of_node, "fsl,qman-portal"))
+ *omi_index = OMI_QMAN;
+ if (of_device_is_compatible(dev->of_node, "fsl,qman"))
+ *omi_index = OMI_QMAN_PRIV;
+}
+
+/**
+ * get_stash_id - Returns stash destination id corresponding to a
+ * cache type and vcpu.
+ * @stash_dest_hint: L1, L2 or L3
+ * @vcpu: vcpu target for a particular cache type.
+ *
+ * Returns stash id on success or ~(u32)0 on failure.
+ *
+ */
+u32 get_stash_id(u32 stash_dest_hint, u32 vcpu)
+{
+ const u32 *prop;
+ struct device_node *node;
+ u32 cache_level;
+ int len, found = 0;
+ int i;
+
+ /* Fastpath, exit early if L3/CPC cache is target for stashing */
+ if (stash_dest_hint == PAMU_ATTR_CACHE_L3) {
+ node = of_find_matching_node(NULL, l3_device_ids);
+ if (node) {
+ prop = of_get_property(node, "cache-stash-id", 0);
+ if (!prop) {
+ pr_debug("missing cache-stash-id at %s\n", node->full_name);
+ of_node_put(node);
+ return ~(u32)0;
+ }
+ of_node_put(node);
+ return be32_to_cpup(prop);
+ }
+ return ~(u32)0;
+ }
+
+ for_each_node_by_type(node, "cpu") {
+ prop = of_get_property(node, "reg", &len);
+ if (!prop)
+ continue;
+ for (i = 0; i < len / sizeof(u32); i++) {
+ if (be32_to_cpup(&prop[i]) == vcpu) {
+ found = 1;
+ goto found_cpu_node;
+ }
+ }
+ }
+found_cpu_node:
+
+ /* find the hwnode that represents the cache */
+ for (cache_level = PAMU_ATTR_CACHE_L1; (cache_level < PAMU_ATTR_CACHE_L3) && found; cache_level++) {
+ if (stash_dest_hint == cache_level) {
+ prop = of_get_property(node, "cache-stash-id", 0);
+ if (!prop) {
+ pr_debug("missing cache-stash-id at %s\n", node->full_name);
+ of_node_put(node);
+ return ~(u32)0;
+ }
+ of_node_put(node);
+ return be32_to_cpup(prop);
+ }
+
+ prop = of_get_property(node, "next-level-cache", 0);
+ if (!prop) {
+ pr_debug("can't find next-level-cache at %s\n",
+ node->full_name);
+ of_node_put(node);
+ return ~(u32)0; /* can't traverse any further */
+ }
+ of_node_put(node);
+
+ /* advance to next node in cache hierarchy */
+ node = of_find_node_by_phandle(*prop);
+ if (!node) {
+ pr_debug("Invalid node for cache hierarchy %s\n",
+ node->full_name);
+ return ~(u32)0;
+ }
+ }
+
+ pr_debug("stash dest not found for %d on vcpu %d\n",
+ stash_dest_hint, vcpu);
+ return ~(u32)0;
+}
+
+/* Identify if the PAACT table entry belongs to QMAN, BMAN or QMAN Portal */
+#define QMAN_PAACE 1
+#define QMAN_PORTAL_PAACE 2
+#define BMAN_PAACE 3
+
+/**
+ * Setup operation mapping and stash destinations for QMAN and QMAN portal.
+ * Memory accesses to QMAN and BMAN private memory need not be coherent, so
+ * clear the PAACE entry coherency attribute for them.
+ */
+static void setup_qbman_paace(struct paace *ppaace, int paace_type)
+{
+ switch (paace_type) {
+ case QMAN_PAACE:
+ set_bf(ppaace->impl_attr, PAACE_IA_OTM, PAACE_OTM_INDEXED);
+ ppaace->op_encode.index_ot.omi = OMI_QMAN_PRIV;
+ /* setup QMAN Private data stashing for the L3 cache */
+ set_bf(ppaace->impl_attr, PAACE_IA_CID, get_stash_id(PAMU_ATTR_CACHE_L3, 0));
+ set_bf(ppaace->domain_attr.to_host.coherency_required, PAACE_DA_HOST_CR,
+ 0);
+ break;
+ case QMAN_PORTAL_PAACE:
+ set_bf(ppaace->impl_attr, PAACE_IA_OTM, PAACE_OTM_INDEXED);
+ ppaace->op_encode.index_ot.omi = OMI_QMAN;
+ /* Set DQRR and Frame stashing for the L3 cache */
+ set_bf(ppaace->impl_attr, PAACE_IA_CID, get_stash_id(PAMU_ATTR_CACHE_L3, 0));
+ break;
+ case BMAN_PAACE:
+ set_bf(ppaace->domain_attr.to_host.coherency_required, PAACE_DA_HOST_CR,
+ 0);
+ break;
+ }
+}
+
+/**
+ * Setup the operation mapping table for various devices. This is a static
+ * table where each table index corresponds to a particular device. PAMU uses
+ * this table to translate a device transaction to the appropriate CoreNet
+ * transaction.
+ */
+static void __init setup_omt(struct ome *omt)
+{
+ struct ome *ome;
+
+ /* Configure OMI_QMAN */
+ ome = &omt[OMI_QMAN];
+
+ ome->moe[IOE_READ_IDX] = EOE_VALID | EOE_READ;
+ ome->moe[IOE_EREAD0_IDX] = EOE_VALID | EOE_RSA;
+ ome->moe[IOE_WRITE_IDX] = EOE_VALID | EOE_WRITE;
+ ome->moe[IOE_EWRITE0_IDX] = EOE_VALID | EOE_WWSAO;
+
+ ome->moe[IOE_DIRECT0_IDX] = EOE_VALID | EOE_LDEC;
+ ome->moe[IOE_DIRECT1_IDX] = EOE_VALID | EOE_LDECPE;
+
+ /* Configure OMI_FMAN */
+ ome = &omt[OMI_FMAN];
+ ome->moe[IOE_READ_IDX] = EOE_VALID | EOE_READI;
+ ome->moe[IOE_WRITE_IDX] = EOE_VALID | EOE_WRITE;
+
+ /* Configure OMI_QMAN private */
+ ome = &omt[OMI_QMAN_PRIV];
+ ome->moe[IOE_READ_IDX] = EOE_VALID | EOE_READ;
+ ome->moe[IOE_WRITE_IDX] = EOE_VALID | EOE_WRITE;
+ ome->moe[IOE_EREAD0_IDX] = EOE_VALID | EOE_RSA;
+ ome->moe[IOE_EWRITE0_IDX] = EOE_VALID | EOE_WWSA;
+
+ /* Configure OMI_CAAM */
+ ome = &omt[OMI_CAAM];
+ ome->moe[IOE_READ_IDX] = EOE_VALID | EOE_READI;
+ ome->moe[IOE_WRITE_IDX] = EOE_VALID | EOE_WRITE;
+}
+
+/*
+ * Get the maximum number of PAACT table entries
+ * and subwindows supported by PAMU
+ */
+static void get_pamu_cap_values(unsigned long pamu_reg_base)
+{
+ u32 pc_val;
+
+ pc_val = in_be32((u32 *)(pamu_reg_base + PAMU_PC3));
+ /* Maximum number of subwindows per liodn */
+ max_subwindow_count = 1 << (1 + PAMU_PC3_MWCE(pc_val));
+}
+
+/* Setup PAMU registers pointing to PAACT, SPAACT and OMT */
+int setup_one_pamu(unsigned long pamu_reg_base, unsigned long pamu_reg_size,
+ phys_addr_t ppaact_phys, phys_addr_t spaact_phys,
+ phys_addr_t omt_phys)
+{
+ u32 *pc;
+ struct pamu_mmap_regs *pamu_regs;
+
+ pc = (u32 *) (pamu_reg_base + PAMU_PC);
+ pamu_regs = (struct pamu_mmap_regs *)
+ (pamu_reg_base + PAMU_MMAP_REGS_BASE);
+
+ /* set up pointers to corenet control blocks */
+
+ out_be32(&pamu_regs->ppbah, upper_32_bits(ppaact_phys));
+ out_be32(&pamu_regs->ppbal, lower_32_bits(ppaact_phys));
+ ppaact_phys = ppaact_phys + PAACT_SIZE;
+ out_be32(&pamu_regs->pplah, upper_32_bits(ppaact_phys));
+ out_be32(&pamu_regs->pplal, lower_32_bits(ppaact_phys));
+
+ out_be32(&pamu_regs->spbah, upper_32_bits(spaact_phys));
+ out_be32(&pamu_regs->spbal, lower_32_bits(spaact_phys));
+ spaact_phys = spaact_phys + SPAACT_SIZE;
+ out_be32(&pamu_regs->splah, upper_32_bits(spaact_phys));
+ out_be32(&pamu_regs->splal, lower_32_bits(spaact_phys));
+
+ out_be32(&pamu_regs->obah, upper_32_bits(omt_phys));
+ out_be32(&pamu_regs->obal, lower_32_bits(omt_phys));
+ omt_phys = omt_phys + OMT_SIZE;
+ out_be32(&pamu_regs->olah, upper_32_bits(omt_phys));
+ out_be32(&pamu_regs->olal, lower_32_bits(omt_phys));
+
+ /*
+ * set PAMU enable bit,
+ * allow ppaact & omt to be cached
+ * & enable PAMU access violation interrupts.
+ */
+
+ out_be32((u32 *)(pamu_reg_base + PAMU_PICS),
+ PAMU_ACCESS_VIOLATION_ENABLE);
+ out_be32(pc, PAMU_PC_PE | PAMU_PC_OCE | PAMU_PC_SPCC | PAMU_PC_PPCC);
+ return 0;
+}
+
+/* Enable all device LIODNS */
+static void __init setup_liodns(void)
+{
+ int i, len;
+ struct paace *ppaace;
+ struct device_node *node = NULL;
+ const u32 *prop;
+
+ for_each_node_with_property(node, "fsl,liodn") {
+ prop = of_get_property(node, "fsl,liodn", &len);
+ for (i = 0; i < len / sizeof(u32); i++) {
+ int liodn;
+
+ liodn = be32_to_cpup(&prop[i]);
+ if (liodn >= PAACE_NUMBER_ENTRIES) {
+ pr_debug("Invalid LIODN value %d\n", liodn);
+ continue;
+ }
+ ppaace = pamu_get_ppaace(liodn);
+ pamu_init_ppaace(ppaace);
+ /* window size is 2^(WSE+1) bytes */
+ set_bf(ppaace->addr_bitfields, PPAACE_AF_WSE, 35);
+ ppaace->wbah = 0;
+ set_bf(ppaace->addr_bitfields, PPAACE_AF_WBAL, 0);
+ set_bf(ppaace->impl_attr, PAACE_IA_ATM,
+ PAACE_ATM_NO_XLATE);
+ set_bf(ppaace->addr_bitfields, PAACE_AF_AP,
+ PAACE_AP_PERMS_ALL);
+ if (of_device_is_compatible(node, "fsl,qman-portal"))
+ setup_qbman_paace(ppaace, QMAN_PORTAL_PAACE);
+ if (of_device_is_compatible(node, "fsl,qman"))
+ setup_qbman_paace(ppaace, QMAN_PAACE);
+ if (of_device_is_compatible(node, "fsl,bman"))
+ setup_qbman_paace(ppaace, BMAN_PAACE);
+ mb();
+ pamu_enable_liodn(liodn);
+ }
+ }
+}
+
+irqreturn_t pamu_av_isr(int irq, void *arg)
+{
+ struct pamu_isr_data *data = arg;
+ phys_addr_t phys;
+ unsigned int i, j, ret;
+
+ pr_emerg("access violation interrupt\n");
+
+ for (i = 0; i < data->count; i++) {
+ void __iomem *p = data->pamu_reg_base + i * PAMU_OFFSET;
+ u32 pics = in_be32(p + PAMU_PICS);
+
+ if (pics & PAMU_ACCESS_VIOLATION_STAT) {
+ u32 avs1 = in_be32(p + PAMU_AVS1);
+ struct paace *paace;
+
+ pr_emerg("POES1=%08x\n", in_be32(p + PAMU_POES1));
+ pr_emerg("POES2=%08x\n", in_be32(p + PAMU_POES2));
+ pr_emerg("AVS1=%08x\n", avs1);
+ pr_emerg("AVS2=%08x\n", in_be32(p + PAMU_AVS2));
+ pr_emerg("AVA=%016llx\n", make64(in_be32(p + PAMU_AVAH),
+ in_be32(p + PAMU_AVAL)));
+ pr_emerg("UDAD=%08x\n", in_be32(p + PAMU_UDAD));
+ pr_emerg("POEA=%016llx\n", make64(in_be32(p + PAMU_POEAH),
+ in_be32(p + PAMU_POEAL)));
+
+ phys = make64(in_be32(p + PAMU_POEAH),
+ in_be32(p + PAMU_POEAL));
+
+ /* Assume that POEA points to a PAACE */
+ if (phys) {
+ u32 *paace = phys_to_virt(phys);
+
+ /* Only the first four words are relevant */
+ for (j = 0; j < 4; j++)
+ pr_emerg("PAACE[%u]=%08x\n", j, in_be32(paace + j));
+ }
+
+ /* clear access violation condition */
+ out_be32((p + PAMU_AVS1), avs1 & PAMU_AV_MASK);
+ paace = pamu_get_ppaace(avs1 >> PAMU_AVS1_LIODN_SHIFT);
+ BUG_ON(!paace);
+ /* check if we got a violation for a disabled LIODN */
+ if (!get_bf(paace->addr_bitfields, PAACE_AF_V)) {
+ /*
+ * As per hardware erratum A-003638, access
+ * violation can be reported for a disabled
+ * LIODN. If we hit that condition, disable
+ * access violation reporting.
+ */
+ pics &= ~PAMU_ACCESS_VIOLATION_ENABLE;
+ } else {
+ /* Disable the LIODN */
+ ret = pamu_disable_liodn(avs1 >> PAMU_AVS1_LIODN_SHIFT);
+ BUG_ON(ret);
+ pr_emerg("Disabling liodn %x\n", avs1 >> PAMU_AVS1_LIODN_SHIFT);
+ }
+ out_be32((p + PAMU_PICS), pics);
+ }
+ }
+
+
+ return IRQ_HANDLED;
+}
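The 64-bit fault addresses in the dump above are reassembled from high/low register pairs by the make64() macro defined near the top of this file. A one-line check of the arithmetic (standalone sketch):

    /* Sketch: make64() reassembly of a 32-bit high/low register pair. */
    #include <assert.h>
    #include <stdint.h>

    #define make64(high, low) (((uint64_t)(high) << 32) | (low))

    int main(void)
    {
            assert(make64(0x00000001u, 0x80000000u) == 0x0000000180000000ULL);
            return 0;
    }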
+
+#define LAWAR_EN 0x80000000
+#define LAWAR_TARGET_MASK 0x0FF00000
+#define LAWAR_TARGET_SHIFT 20
+#define LAWAR_SIZE_MASK 0x0000003F
+#define LAWAR_CSDID_MASK 0x000FF000
+#define LAWAR_CSDID_SHIFT 12
+
+#define LAW_SIZE_4K 0xb
+
+struct ccsr_law {
+ u32 lawbarh; /* LAWn base address high */
+ u32 lawbarl; /* LAWn base address low */
+ u32 lawar; /* LAWn attributes */
+ u32 reserved;
+};
+
+/*
+ * Create a coherence subdomain for a given memory block.
+ */
+static int __init create_csd(phys_addr_t phys, size_t size, u32 csd_port_id)
+{
+ struct device_node *np;
+ const __be32 *iprop;
+ void __iomem *lac = NULL; /* Local Access Control registers */
+ struct ccsr_law __iomem *law;
+ void __iomem *ccm = NULL;
+ u32 __iomem *csdids;
+ unsigned int i, num_laws, num_csds;
+ u32 law_target = 0;
+ u32 csd_id = 0;
+ int ret = 0;
+
+ np = of_find_compatible_node(NULL, NULL, "fsl,corenet-law");
+ if (!np)
+ return -ENODEV;
+
+ iprop = of_get_property(np, "fsl,num-laws", NULL);
+ if (!iprop) {
+ ret = -ENODEV;
+ goto error;
+ }
+
+ num_laws = be32_to_cpup(iprop);
+ if (!num_laws) {
+ ret = -ENODEV;
+ goto error;
+ }
+
+ lac = of_iomap(np, 0);
+ if (!lac) {
+ ret = -ENODEV;
+ goto error;
+ }
+
+ /* LAW registers are at offset 0xC00 */
+ law = lac + 0xC00;
+
+ of_node_put(np);
+
+ np = of_find_compatible_node(NULL, NULL, "fsl,corenet-cf");
+ if (!np) {
+ ret = -ENODEV;
+ goto error;
+ }
+
+ iprop = of_get_property(np, "fsl,ccf-num-csdids", NULL);
+ if (!iprop) {
+ ret = -ENODEV;
+ goto error;
+ }
+
+ num_csds = be32_to_cpup(iprop);
+ if (!num_csds) {
+ ret = -ENODEV;
+ goto error;
+ }
+
+ ccm = of_iomap(np, 0);
+ if (!ccm) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ /* The undocumented CSDID registers are at offset 0x600 */
+ csdids = ccm + 0x600;
+
+ of_node_put(np);
+ np = NULL;
+
+ /* Find an unused coherence subdomain ID */
+ for (csd_id = 0; csd_id < num_csds; csd_id++) {
+ if (!csdids[csd_id])
+ break;
+ }
+
+ /* Store the Port ID in the (undocumented) proper CIDMRxx register */
+ csdids[csd_id] = csd_port_id;
+
+ /* Find the DDR LAW that maps to our buffer. */
+ for (i = 0; i < num_laws; i++) {
+ if (law[i].lawar & LAWAR_EN) {
+ phys_addr_t law_start, law_end;
+
+ law_start = make64(law[i].lawbarh, law[i].lawbarl);
+ law_end = law_start +
+ (2ULL << (law[i].lawar & LAWAR_SIZE_MASK));
+
+ if (law_start <= phys && phys < law_end) {
+ law_target = law[i].lawar & LAWAR_TARGET_MASK;
+ break;
+ }
+ }
+ }
+
+ if (i == 0 || i == num_laws) {
+ /* This should never happen */
+ ret = -ENOENT;
+ goto error;
+ }
+
+ /* Find a free LAW entry */
+ while (law[--i].lawar & LAWAR_EN) {
+ if (i == 0) {
+ /* No higher priority LAW slots available */
+ ret = -ENOENT;
+ goto error;
+ }
+ }
+
+ law[i].lawbarh = upper_32_bits(phys);
+ law[i].lawbarl = lower_32_bits(phys);
+ wmb();
+ law[i].lawar = LAWAR_EN | law_target | (csd_id << LAWAR_CSDID_SHIFT) |
+ (LAW_SIZE_4K + get_order(size));
+ wmb();
+
+error:
+ if (ccm)
+ iounmap(ccm);
+
+ if (lac)
+ iounmap(lac);
+
+ if (np)
+ of_node_put(np);
+
+ return ret;
+}
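The LAW size field encodes a region as 2^(SIZE+1) bytes, which is why the scan above computes law_end with 2ULL << (lawar & LAWAR_SIZE_MASK), and why the new entry is programmed with LAW_SIZE_4K + get_order(size): each page order added to the 4 KiB encoding doubles the span. A quick standalone check of both steps:

    /* Sketch: LAW size-field arithmetic used in create_csd(). */
    #include <assert.h>

    int main(void)
    {
            unsigned int law_size_4k = 0xb;              /* LAW_SIZE_4K */

            assert((2ULL << law_size_4k) == 4096);       /* 2^(SIZE+1) bytes */

            /* A 256 KiB buffer is order 6 in 4 KiB pages: 0xb + 6 -> 2^18 bytes. */
            assert((2ULL << (law_size_4k + 6)) == 256 * 1024);
            return 0;
    }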
+
+/*
+ * Table of SVRs and the corresponding PORT_ID values. Port ID corresponds to a
+ * bit map of snoopers for a given range of memory mapped by a LAW.
+ *
+ * All future CoreNet-enabled SOCs will have this erratum (A-004510) fixed, so this
+ * table should never need to be updated. SVRs are guaranteed to be unique, so
+ * there is no worry that a future SOC will inadvertently have one of these
+ * values.
+ */
+static const struct {
+ u32 svr;
+ u32 port_id;
+} port_id_map[] = {
+ {0x82100010, 0xFF000000}, /* P2040 1.0 */
+ {0x82100011, 0xFF000000}, /* P2040 1.1 */
+ {0x82100110, 0xFF000000}, /* P2041 1.0 */
+ {0x82100111, 0xFF000000}, /* P2041 1.1 */
+ {0x82110310, 0xFF000000}, /* P3041 1.0 */
+ {0x82110311, 0xFF000000}, /* P3041 1.1 */
+ {0x82010020, 0xFFF80000}, /* P4040 2.0 */
+ {0x82000020, 0xFFF80000}, /* P4080 2.0 */
+ {0x82210010, 0xFC000000}, /* P5010 1.0 */
+ {0x82210020, 0xFC000000}, /* P5010 2.0 */
+ {0x82200010, 0xFC000000}, /* P5020 1.0 */
+ {0x82050010, 0xFF800000}, /* P5021 1.0 */
+ {0x82040010, 0xFF800000}, /* P5040 1.0 */
+};
+
+#define SVR_SECURITY 0x80000 /* The Security (E) bit */
+
+static int __init fsl_pamu_probe(struct platform_device *pdev)
+{
+ void __iomem *pamu_regs = NULL;
+ struct ccsr_guts __iomem *guts_regs = NULL;
+ u32 pamubypenr, pamu_counter;
+ unsigned long pamu_reg_off;
+ unsigned long pamu_reg_base;
+ struct pamu_isr_data *data = NULL;
+ struct device_node *guts_node;
+ u64 size;
+ struct page *p;
+ int ret = 0;
+ int irq;
+ phys_addr_t ppaact_phys;
+ phys_addr_t spaact_phys;
+ phys_addr_t omt_phys;
+ size_t mem_size = 0;
+ unsigned int order = 0;
+ u32 csd_port_id = 0;
+ unsigned i;
+ /*
+ * Enumerate all PAMUs, and allocate and set up the PAMU tables
+ * for each of them.
+ * NOTE: all PAMUs share the same LIODN tables.
+ */
+
+ pamu_regs = of_iomap(pdev->dev.of_node, 0);
+ if (!pamu_regs) {
+ dev_err(&pdev->dev, "ioremap of PAMU node failed\n");
+ return -ENOMEM;
+ }
+ of_get_address(pdev->dev.of_node, 0, &size, NULL);
+
+ irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
+ if (irq == NO_IRQ) {
+ dev_warn(&pdev->dev, "no interrupts listed in PAMU node\n");
+ goto error;
+ }
+
+ data = kzalloc(sizeof(struct pamu_isr_data), GFP_KERNEL);
+ if (!data) {
+ dev_err(&pdev->dev, "PAMU isr data memory allocation failed\n");
+ ret = -ENOMEM;
+ goto error;
+ }
+ data->pamu_reg_base = pamu_regs;
+ data->count = size / PAMU_OFFSET;
+
+ /* The ISR needs access to the regs, so we won't iounmap them */
+ ret = request_irq(irq, pamu_av_isr, 0, "pamu", data);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "error %i installing ISR for irq %i\n",
+ ret, irq);
+ goto error;
+ }
+
+ guts_node = of_find_matching_node(NULL, guts_device_ids);
+ if (!guts_node) {
+ dev_err(&pdev->dev, "could not find GUTS node %s\n",
+ pdev->dev.of_node->full_name);
+ ret = -ENODEV;
+ goto error;
+ }
+
+ guts_regs = of_iomap(guts_node, 0);
+ of_node_put(guts_node);
+ if (!guts_regs) {
+ dev_err(&pdev->dev, "ioremap of GUTS node failed\n");
+ ret = -ENODEV;
+ goto error;
+ }
+
+ /* read in the PAMU capability registers */
+ get_pamu_cap_values((unsigned long)pamu_regs);
+ /*
+ * To simplify the allocation of a coherency domain, we allocate the
+ * PAACT and the OMT in the same memory buffer. Unfortunately, this
+ * wastes more memory compared to allocating the buffers separately.
+ */
+ /* Determine how much memory we need */
+ mem_size = (PAGE_SIZE << get_order(PAACT_SIZE)) +
+ (PAGE_SIZE << get_order(SPAACT_SIZE)) +
+ (PAGE_SIZE << get_order(OMT_SIZE));
+ order = get_order(mem_size);
+
+ p = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
+ if (!p) {
+ dev_err(&pdev->dev, "unable to allocate PAACT/SPAACT/OMT block\n");
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ ppaact = page_address(p);
+ ppaact_phys = page_to_phys(p);
+
+ /* Make sure the memory is naturally aligned */
+ if (ppaact_phys & ((PAGE_SIZE << order) - 1)) {
+ dev_err(&pdev->dev, "PAACT/OMT block is unaligned\n");
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ spaact = (void *)ppaact + (PAGE_SIZE << get_order(PAACT_SIZE));
+ omt = (void *)spaact + (PAGE_SIZE << get_order(SPAACT_SIZE));
+
+ dev_dbg(&pdev->dev, "ppaact virt=%p phys=0x%llx\n", ppaact,
+ (unsigned long long) ppaact_phys);
+
+ /* Check to see if we need to implement the work-around on this SOC */
+
+ /* Determine the Port ID for our coherence subdomain */
+ for (i = 0; i < ARRAY_SIZE(port_id_map); i++) {
+ if (port_id_map[i].svr == (mfspr(SPRN_SVR) & ~SVR_SECURITY)) {
+ csd_port_id = port_id_map[i].port_id;
+ dev_dbg(&pdev->dev, "found matching SVR %08x\n",
+ port_id_map[i].svr);
+ break;
+ }
+ }
+
+ if (csd_port_id) {
+ dev_dbg(&pdev->dev, "creating coherency subdomain at address "
+ "0x%llx, size %zu, port id 0x%08x", ppaact_phys,
+ mem_size, csd_port_id);
+
+ ret = create_csd(ppaact_phys, mem_size, csd_port_id);
+ if (ret) {
+ dev_err(&pdev->dev, "could not create coherence "
+ "subdomain\n");
+ goto error;
+ }
+ }
+
+ spaact_phys = virt_to_phys(spaact);
+ omt_phys = virt_to_phys(omt);
+
+ spaace_pool = gen_pool_create(ilog2(sizeof(struct paace)), -1);
+ if (!spaace_pool) {
+ ret = -ENOMEM;
+ dev_err(&pdev->dev, "PAMU : failed to allocate spaace gen pool\n");
+ goto error;
+ }
+
+ ret = gen_pool_add(spaace_pool, (unsigned long)spaact, SPAACT_SIZE, -1);
+ if (ret)
+ goto error_genpool;
+
+ pamubypenr = in_be32(&guts_regs->pamubypenr);
+
+ for (pamu_reg_off = 0, pamu_counter = 0x80000000; pamu_reg_off < size;
+ pamu_reg_off += PAMU_OFFSET, pamu_counter >>= 1) {
+
+ pamu_reg_base = (unsigned long) pamu_regs + pamu_reg_off;
+ setup_one_pamu(pamu_reg_base, pamu_reg_off, ppaact_phys,
+ spaact_phys, omt_phys);
+ /* Disable PAMU bypass for this PAMU */
+ pamubypenr &= ~pamu_counter;
+ }
+
+ setup_omt(omt);
+
+ /* Enable all relevant PAMU(s) */
+ out_be32(&guts_regs->pamubypenr, pamubypenr);
+
+ iounmap(guts_regs);
+
+ /* Enable DMA for the LIODNs in the device tree*/
+
+ setup_liodns();
+
+ return 0;
+
+error_genpool:
+ gen_pool_destroy(spaace_pool);
+
+error:
+ if (irq != NO_IRQ)
+ free_irq(irq, data);
+
+ if (data) {
+ memset(data, 0, sizeof(struct pamu_isr_data));
+ kfree(data);
+ }
+
+ if (pamu_regs)
+ iounmap(pamu_regs);
+
+ if (guts_regs)
+ iounmap(guts_regs);
+
+ if (ppaact)
+ free_pages((unsigned long)ppaact, order);
+
+ ppaact = NULL;
+
+ return ret;
+}
+
+static const struct of_device_id fsl_of_pamu_ids[] = {
+ {
+ .compatible = "fsl,p4080-pamu",
+ },
+ {
+ .compatible = "fsl,pamu",
+ },
+ {},
+};
+
+static struct platform_driver fsl_of_pamu_driver = {
+ .driver = {
+ .name = "fsl-of-pamu",
+ .owner = THIS_MODULE,
+ },
+ .probe = fsl_pamu_probe,
+};
+
+static __init int fsl_pamu_init(void)
+{
+ struct platform_device *pdev = NULL;
+ struct device_node *np;
+ int ret;
+
+ /*
+ * The normal OF process calls the probe function at some
+ * indeterminate later time, after most drivers have loaded. This is
+ * too late for us, because PAMU clients (like the Qman driver)
+ * depend on PAMU being initialized early.
+ *
+ * So instead, we "manually" call our probe function by creating the
+ * platform devices ourselves.
+ */
+
+ /*
+ * We assume that there is only one PAMU node in the device tree. A
+ * single PAMU node represents all of the PAMU devices in the SOC
+ * already. Everything else already makes that assumption, and the
+ * binding for the PAMU nodes doesn't allow for any parent-child
+ * relationships anyway. In other words, support for more than one
+ * PAMU node would require significant changes to a lot of code.
+ */
+
+ np = of_find_compatible_node(NULL, NULL, "fsl,pamu");
+ if (!np) {
+ pr_err("could not find a PAMU node\n");
+ return -ENODEV;
+ }
+
+ ret = platform_driver_register(&fsl_of_pamu_driver);
+ if (ret) {
+ pr_err("could not register driver (err=%i)\n", ret);
+ goto error_driver_register;
+ }
+
+ pdev = platform_device_alloc("fsl-of-pamu", 0);
+ if (!pdev) {
+ pr_err("could not allocate device %s\n",
+ np->full_name);
+ ret = -ENOMEM;
+ goto error_device_alloc;
+ }
+ pdev->dev.of_node = of_node_get(np);
+
+ ret = pamu_domain_init();
+ if (ret)
+ goto error_device_add;
+
+ ret = platform_device_add(pdev);
+ if (ret) {
+ pr_err("could not add device %s (err=%i)\n",
+ np->full_name, ret);
+ goto error_device_add;
+ }
+
+ return 0;
+
+error_device_add:
+ of_node_put(pdev->dev.of_node);
+ pdev->dev.of_node = NULL;
+
+ platform_device_put(pdev);
+
+error_device_alloc:
+ platform_driver_unregister(&fsl_of_pamu_driver);
+
+error_driver_register:
+ of_node_put(np);
+
+ return ret;
+}
+arch_initcall(fsl_pamu_init);
diff --git a/drivers/iommu/fsl_pamu.h b/drivers/iommu/fsl_pamu.h
new file mode 100644
index 0000000..8fc1a12
--- /dev/null
+++ b/drivers/iommu/fsl_pamu.h
@@ -0,0 +1,410 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) 2013 Freescale Semiconductor, Inc.
+ *
+ */
+
+#ifndef __FSL_PAMU_H
+#define __FSL_PAMU_H
+
+#include <asm/fsl_pamu_stash.h>
+
+/* Bit Field macros
+ * v = bit field variable; m = mask, m##_SHIFT = shift, x = value to load
+ */
+#define set_bf(v, m, x) (v = ((v) & ~(m)) | (((x) << (m##_SHIFT)) & (m)))
+#define get_bf(v, m) (((v) & (m)) >> (m##_SHIFT))
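These macros depend on every mask m having a companion m##_SHIFT constant defined next to it, as all the PAACE_*/PPAACE_*/SPAACE_* pairs below do. A standalone round-trip check using one real pair (a sketch; the two field defines are copied from this header):

    /* Sketch: set_bf()/get_bf() round trip on the PAACE_AF_AP field. */
    #include <assert.h>
    #include <stdint.h>

    #define PAACE_AF_AP        0x00000018
    #define PAACE_AF_AP_SHIFT  3
    #define PAACE_AP_PERMS_ALL 0x3

    #define set_bf(v, m, x) (v = ((v) & ~(m)) | (((x) << (m##_SHIFT)) & (m)))
    #define get_bf(v, m)    (((v) & (m)) >> (m##_SHIFT))

    int main(void)
    {
            uint32_t af = 0xffffffe7;       /* every bit set except the AP field */

            set_bf(af, PAACE_AF_AP, PAACE_AP_PERMS_ALL);
            assert(get_bf(af, PAACE_AF_AP) == PAACE_AP_PERMS_ALL);
            assert(af == 0xffffffff);       /* only bits 3..4 were touched */
            return 0;
    }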
+
+/* PAMU CCSR space */
+#define PAMU_PGC 0x00000000 /* Allows all peripheral accesses */
+#define PAMU_PE 0x40000000 /* enable PAMU */
+
+/* PAMU_OFFSET to the next pamu space in ccsr */
+#define PAMU_OFFSET 0x1000
+
+#define PAMU_MMAP_REGS_BASE 0
+
+struct pamu_mmap_regs {
+ u32 ppbah;
+ u32 ppbal;
+ u32 pplah;
+ u32 pplal;
+ u32 spbah;
+ u32 spbal;
+ u32 splah;
+ u32 splal;
+ u32 obah;
+ u32 obal;
+ u32 olah;
+ u32 olal;
+};
+
+/* PAMU Error Registers */
+#define PAMU_POES1 0x0040
+#define PAMU_POES2 0x0044
+#define PAMU_POEAH 0x0048
+#define PAMU_POEAL 0x004C
+#define PAMU_AVS1 0x0050
+#define PAMU_AVS1_AV 0x1
+#define PAMU_AVS1_OTV 0x6
+#define PAMU_AVS1_APV 0x78
+#define PAMU_AVS1_WAV 0x380
+#define PAMU_AVS1_LAV 0x1c00
+#define PAMU_AVS1_GCV 0x2000
+#define PAMU_AVS1_PDV 0x4000
+#define PAMU_AV_MASK (PAMU_AVS1_AV | PAMU_AVS1_OTV | PAMU_AVS1_APV | PAMU_AVS1_WAV \
+ | PAMU_AVS1_LAV | PAMU_AVS1_GCV | PAMU_AVS1_PDV)
+#define PAMU_AVS1_LIODN_SHIFT 16
+#define PAMU_LAV_LIODN_NOT_IN_PPAACT 0x400
+
+#define PAMU_AVS2 0x0054
+#define PAMU_AVAH 0x0058
+#define PAMU_AVAL 0x005C
+#define PAMU_EECTL 0x0060
+#define PAMU_EEDIS 0x0064
+#define PAMU_EEINTEN 0x0068
+#define PAMU_EEDET 0x006C
+#define PAMU_EEATTR 0x0070
+#define PAMU_EEAHI 0x0074
+#define PAMU_EEALO 0x0078
+#define PAMU_EEDHI 0X007C
+#define PAMU_EEDLO 0x0080
+#define PAMU_EECC 0x0084
+#define PAMU_UDAD 0x0090
+
+/* PAMU Revision Registers */
+#define PAMU_PR1 0x0BF8
+#define PAMU_PR2 0x0BFC
+
+/* PAMU version mask */
+#define PAMU_PR1_MASK 0xffff
+
+/* PAMU Capabilities Registers */
+#define PAMU_PC1 0x0C00
+#define PAMU_PC2 0x0C04
+#define PAMU_PC3 0x0C08
+#define PAMU_PC4 0x0C0C
+
+/* PAMU Control Register */
+#define PAMU_PC 0x0C10
+
+/* PAMU control defs */
+#define PAMU_CONTROL 0x0C10
+#define PAMU_PC_PGC 0x80000000 /* PAMU gate closed bit */
+#define PAMU_PC_PE 0x40000000 /* PAMU enable bit */
+#define PAMU_PC_SPCC 0x00000010 /* sPAACE cache enable */
+#define PAMU_PC_PPCC 0x00000001 /* pPAACE cache enable */
+#define PAMU_PC_OCE 0x00001000 /* OMT cache enable */
+
+#define PAMU_PFA1 0x0C14
+#define PAMU_PFA2 0x0C18
+
+#define PAMU_PC2_MLIODN(X) ((X) >> 16)
+#define PAMU_PC3_MWCE(X) (((X) >> 21) & 0xf)
+
+/* PAMU Interrupt control and Status Register */
+#define PAMU_PICS 0x0C1C
+#define PAMU_ACCESS_VIOLATION_STAT 0x8
+#define PAMU_ACCESS_VIOLATION_ENABLE 0x4
+
+/* PAMU Debug Registers */
+#define PAMU_PD1 0x0F00
+#define PAMU_PD2 0x0F04
+#define PAMU_PD3 0x0F08
+#define PAMU_PD4 0x0F0C
+
+#define PAACE_AP_PERMS_DENIED 0x0
+#define PAACE_AP_PERMS_QUERY 0x1
+#define PAACE_AP_PERMS_UPDATE 0x2
+#define PAACE_AP_PERMS_ALL 0x3
+
+#define PAACE_DD_TO_HOST 0x0
+#define PAACE_DD_TO_IO 0x1
+#define PAACE_PT_PRIMARY 0x0
+#define PAACE_PT_SECONDARY 0x1
+#define PAACE_V_INVALID 0x0
+#define PAACE_V_VALID 0x1
+#define PAACE_MW_SUBWINDOWS 0x1
+
+#define PAACE_WSE_4K 0xB
+#define PAACE_WSE_8K 0xC
+#define PAACE_WSE_16K 0xD
+#define PAACE_WSE_32K 0xE
+#define PAACE_WSE_64K 0xF
+#define PAACE_WSE_128K 0x10
+#define PAACE_WSE_256K 0x11
+#define PAACE_WSE_512K 0x12
+#define PAACE_WSE_1M 0x13
+#define PAACE_WSE_2M 0x14
+#define PAACE_WSE_4M 0x15
+#define PAACE_WSE_8M 0x16
+#define PAACE_WSE_16M 0x17
+#define PAACE_WSE_32M 0x18
+#define PAACE_WSE_64M 0x19
+#define PAACE_WSE_128M 0x1A
+#define PAACE_WSE_256M 0x1B
+#define PAACE_WSE_512M 0x1C
+#define PAACE_WSE_1G 0x1D
+#define PAACE_WSE_2G 0x1E
+#define PAACE_WSE_4G 0x1F
+
+#define PAACE_DID_PCI_EXPRESS_1 0x00
+#define PAACE_DID_PCI_EXPRESS_2 0x01
+#define PAACE_DID_PCI_EXPRESS_3 0x02
+#define PAACE_DID_PCI_EXPRESS_4 0x03
+#define PAACE_DID_LOCAL_BUS 0x04
+#define PAACE_DID_SRIO 0x0C
+#define PAACE_DID_MEM_1 0x10
+#define PAACE_DID_MEM_2 0x11
+#define PAACE_DID_MEM_3 0x12
+#define PAACE_DID_MEM_4 0x13
+#define PAACE_DID_MEM_1_2 0x14
+#define PAACE_DID_MEM_3_4 0x15
+#define PAACE_DID_MEM_1_4 0x16
+#define PAACE_DID_BM_SW_PORTAL 0x18
+#define PAACE_DID_PAMU 0x1C
+#define PAACE_DID_CAAM 0x21
+#define PAACE_DID_QM_SW_PORTAL 0x3C
+#define PAACE_DID_CORE0_INST 0x80
+#define PAACE_DID_CORE0_DATA 0x81
+#define PAACE_DID_CORE1_INST 0x82
+#define PAACE_DID_CORE1_DATA 0x83
+#define PAACE_DID_CORE2_INST 0x84
+#define PAACE_DID_CORE2_DATA 0x85
+#define PAACE_DID_CORE3_INST 0x86
+#define PAACE_DID_CORE3_DATA 0x87
+#define PAACE_DID_CORE4_INST 0x88
+#define PAACE_DID_CORE4_DATA 0x89
+#define PAACE_DID_CORE5_INST 0x8A
+#define PAACE_DID_CORE5_DATA 0x8B
+#define PAACE_DID_CORE6_INST 0x8C
+#define PAACE_DID_CORE6_DATA 0x8D
+#define PAACE_DID_CORE7_INST 0x8E
+#define PAACE_DID_CORE7_DATA 0x8F
+#define PAACE_DID_BROADCAST 0xFF
+
+#define PAACE_ATM_NO_XLATE 0x00
+#define PAACE_ATM_WINDOW_XLATE 0x01
+#define PAACE_ATM_PAGE_XLATE 0x02
+#define PAACE_ATM_WIN_PG_XLATE \
+ (PAACE_ATM_WINDOW_XLATE | PAACE_ATM_PAGE_XLATE)
+#define PAACE_OTM_NO_XLATE 0x00
+#define PAACE_OTM_IMMEDIATE 0x01
+#define PAACE_OTM_INDEXED 0x02
+#define PAACE_OTM_RESERVED 0x03
+
+#define PAACE_M_COHERENCE_REQ 0x01
+
+#define PAACE_PID_0 0x0
+#define PAACE_PID_1 0x1
+#define PAACE_PID_2 0x2
+#define PAACE_PID_3 0x3
+#define PAACE_PID_4 0x4
+#define PAACE_PID_5 0x5
+#define PAACE_PID_6 0x6
+#define PAACE_PID_7 0x7
+
+#define PAACE_TCEF_FORMAT0_8B 0x00
+#define PAACE_TCEF_FORMAT1_RSVD 0x01
+/*
+ * Hard-coded value for the PAACT size to accommodate the
+ * maximum LIODN value generated by u-boot.
+ */
+#define PAACE_NUMBER_ENTRIES 0x500
+/* Hard-coded value for the SPAACT size */
+#define SPAACE_NUMBER_ENTRIES 0x800
+
+#define OME_NUMBER_ENTRIES 16
+
+/* PAACE Bit Field Defines */
+#define PPAACE_AF_WBAL 0xfffff000
+#define PPAACE_AF_WBAL_SHIFT 12
+#define PPAACE_AF_WSE 0x00000fc0
+#define PPAACE_AF_WSE_SHIFT 6
+#define PPAACE_AF_MW 0x00000020
+#define PPAACE_AF_MW_SHIFT 5
+
+#define SPAACE_AF_LIODN 0xffff0000
+#define SPAACE_AF_LIODN_SHIFT 16
+
+#define PAACE_AF_AP 0x00000018
+#define PAACE_AF_AP_SHIFT 3
+#define PAACE_AF_DD 0x00000004
+#define PAACE_AF_DD_SHIFT 2
+#define PAACE_AF_PT 0x00000002
+#define PAACE_AF_PT_SHIFT 1
+#define PAACE_AF_V 0x00000001
+#define PAACE_AF_V_SHIFT 0
+
+#define PAACE_DA_HOST_CR 0x80
+#define PAACE_DA_HOST_CR_SHIFT 7
+
+#define PAACE_IA_CID 0x00FF0000
+#define PAACE_IA_CID_SHIFT 16
+#define PAACE_IA_WCE 0x000000F0
+#define PAACE_IA_WCE_SHIFT 4
+#define PAACE_IA_ATM 0x0000000C
+#define PAACE_IA_ATM_SHIFT 2
+#define PAACE_IA_OTM 0x00000003
+#define PAACE_IA_OTM_SHIFT 0
+
+#define PAACE_WIN_TWBAL 0xfffff000
+#define PAACE_WIN_TWBAL_SHIFT 12
+#define PAACE_WIN_SWSE 0x00000fc0
+#define PAACE_WIN_SWSE_SHIFT 6
+
+/* PAMU Data Structures */
+/* primary / secondary paact structure */
+struct paace {
+ /* PAACE Offset 0x00 */
+ u32 wbah; /* only valid for Primary PAACE */
+ u32 addr_bitfields; /* See P/S PAACE_AF_* */
+
+ /* PAACE Offset 0x08 */
+ /* Interpretation of first 32 bits dependent on DD above */
+ union {
+ struct {
+ /* Destination ID, see PAACE_DID_* defines */
+ u8 did;
+ /* Partition ID */
+ u8 pid;
+ /* Snoop ID */
+ u8 snpid;
+ /* coherency_required : 1 reserved : 7 */
+ u8 coherency_required; /* See PAACE_DA_* */
+ } to_host;
+ struct {
+ /* Destination ID, see PAACE_DID_* defines */
+ u8 did;
+ u8 reserved1;
+ u16 reserved2;
+ } to_io;
+ } domain_attr;
+
+ /*
+ * Implementation attributes, window count, and the address &
+ * operation translation modes.
+ */
+ u32 impl_attr; /* See PAACE_IA_* */
+
+ /* PAACE Offset 0x10 */
+ /* Translated window base address */
+ u32 twbah;
+ u32 win_bitfields; /* See PAACE_WIN_* */
+
+ /* PAACE Offset 0x18 */
+ /* first secondary paace entry */
+ u32 fspi; /* only valid for Primary PAACE */
+ union {
+ struct {
+ u8 ioea;
+ u8 moea;
+ u8 ioeb;
+ u8 moeb;
+ } immed_ot;
+ struct {
+ u16 reserved;
+ u16 omi;
+ } index_ot;
+ } op_encode;
+
+ /* PAACE Offsets 0x20-0x38 */
+ u32 reserved[8]; /* not currently implemented */
+};
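+
+/*
+ * Note: struct paace above is exactly 64 bytes (offsets 0x00-0x3F),
+ * which makes PAACT_SIZE and SPAACT_SIZE below simple multiples of 64.
+ */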
+
+/* OME : Operation Mapping Entry
+ * MOE : Mapped Operation Encoding
+ * The operation mapping table (OMT) is a table containing operation mapping
+ * entries (OME). The index of a particular OME is programmed in the PAACE
+ * entry for translating inbound I/O operations corresponding to an LIODN.
+ * The OMT is used only in the indexed translation mode. Each OME is 128
+ * bytes, where each byte holds one mapped operation encoding (MOE).
+ */
+#define NUM_MOE 128
+struct ome {
+ u8 moe[NUM_MOE];
+} __attribute__((packed));
+
+#define PAACT_SIZE (sizeof(struct paace) * PAACE_NUMBER_ENTRIES)
+#define SPAACT_SIZE (sizeof(struct paace) * SPAACE_NUMBER_ENTRIES)
+#define OMT_SIZE (sizeof(struct ome) * OME_NUMBER_ENTRIES)
+
+#define PAMU_PAGE_SHIFT 12
+#define PAMU_PAGE_SIZE 4096ULL
+
+#define IOE_READ 0x00
+#define IOE_READ_IDX 0x00
+#define IOE_WRITE 0x81
+#define IOE_WRITE_IDX 0x01
+#define IOE_EREAD0 0x82 /* Enhanced read type 0 */
+#define IOE_EREAD0_IDX 0x02 /* Enhanced read type 0 */
+#define IOE_EWRITE0 0x83 /* Enhanced write type 0 */
+#define IOE_EWRITE0_IDX 0x03 /* Enhanced write type 0 */
+#define IOE_DIRECT0 0x84 /* Directive type 0 */
+#define IOE_DIRECT0_IDX 0x04 /* Directive type 0 */
+#define IOE_EREAD1 0x85 /* Enhanced read type 1 */
+#define IOE_EREAD1_IDX 0x05 /* Enhanced read type 1 */
+#define IOE_EWRITE1 0x86 /* Enhanced write type 1 */
+#define IOE_EWRITE1_IDX 0x06 /* Enhanced write type 1 */
+#define IOE_DIRECT1 0x87 /* Directive type 1 */
+#define IOE_DIRECT1_IDX 0x07 /* Directive type 1 */
+#define IOE_RAC 0x8c /* Read with Atomic clear */
+#define IOE_RAC_IDX 0x0c /* Read with Atomic clear */
+#define IOE_RAS 0x8d /* Read with Atomic set */
+#define IOE_RAS_IDX 0x0d /* Read with Atomic set */
+#define IOE_RAD 0x8e /* Read with Atomic decrement */
+#define IOE_RAD_IDX 0x0e /* Read with Atomic decrement */
+#define IOE_RAI 0x8f /* Read with Atomic increment */
+#define IOE_RAI_IDX 0x0f /* Read with Atomic increment */
+
+#define EOE_READ 0x00
+#define EOE_WRITE 0x01
+#define EOE_RAC 0x0c /* Read with Atomic clear */
+#define EOE_RAS 0x0d /* Read with Atomic set */
+#define EOE_RAD 0x0e /* Read with Atomic decrement */
+#define EOE_RAI 0x0f /* Read with Atomic increment */
+#define EOE_LDEC 0x10 /* Load external cache */
+#define EOE_LDECL 0x11 /* Load external cache with stash lock */
+#define EOE_LDECPE 0x12 /* Load external cache with preferred exclusive */
+#define EOE_LDECPEL 0x13 /* Load external cache with preferred exclusive and lock */
+#define EOE_LDECFE 0x14 /* Load external cache with forced exclusive */
+#define EOE_LDECFEL 0x15 /* Load external cache with forced exclusive and lock */
+#define EOE_RSA 0x16 /* Read with stash allocate */
+#define EOE_RSAU 0x17 /* Read with stash allocate and unlock */
+#define EOE_READI 0x18 /* Read with invalidate */
+#define EOE_RWNITC 0x19 /* Read with no intention to cache */
+#define EOE_WCI 0x1a /* Write cache inhibited */
+#define EOE_WWSA 0x1b /* Write with stash allocate */
+#define EOE_WWSAL 0x1c /* Write with stash allocate and lock */
+#define EOE_WWSAO 0x1d /* Write with stash allocate only */
+#define EOE_WWSAOL 0x1e /* Write with stash allocate only and lock */
+#define EOE_VALID 0x80
+
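+/*
+ * Illustrative note on indexed operation translation: an inbound
+ * operation's IOE_*_IDX value indexes the moe[] array of the selected
+ * OME, and the byte found there supplies the outbound encoding. A
+ * valid read mapping, for example, could be stored as
+ * (EOE_VALID | EOE_READ) at moe[IOE_READ_IDX]. The actual table
+ * contents are programmed by the companion PAMU setup code, which is
+ * not part of this file.
+ */
+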
+/* Function prototypes */
+int pamu_domain_init(void);
+int pamu_enable_liodn(int liodn);
+int pamu_disable_liodn(int liodn);
+void pamu_free_subwins(int liodn);
+int pamu_config_ppaace(int liodn, phys_addr_t win_addr, phys_addr_t win_size,
+ u32 omi, unsigned long rpn, u32 snoopid, uint32_t stashid,
+ u32 subwin_cnt, int prot);
+int pamu_config_spaace(int liodn, u32 subwin_cnt, u32 subwin_addr,
+ phys_addr_t subwin_size, u32 omi, unsigned long rpn,
+ uint32_t snoopid, u32 stashid, int enable, int prot);
+
+u32 get_stash_id(u32 stash_dest_hint, u32 vcpu);
+void get_ome_index(u32 *omi_index, struct device *dev);
+int pamu_update_paace_stash(int liodn, u32 subwin, u32 value);
+int pamu_disable_spaace(int liodn, u32 subwin);
+u32 pamu_get_max_subwin_cnt(void);
+
+#endif /* __FSL_PAMU_H */
diff --git a/drivers/iommu/fsl_pamu_domain.c b/drivers/iommu/fsl_pamu_domain.c
new file mode 100644
index 0000000..c857c30
--- /dev/null
+++ b/drivers/iommu/fsl_pamu_domain.c
@@ -0,0 +1,1172 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) 2013 Freescale Semiconductor, Inc.
+ * Author: Varun Sethi <varun.sethi@freescale.com>
+ *
+ */
+
+#define pr_fmt(fmt) "fsl-pamu-domain: %s: " fmt, __func__
+
+#include <linux/init.h>
+#include <linux/iommu.h>
+#include <linux/notifier.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/of_platform.h>
+#include <linux/bootmem.h>
+#include <linux/err.h>
+#include <asm/io.h>
+#include <asm/bitops.h>
+
+#include <asm/pci-bridge.h>
+#include <sysdev/fsl_pci.h>
+
+#include "fsl_pamu_domain.h"
+#include "pci.h"
+
+/*
+ * Global spinlock that needs to be held while
+ * configuring PAMU.
+ */
+static DEFINE_SPINLOCK(iommu_lock);
+
+static struct kmem_cache *fsl_pamu_domain_cache;
+static struct kmem_cache *iommu_devinfo_cache;
+static DEFINE_SPINLOCK(device_domain_lock);
+
+static int __init iommu_init_mempool(void)
+{
+ fsl_pamu_domain_cache = kmem_cache_create("fsl_pamu_domain",
+ sizeof(struct fsl_dma_domain),
+ 0,
+ SLAB_HWCACHE_ALIGN,
+ NULL);
+ if (!fsl_pamu_domain_cache) {
+ pr_debug("Couldn't create fsl iommu_domain cache\n");
+ return -ENOMEM;
+ }
+
+ iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
+ sizeof(struct device_domain_info),
+ 0,
+ SLAB_HWCACHE_ALIGN,
+ NULL);
+ if (!iommu_devinfo_cache) {
+ pr_debug("Couldn't create devinfo cache\n");
+ kmem_cache_destroy(fsl_pamu_domain_cache);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static phys_addr_t get_phys_addr(struct fsl_dma_domain *dma_domain, dma_addr_t iova)
+{
+ u32 win_cnt = dma_domain->win_cnt;
+ struct dma_window *win_ptr =
+ &dma_domain->win_arr[0];
+ struct iommu_domain_geometry *geom;
+
+ geom = &dma_domain->iommu_domain->geometry;
+
+ if (!win_cnt || !dma_domain->geom_size) {
+ pr_debug("Number of windows/geometry not configured for the domain\n");
+ return 0;
+ }
+
+ if (win_cnt > 1) {
+ u64 subwin_size;
+ dma_addr_t subwin_iova;
+ u32 wnd;
+
+ subwin_size = dma_domain->geom_size >> ilog2(win_cnt);
+ subwin_iova = iova & ~(subwin_size - 1);
+ wnd = (subwin_iova - geom->aperture_start) >> ilog2(subwin_size);
+ win_ptr = &dma_domain->win_arr[wnd];
+ }
+
+ if (win_ptr->valid)
+ return (win_ptr->paddr + (iova & (win_ptr->size - 1)));
+
+ return 0;
+}
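+
+/*
+ * Worked example for the subwindow arithmetic in get_phys_addr()
+ * (values illustrative): with aperture_start = 0, geom_size = 1 GiB
+ * and win_cnt = 256, each subwindow spans 4 MiB; an iova of 0x00C12345
+ * selects subwindow 3 (0x00C12345 >> 22 == 3) and, if that window is
+ * valid, translates to win_arr[3].paddr + (iova & (win_arr[3].size - 1)).
+ */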
+
+static int map_subwins(int liodn, struct fsl_dma_domain *dma_domain)
+{
+ struct dma_window *sub_win_ptr =
+ &dma_domain->win_arr[0];
+ int i, ret = 0;
+ unsigned long rpn, flags;
+
+ for (i = 0; i < dma_domain->win_cnt; i++) {
+ if (sub_win_ptr[i].valid) {
+ rpn = sub_win_ptr[i].paddr >>
+ PAMU_PAGE_SHIFT;
+ spin_lock_irqsave(&iommu_lock, flags);
+ ret = pamu_config_spaace(liodn, dma_domain->win_cnt, i,
+ sub_win_ptr[i].size,
+ ~(u32)0,
+ rpn,
+ dma_domain->snoop_id,
+ dma_domain->stash_id,
+ (i > 0) ? 1 : 0,
+ sub_win_ptr[i].prot);
+ spin_unlock_irqrestore(&iommu_lock, flags);
+ if (ret) {
+ pr_debug("PAMU SPAACE configuration failed for liodn %d\n",
+ liodn);
+ return ret;
+ }
+ }
+ }
+
+ return ret;
+}
+
+static int map_win(int liodn, struct fsl_dma_domain *dma_domain)
+{
+ int ret;
+ struct dma_window *wnd = &dma_domain->win_arr[0];
+ phys_addr_t wnd_addr = dma_domain->iommu_domain->geometry.aperture_start;
+ unsigned long flags;
+
+ spin_lock_irqsave(&iommu_lock, flags);
+ ret = pamu_config_ppaace(liodn, wnd_addr,
+ wnd->size,
+ ~(u32)0,
+ wnd->paddr >> PAMU_PAGE_SHIFT,
+ dma_domain->snoop_id, dma_domain->stash_id,
+ 0, wnd->prot);
+ spin_unlock_irqrestore(&iommu_lock, flags);
+ if (ret)
+ pr_debug("PAMU PAACE configuration failed for liodn %d\n",
+ liodn);
+
+ return ret;
+}
+
+/* Map the DMA window corresponding to the LIODN */
+static int map_liodn(int liodn, struct fsl_dma_domain *dma_domain)
+{
+ if (dma_domain->win_cnt > 1)
+ return map_subwins(liodn, dma_domain);
+ else
+ return map_win(liodn, dma_domain);
+}
+
+/* Update window/subwindow mapping for the LIODN */
+static int update_liodn(int liodn, struct fsl_dma_domain *dma_domain, u32 wnd_nr)
+{
+ int ret;
+ struct dma_window *wnd = &dma_domain->win_arr[wnd_nr];
+ unsigned long flags;
+
+ spin_lock_irqsave(&iommu_lock, flags);
+ if (dma_domain->win_cnt > 1) {
+ ret = pamu_config_spaace(liodn, dma_domain->win_cnt, wnd_nr,
+ wnd->size,
+ ~(u32)0,
+ wnd->paddr >> PAMU_PAGE_SHIFT,
+ dma_domain->snoop_id,
+ dma_domain->stash_id,
+ (wnd_nr > 0) ? 1 : 0,
+ wnd->prot);
+ if (ret)
+ pr_debug("Subwindow reconfiguration failed for liodn %d\n", liodn);
+ } else {
+ phys_addr_t wnd_addr;
+
+ wnd_addr = dma_domain->iommu_domain->geometry.aperture_start;
+
+ ret = pamu_config_ppaace(liodn, wnd_addr,
+ wnd->size,
+ ~(u32)0,
+ wnd->paddr >> PAMU_PAGE_SHIFT,
+ dma_domain->snoop_id, dma_domain->stash_id,
+ 0, wnd->prot);
+ if (ret)
+ pr_debug("Window reconfiguration failed for liodn %d\n", liodn);
+ }
+
+ spin_unlock_irqrestore(&iommu_lock, flags);
+
+ return ret;
+}
+
+static int update_liodn_stash(int liodn, struct fsl_dma_domain *dma_domain,
+ u32 val)
+{
+ int ret = 0, i;
+ unsigned long flags;
+
+ spin_lock_irqsave(&iommu_lock, flags);
+ if (!dma_domain->win_arr) {
+ pr_debug("Windows not configured, stash destination update failed for liodn %d\n", liodn);
+ spin_unlock_irqrestore(&iommu_lock, flags);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < dma_domain->win_cnt; i++) {
+ ret = pamu_update_paace_stash(liodn, i, val);
+ if (ret) {
+ pr_debug("Failed to update SPAACE %d field for liodn %d\n ", i, liodn);
+ spin_unlock_irqrestore(&iommu_lock, flags);
+ return ret;
+ }
+ }
+
+ spin_unlock_irqrestore(&iommu_lock, flags);
+
+ return ret;
+}
+
+/* Set the geometry parameters for a LIODN */
+static int pamu_set_liodn(int liodn, struct device *dev,
+ struct fsl_dma_domain *dma_domain,
+ struct iommu_domain_geometry *geom_attr,
+ u32 win_cnt)
+{
+ phys_addr_t window_addr, window_size;
+ phys_addr_t subwin_size;
+ int ret = 0, i;
+ u32 omi_index = ~(u32)0;
+ unsigned long flags;
+
+ /*
+ * Configure the omi_index at the geometry setup time.
+ * This is a static value which depends on the type of
+ * device and would not change thereafter.
+ */
+ get_ome_index(&omi_index, dev);
+
+ window_addr = geom_attr->aperture_start;
+ window_size = dma_domain->geom_size;
+
+ spin_lock_irqsave(&iommu_lock, flags);
+ ret = pamu_disable_liodn(liodn);
+ if (!ret)
+ ret = pamu_config_ppaace(liodn, window_addr, window_size, omi_index,
+ 0, dma_domain->snoop_id,
+ dma_domain->stash_id, win_cnt, 0);
+ spin_unlock_irqrestore(&iommu_lock, flags);
+ if (ret) {
+ pr_debug("PAMU PAACE configuration failed for liodn %d, win_cnt =%d\n", liodn, win_cnt);
+ return ret;
+ }
+
+ if (win_cnt > 1) {
+ subwin_size = window_size >> ilog2(win_cnt);
+ for (i = 0; i < win_cnt; i++) {
+ spin_lock_irqsave(&iommu_lock, flags);
+ ret = pamu_disable_spaace(liodn, i);
+ if (!ret)
+ ret = pamu_config_spaace(liodn, win_cnt, i,
+ subwin_size, omi_index,
+ 0, dma_domain->snoop_id,
+ dma_domain->stash_id,
+ 0, 0);
+ spin_unlock_irqrestore(&iommu_lock, flags);
+ if (ret) {
+ pr_debug("PAMU SPAACE configuration failed for liodn %d\n", liodn);
+ return ret;
+ }
+ }
+ }
+
+ return ret;
+}
+
+static int check_size(u64 size, dma_addr_t iova)
+{
+ /*
+ * Size must be a power of two and at least equal to the
+ * PAMU page size.
+ */
+ if (!is_power_of_2(size) || size < PAMU_PAGE_SIZE) {
+ pr_debug("%s: size too small or not a power of two\n", __func__);
+ return -EINVAL;
+ }
+
+ /* iova must be aligned to the window size */
+ if (iova & (size - 1)) {
+ pr_debug("%s: address is not aligned with window size\n", __func__);
+ return -EINVAL;
+ }
+
+ return 0;
+}
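+
+/*
+ * For example, size = 64 MiB (0x04000000) at iova 0x04000000 passes
+ * both checks, while the same size at iova 0x02000000 fails the
+ * alignment check (0x02000000 & (0x04000000 - 1) != 0).
+ */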
+
+static struct fsl_dma_domain *iommu_alloc_dma_domain(void)
+{
+ struct fsl_dma_domain *domain;
+
+ domain = kmem_cache_zalloc(fsl_pamu_domain_cache, GFP_KERNEL);
+ if (!domain)
+ return NULL;
+
+ domain->stash_id = ~(u32)0;
+ domain->snoop_id = ~(u32)0;
+ domain->win_cnt = pamu_get_max_subwin_cnt();
+ domain->geom_size = 0;
+
+ INIT_LIST_HEAD(&domain->devices);
+
+ spin_lock_init(&domain->domain_lock);
+
+ return domain;
+}
+
+static inline struct device_domain_info *find_domain(struct device *dev)
+{
+ return dev->archdata.iommu_domain;
+}
+
+static void remove_device_ref(struct device_domain_info *info, u32 win_cnt)
+{
+ unsigned long flags;
+
+ list_del(&info->link);
+ spin_lock_irqsave(&iommu_lock, flags);
+ if (win_cnt > 1)
+ pamu_free_subwins(info->liodn);
+ pamu_disable_liodn(info->liodn);
+ spin_unlock_irqrestore(&iommu_lock, flags);
+ spin_lock_irqsave(&device_domain_lock, flags);
+ info->dev->archdata.iommu_domain = NULL;
+ kmem_cache_free(iommu_devinfo_cache, info);
+ spin_unlock_irqrestore(&device_domain_lock, flags);
+}
+
+static void detach_device(struct device *dev, struct fsl_dma_domain *dma_domain)
+{
+ struct device_domain_info *info, *tmp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dma_domain->domain_lock, flags);
+ /* Remove the device from the domain device list */
+ list_for_each_entry_safe(info, tmp, &dma_domain->devices, link) {
+ if (!dev || (info->dev == dev))
+ remove_device_ref(info, dma_domain->win_cnt);
+ }
+ spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
+}
+
+static void attach_device(struct fsl_dma_domain *dma_domain, int liodn, struct device *dev)
+{
+ struct device_domain_info *info, *old_domain_info;
+ unsigned long flags;
+
+ spin_lock_irqsave(&device_domain_lock, flags);
+ /*
+ * Check if the device is already attached to a domain. If it
+ * is attached to a different domain, detach it from that
+ * domain first.
+ */
+ old_domain_info = find_domain(dev);
+ if (old_domain_info && old_domain_info->domain != dma_domain) {
+ spin_unlock_irqrestore(&device_domain_lock, flags);
+ detach_device(dev, old_domain_info->domain);
+ spin_lock_irqsave(&device_domain_lock, flags);
+ }
+
+ info = kmem_cache_zalloc(iommu_devinfo_cache, GFP_ATOMIC);
+ if (!info) {
+ pr_debug("device_domain_info allocation failed\n");
+ spin_unlock_irqrestore(&device_domain_lock, flags);
+ return;
+ }
+
+ info->dev = dev;
+ info->liodn = liodn;
+ info->domain = dma_domain;
+
+ list_add(&info->link, &dma_domain->devices);
+ /*
+ * In case of devices with multiple LIODNs just store
+ * the info for the first LIODN as all
+ * LIODNs share the same domain
+ */
+ if (!old_domain_info)
+ dev->archdata.iommu_domain = info;
+ spin_unlock_irqrestore(&device_domain_lock, flags);
+}
+
+static phys_addr_t fsl_pamu_iova_to_phys(struct iommu_domain *domain,
+ dma_addr_t iova)
+{
+ struct fsl_dma_domain *dma_domain = domain->priv;
+
+ if ((iova < domain->geometry.aperture_start) ||
+ iova > (domain->geometry.aperture_end))
+ return 0;
+
+ return get_phys_addr(dma_domain, iova);
+}
+
+static int fsl_pamu_domain_has_cap(struct iommu_domain *domain,
+ unsigned long cap)
+{
+ return cap == IOMMU_CAP_CACHE_COHERENCY;
+}
+
+static void fsl_pamu_domain_destroy(struct iommu_domain *domain)
+{
+ struct fsl_dma_domain *dma_domain = domain->priv;
+
+ domain->priv = NULL;
+
+ /* remove all the devices from the device list */
+ detach_device(NULL, dma_domain);
+
+ dma_domain->enabled = 0;
+ dma_domain->mapped = 0;
+
+ kmem_cache_free(fsl_pamu_domain_cache, dma_domain);
+}
+
+static int fsl_pamu_domain_init(struct iommu_domain *domain)
+{
+ struct fsl_dma_domain *dma_domain;
+
+ dma_domain = iommu_alloc_dma_domain();
+ if (!dma_domain) {
+ pr_debug("dma_domain allocation failed\n");
+ return -ENOMEM;
+ }
+ domain->priv = dma_domain;
+ dma_domain->iommu_domain = domain;
+ /* default geometry: 64 GB, i.e. the maximum system address space */
+ domain->geometry.aperture_start = 0;
+ domain->geometry.aperture_end = (1ULL << 36) - 1;
+ domain->geometry.force_aperture = true;
+
+ return 0;
+}
+
+/* Configure geometry settings for all LIODNs associated with domain */
+static int pamu_set_domain_geometry(struct fsl_dma_domain *dma_domain,
+ struct iommu_domain_geometry *geom_attr,
+ u32 win_cnt)
+{
+ struct device_domain_info *info;
+ int ret = 0;
+
+ list_for_each_entry(info, &dma_domain->devices, link) {
+ ret = pamu_set_liodn(info->liodn, info->dev, dma_domain,
+ geom_attr, win_cnt);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+/* Update stash destination for all LIODNs associated with the domain */
+static int update_domain_stash(struct fsl_dma_domain *dma_domain, u32 val)
+{
+ struct device_domain_info *info;
+ int ret = 0;
+
+ list_for_each_entry(info, &dma_domain->devices, link) {
+ ret = update_liodn_stash(info->liodn, dma_domain, val);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+/* Update domain mappings for all LIODNs associated with the domain */
+static int update_domain_mapping(struct fsl_dma_domain *dma_domain, u32 wnd_nr)
+{
+ struct device_domain_info *info;
+ int ret = 0;
+
+ list_for_each_entry(info, &dma_domain->devices, link) {
+ ret = update_liodn(info->liodn, dma_domain, wnd_nr);
+ if (ret)
+ break;
+ }
+ return ret;
+}
+
+static int disable_domain_win(struct fsl_dma_domain *dma_domain, u32 wnd_nr)
+{
+ struct device_domain_info *info;
+ int ret = 0;
+
+ list_for_each_entry(info, &dma_domain->devices, link) {
+ if (dma_domain->win_cnt == 1 && dma_domain->enabled) {
+ ret = pamu_disable_liodn(info->liodn);
+ if (!ret)
+ dma_domain->enabled = 0;
+ } else {
+ ret = pamu_disable_spaace(info->liodn, wnd_nr);
+ }
+ }
+
+ return ret;
+}
+
+static void fsl_pamu_window_disable(struct iommu_domain *domain, u32 wnd_nr)
+{
+ struct fsl_dma_domain *dma_domain = domain->priv;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&dma_domain->domain_lock, flags);
+ if (!dma_domain->win_arr) {
+ pr_debug("Number of windows not configured\n");
+ spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
+ return;
+ }
+
+ if (wnd_nr >= dma_domain->win_cnt) {
+ pr_debug("Invalid window index\n");
+ spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
+ return;
+ }
+
+ if (dma_domain->win_arr[wnd_nr].valid) {
+ ret = disable_domain_win(dma_domain, wnd_nr);
+ if (!ret) {
+ dma_domain->win_arr[wnd_nr].valid = 0;
+ dma_domain->mapped--;
+ }
+ }
+
+ spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
+}
+
+static int fsl_pamu_window_enable(struct iommu_domain *domain, u32 wnd_nr,
+ phys_addr_t paddr, u64 size, int prot)
+{
+ struct fsl_dma_domain *dma_domain = domain->priv;
+ struct dma_window *wnd;
+ int pamu_prot = 0;
+ int ret;
+ unsigned long flags;
+ u64 win_size;
+
+ if (prot & IOMMU_READ)
+ pamu_prot |= PAACE_AP_PERMS_QUERY;
+ if (prot & IOMMU_WRITE)
+ pamu_prot |= PAACE_AP_PERMS_UPDATE;
+
+ spin_lock_irqsave(&dma_domain->domain_lock, flags);
+ if (!dma_domain->win_arr) {
+ pr_debug("Number of windows not configured\n");
+ spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
+ return -ENODEV;
+ }
+
+ if (wnd_nr >= dma_domain->win_cnt) {
+ pr_debug("Invalid window index\n");
+ spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
+ return -EINVAL;
+ }
+
+ win_size = dma_domain->geom_size >> ilog2(dma_domain->win_cnt);
+ if (size > win_size) {
+ pr_debug("Invalid window size \n");
+ spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
+ return -EINVAL;
+ }
+
+ if (dma_domain->win_cnt == 1) {
+ if (dma_domain->enabled) {
+ pr_debug("Disable the window before updating the mapping\n");
+ spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
+ return -EBUSY;
+ }
+
+ ret = check_size(size, domain->geometry.aperture_start);
+ if (ret) {
+ pr_debug("Aperture start not aligned to the size\n");
+ spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
+ return -EINVAL;
+ }
+ }
+
+ wnd = &dma_domain->win_arr[wnd_nr];
+ if (!wnd->valid) {
+ wnd->paddr = paddr;
+ wnd->size = size;
+ wnd->prot = pamu_prot;
+
+ ret = update_domain_mapping(dma_domain, wnd_nr);
+ if (!ret) {
+ wnd->valid = 1;
+ dma_domain->mapped++;
+ }
+ } else {
+ pr_debug("Disable the window before updating the mapping\n");
+ ret = -EBUSY;
+ }
+
+ spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
+
+ return ret;
+}
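+
+/*
+ * Minimal usage sketch of the window API from a caller's point of view
+ * (illustrative only; "dom" and "phys" are hypothetical):
+ *
+ *	struct iommu_domain_geometry geom = {
+ *		.aperture_start = 0,
+ *		.aperture_end = SZ_1G - 1,
+ *		.force_aperture = true,
+ *	};
+ *	u32 wins = 1;
+ *	int enable = 1;
+ *
+ *	iommu_domain_set_attr(dom, DOMAIN_ATTR_GEOMETRY, &geom);
+ *	iommu_domain_set_attr(dom, DOMAIN_ATTR_WINDOWS, &wins);
+ *	iommu_domain_window_enable(dom, 0, phys, SZ_1G,
+ *				   IOMMU_READ | IOMMU_WRITE);
+ *	iommu_domain_set_attr(dom, DOMAIN_ATTR_FSL_PAMU_ENABLE, &enable);
+ */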
+
+/*
+ * Attach the LIODN to the DMA domain and configure the geometry
+ * and window mappings.
+ */
+static int handle_attach_device(struct fsl_dma_domain *dma_domain,
+ struct device *dev, const u32 *liodn,
+ int num)
+{
+ unsigned long flags;
+ struct iommu_domain *domain = dma_domain->iommu_domain;
+ int ret = 0;
+ int i;
+
+ spin_lock_irqsave(&dma_domain->domain_lock, flags);
+ for (i = 0; i < num; i++) {
+ /* Ensure that the LIODN value is valid */
+ if (liodn[i] >= PAACE_NUMBER_ENTRIES) {
+ pr_debug("Invalid liodn %d, attach device failed for %s\n",
+ liodn[i], dev->of_node->full_name);
+ ret = -EINVAL;
+ break;
+ }
+
+ attach_device(dma_domain, liodn[i], dev);
+ /*
+ * Check if geometry has already been configured
+ * for the domain. If yes, set the geometry for
+ * the LIODN.
+ */
+ if (dma_domain->win_arr) {
+ u32 win_cnt = dma_domain->win_cnt > 1 ? dma_domain->win_cnt : 0;
+ ret = pamu_set_liodn(liodn[i], dev, dma_domain,
+ &domain->geometry,
+ win_cnt);
+ if (ret)
+ break;
+ if (dma_domain->mapped) {
+ /*
+ * Create window/subwindow mapping for
+ * the LIODN.
+ */
+ ret = map_liodn(liodn[i], dma_domain);
+ if (ret)
+ break;
+ }
+ }
+ }
+ spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
+
+ return ret;
+}
+
+static int fsl_pamu_attach_device(struct iommu_domain *domain,
+ struct device *dev)
+{
+ struct fsl_dma_domain *dma_domain = domain->priv;
+ const u32 *liodn;
+ u32 liodn_cnt;
+ int len, ret = 0;
+ struct pci_dev *pdev = NULL;
+ struct pci_controller *pci_ctl;
+
+ /*
+ * Use LIODN of the PCI controller while attaching a
+ * PCI device.
+ */
+ if (dev->bus == &pci_bus_type) {
+ pdev = to_pci_dev(dev);
+ pci_ctl = pci_bus_to_host(pdev->bus);
+ /*
+ * make dev point to pci controller device
+ * so we can get the LIODN programmed by
+ * u-boot.
+ */
+ dev = pci_ctl->parent;
+ }
+
+ liodn = of_get_property(dev->of_node, "fsl,liodn", &len);
+ if (liodn) {
+ liodn_cnt = len / sizeof(u32);
+ ret = handle_attach_device(dma_domain, dev,
+ liodn, liodn_cnt);
+ } else {
+ pr_debug("missing fsl,liodn property at %s\n",
+ dev->of_node->full_name);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static void fsl_pamu_detach_device(struct iommu_domain *domain,
+ struct device *dev)
+{
+ struct fsl_dma_domain *dma_domain = domain->priv;
+ const u32 *prop;
+ int len;
+ struct pci_dev *pdev = NULL;
+ struct pci_controller *pci_ctl;
+
+ /*
+ * Use LIODN of the PCI controller while detaching a
+ * PCI device.
+ */
+ if (dev->bus == &pci_bus_type) {
+ pdev = to_pci_dev(dev);
+ pci_ctl = pci_bus_to_host(pdev->bus);
+ /*
+ * make dev point to pci controller device
+ * so we can get the LIODN programmed by
+ * u-boot.
+ */
+ dev = pci_ctl->parent;
+ }
+
+ prop = of_get_property(dev->of_node, "fsl,liodn", &len);
+ if (prop)
+ detach_device(dev, dma_domain);
+ else
+ pr_debug("missing fsl,liodn property at %s\n",
+ dev->of_node->full_name);
+}
+
+static int configure_domain_geometry(struct iommu_domain *domain, void *data)
+{
+ struct iommu_domain_geometry *geom_attr = data;
+ struct fsl_dma_domain *dma_domain = domain->priv;
+ dma_addr_t geom_size;
+ unsigned long flags;
+
+ geom_size = geom_attr->aperture_end - geom_attr->aperture_start + 1;
+ /*
+ * Sanity check the geometry size. Also, we do not support
+ * DMA outside of the geometry.
+ */
+ if (check_size(geom_size, geom_attr->aperture_start) ||
+ !geom_attr->force_aperture) {
+ pr_debug("Invalid PAMU geometry attributes\n");
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&dma_domain->domain_lock, flags);
+ if (dma_domain->enabled) {
+ pr_debug("Can't set geometry attributes as domain is active\n");
+ spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
+ return -EBUSY;
+ }
+
+ /* Copy the domain geometry information */
+ memcpy(&domain->geometry, geom_attr,
+ sizeof(struct iommu_domain_geometry));
+ dma_domain->geom_size = geom_size;
+
+ spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
+
+ return 0;
+}
+
+/* Set the domain stash attribute */
+static int configure_domain_stash(struct fsl_dma_domain *dma_domain, void *data)
+{
+ struct pamu_stash_attribute *stash_attr = data;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&dma_domain->domain_lock, flags);
+
+ memcpy(&dma_domain->dma_stash, stash_attr,
+ sizeof(struct pamu_stash_attribute));
+
+ dma_domain->stash_id = get_stash_id(stash_attr->cache,
+ stash_attr->cpu);
+ if (dma_domain->stash_id == ~(u32)0) {
+ pr_debug("Invalid stash attributes\n");
+ spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
+ return -EINVAL;
+ }
+
+ ret = update_domain_stash(dma_domain, dma_domain->stash_id);
+
+ spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
+
+ return ret;
+}
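+
+/*
+ * Example caller usage for the stash attribute (illustrative; the
+ * PAMU_ATTR_CACHE_* constant is assumed from fsl_pamu_stash.h):
+ *
+ *	struct pamu_stash_attribute stash = {
+ *		.cpu = 0,
+ *		.cache = PAMU_ATTR_CACHE_L1,
+ *	};
+ *	iommu_domain_set_attr(dom, DOMAIN_ATTR_FSL_PAMU_STASH, &stash);
+ */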
+
+/* Configure the domain DMA state, i.e. enable/disable DMA */
+static int configure_domain_dma_state(struct fsl_dma_domain *dma_domain, bool enable)
+{
+ struct device_domain_info *info;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&dma_domain->domain_lock, flags);
+
+ if (enable && !dma_domain->mapped) {
+ pr_debug("Can't enable DMA domain without valid mapping\n");
+ spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
+ return -ENODEV;
+ }
+
+ dma_domain->enabled = enable;
+ list_for_each_entry(info, &dma_domain->devices,
+ link) {
+ ret = (enable) ? pamu_enable_liodn(info->liodn) :
+ pamu_disable_liodn(info->liodn);
+ if (ret)
+ pr_debug("Unable to set dma state for liodn %d",
+ info->liodn);
+ }
+ spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
+
+ return 0;
+}
+
+static int fsl_pamu_set_domain_attr(struct iommu_domain *domain,
+ enum iommu_attr attr_type, void *data)
+{
+ struct fsl_dma_domain *dma_domain = domain->priv;
+ int ret = 0;
+
+ switch (attr_type) {
+ case DOMAIN_ATTR_GEOMETRY:
+ ret = configure_domain_geometry(domain, data);
+ break;
+ case DOMAIN_ATTR_FSL_PAMU_STASH:
+ ret = configure_domain_stash(dma_domain, data);
+ break;
+ case DOMAIN_ATTR_FSL_PAMU_ENABLE:
+ ret = configure_domain_dma_state(dma_domain, *(int *)data);
+ break;
+ default:
+ pr_debug("Unsupported attribute type\n");
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int fsl_pamu_get_domain_attr(struct iommu_domain *domain,
+ enum iommu_attr attr_type, void *data)
+{
+ struct fsl_dma_domain *dma_domain = domain->priv;
+ int ret = 0;
+
+ switch (attr_type) {
+ case DOMAIN_ATTR_FSL_PAMU_STASH:
+ memcpy((struct pamu_stash_attribute *) data, &dma_domain->dma_stash,
+ sizeof(struct pamu_stash_attribute));
+ break;
+ case DOMAIN_ATTR_FSL_PAMU_ENABLE:
+ *(int *)data = dma_domain->enabled;
+ break;
+ case DOMAIN_ATTR_FSL_PAMUV1:
+ *(int *)data = DOMAIN_ATTR_FSL_PAMUV1;
+ break;
+ default:
+ pr_debug("Unsupported attribute type\n");
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+#define REQ_ACS_FLAGS (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)
+
+static struct iommu_group *get_device_iommu_group(struct device *dev)
+{
+ struct iommu_group *group;
+
+ group = iommu_group_get(dev);
+ if (!group)
+ group = iommu_group_alloc();
+
+ return group;
+}
+
+static bool check_pci_ctl_endpt_part(struct pci_controller *pci_ctl)
+{
+ u32 version;
+
+ /* Check the PCI controller version number by reading the BRR1 register */
+ version = in_be32(pci_ctl->cfg_addr + (PCI_FSL_BRR1 >> 2));
+ version &= PCI_FSL_BRR1_VER;
+ /* If the PCI controller version is >= 0x204 we can partition endpoints */
+ return version >= 0x204;
+}
+
+/* Get iommu group information from peer devices or devices on the parent bus */
+static struct iommu_group *get_shared_pci_device_group(struct pci_dev *pdev)
+{
+ struct pci_dev *tmp;
+ struct iommu_group *group;
+ struct pci_bus *bus = pdev->bus;
+
+ /*
+ * Traverse the pci bus device list to get
+ * the shared iommu group.
+ */
+ while (bus) {
+ list_for_each_entry(tmp, &bus->devices, bus_list) {
+ if (tmp == pdev)
+ continue;
+ group = iommu_group_get(&tmp->dev);
+ if (group)
+ return group;
+ }
+
+ bus = bus->parent;
+ }
+
+ return NULL;
+}
+
+static struct iommu_group *get_pci_device_group(struct pci_dev *pdev)
+{
+ struct pci_controller *pci_ctl;
+ bool pci_endpt_partitioning;
+ struct iommu_group *group = NULL;
+ struct pci_dev *bridge, *dma_pdev = NULL;
+
+ pci_ctl = pci_bus_to_host(pdev->bus);
+ pci_endpt_partitioning = check_pci_ctl_endpt_part(pci_ctl);
+ /* We can partition PCIe devices so assign device group to the device */
+ if (pci_endpt_partitioning) {
+ bridge = pci_find_upstream_pcie_bridge(pdev);
+ if (bridge) {
+ if (pci_is_pcie(bridge))
+ dma_pdev = pci_get_domain_bus_and_slot(
+ pci_domain_nr(pdev->bus),
+ bridge->subordinate->number, 0);
+ if (!dma_pdev)
+ dma_pdev = pci_dev_get(bridge);
+ } else {
+ dma_pdev = pci_dev_get(pdev);
+ }
+
+ /* Account for quirked devices */
+ swap_pci_ref(&dma_pdev, pci_get_dma_source(dma_pdev));
+
+ /*
+ * If it's a multifunction device that does not support our
+ * required ACS flags, add it to the same group as the lowest
+ * numbered function that also does not support the required
+ * ACS flags.
+ */
+ if (dma_pdev->multifunction &&
+ !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS)) {
+ u8 i, slot = PCI_SLOT(dma_pdev->devfn);
+
+ for (i = 0; i < 8; i++) {
+ struct pci_dev *tmp;
+
+ tmp = pci_get_slot(dma_pdev->bus, PCI_DEVFN(slot, i));
+ if (!tmp)
+ continue;
+
+ if (!pci_acs_enabled(tmp, REQ_ACS_FLAGS)) {
+ swap_pci_ref(&dma_pdev, tmp);
+ break;
+ }
+ pci_dev_put(tmp);
+ }
+ }
+
+ /*
+ * Devices on the root bus go through the iommu. If that's not us,
+ * find the next upstream device and test ACS up to the root bus.
+ * Finding the next device may require skipping virtual buses.
+ */
+ while (!pci_is_root_bus(dma_pdev->bus)) {
+ struct pci_bus *bus = dma_pdev->bus;
+
+ while (!bus->self) {
+ if (!pci_is_root_bus(bus))
+ bus = bus->parent;
+ else
+ goto root_bus;
+ }
+
+ if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
+ break;
+
+ swap_pci_ref(&dma_pdev, pci_dev_get(bus->self));
+ }
+
+root_bus:
+ group = get_device_iommu_group(&dma_pdev->dev);
+ pci_dev_put(dma_pdev);
+ /*
+ * The PCIe controller is not a partitionable entity,
+ * so free the controller device's iommu_group.
+ */
+ if (pci_ctl->parent->iommu_group)
+ iommu_group_remove_device(pci_ctl->parent);
+ } else {
+ /*
+ * All devices connected to the controller will share the
+ * PCI controller's device group. If this is the first
+ * device to be probed for the pci controller, copy the
+ * device group information from the PCI controller device
+ * node and remove the PCI controller iommu group.
+ * For subsequent devices, the iommu group information can
+ * be obtained from sibling devices (i.e. from the bus_devices
+ * linked list).
+ */
+ if (pci_ctl->parent->iommu_group) {
+ group = get_device_iommu_group(pci_ctl->parent);
+ iommu_group_remove_device(pci_ctl->parent);
+ } else {
+ group = get_shared_pci_device_group(pdev);
+ }
+ }
+
+ return group;
+}
+
+static int fsl_pamu_add_device(struct device *dev)
+{
+ struct iommu_group *group = NULL;
+ struct pci_dev *pdev;
+ const u32 *prop;
+ int ret, len;
+
+ /*
+ * For platform devices we allocate a separate group for
+ * each of the devices.
+ */
+ if (dev->bus == &pci_bus_type) {
+ pdev = to_pci_dev(dev);
+ /* Don't create device groups for virtual PCI bridges */
+ if (pdev->subordinate)
+ return 0;
+
+ group = get_pci_device_group(pdev);
+
+ } else {
+ prop = of_get_property(dev->of_node, "fsl,liodn", &len);
+ if (prop)
+ group = get_device_iommu_group(dev);
+ }
+
+ if (!group)
+ return -ENODEV;
+ if (IS_ERR(group))
+ return PTR_ERR(group);
+
+ ret = iommu_group_add_device(group, dev);
+
+ iommu_group_put(group);
+ return ret;
+}
+
+static void fsl_pamu_remove_device(struct device *dev)
+{
+ iommu_group_remove_device(dev);
+}
+
+static int fsl_pamu_set_windows(struct iommu_domain *domain, u32 w_count)
+{
+ struct fsl_dma_domain *dma_domain = domain->priv;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&dma_domain->domain_lock, flags);
+ /* Ensure domain is inactive i.e. DMA should be disabled for the domain */
+ if (dma_domain->enabled) {
+ pr_debug("Can't set geometry attributes as domain is active\n");
+ spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
+ return -EBUSY;
+ }
+
+ /* Ensure that the geometry has been set for the domain */
+ if (!dma_domain->geom_size) {
+ pr_debug("Please configure geometry before setting the number of windows\n");
+ spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
+ return -EINVAL;
+ }
+
+ /*
+ * Ensure we have valid window count i.e. it should be less than
+ * maximum permissible limit and should be a power of two.
+ */
+ if (w_count > pamu_get_max_subwin_cnt() || !is_power_of_2(w_count)) {
+ pr_debug("Invalid window count\n");
+ spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
+ return -EINVAL;
+ }
+
+ ret = pamu_set_domain_geometry(dma_domain, &domain->geometry,
+ ((w_count > 1) ? w_count : 0));
+ if (!ret) {
+ kfree(dma_domain->win_arr);
+ dma_domain->win_arr = kzalloc(sizeof(struct dma_window) *
+ w_count, GFP_ATOMIC);
+ if (!dma_domain->win_arr) {
+ spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
+ return -ENOMEM;
+ }
+ dma_domain->win_cnt = w_count;
+ }
+ spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
+
+ return ret;
+}
+
+static u32 fsl_pamu_get_windows(struct iommu_domain *domain)
+{
+ struct fsl_dma_domain *dma_domain = domain->priv;
+
+ return dma_domain->win_cnt;
+}
+
+static struct iommu_ops fsl_pamu_ops = {
+ .domain_init = fsl_pamu_domain_init,
+ .domain_destroy = fsl_pamu_domain_destroy,
+ .attach_dev = fsl_pamu_attach_device,
+ .detach_dev = fsl_pamu_detach_device,
+ .domain_window_enable = fsl_pamu_window_enable,
+ .domain_window_disable = fsl_pamu_window_disable,
+ .domain_get_windows = fsl_pamu_get_windows,
+ .domain_set_windows = fsl_pamu_set_windows,
+ .iova_to_phys = fsl_pamu_iova_to_phys,
+ .domain_has_cap = fsl_pamu_domain_has_cap,
+ .domain_set_attr = fsl_pamu_set_domain_attr,
+ .domain_get_attr = fsl_pamu_get_domain_attr,
+ .add_device = fsl_pamu_add_device,
+ .remove_device = fsl_pamu_remove_device,
+};
+
+int pamu_domain_init(void)
+{
+ int ret = 0;
+
+ ret = iommu_init_mempool();
+ if (ret)
+ return ret;
+
+ bus_set_iommu(&platform_bus_type, &fsl_pamu_ops);
+ bus_set_iommu(&pci_bus_type, &fsl_pamu_ops);
+
+ return ret;
+}
diff --git a/drivers/iommu/fsl_pamu_domain.h b/drivers/iommu/fsl_pamu_domain.h
new file mode 100644
index 0000000..c90293f
--- /dev/null
+++ b/drivers/iommu/fsl_pamu_domain.h
@@ -0,0 +1,85 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) 2013 Freescale Semiconductor, Inc.
+ *
+ */
+
+#ifndef __FSL_PAMU_DOMAIN_H
+#define __FSL_PAMU_DOMAIN_H
+
+#include "fsl_pamu.h"
+
+struct dma_window {
+ phys_addr_t paddr;
+ u64 size;
+ int valid;
+ int prot;
+};
+
+struct fsl_dma_domain {
+ /*
+ * Indicates the geometry size for the domain.
+ * This would be set when the geometry is
+ * configured for the domain.
+ */
+ dma_addr_t geom_size;
+ /*
+ * Number of windows associated with this domain.
+ * During domain initialization, it is set to the
+ * maximum number of subwindows allowed for a LIODN.
+ * The minimum value is 1, indicating a single PAMU
+ * window without any subwindows. The value can be
+ * set/queried via the set_attr/get_attr API for
+ * DOMAIN_ATTR_WINDOWS, and can only be set once the
+ * geometry has been configured.
+ */
+ u32 win_cnt;
+ /*
+ * win_arr contains information about the configured
+ * windows for a domain. It is allocated only when
+ * the number of windows for the domain is set.
+ */
+ struct dma_window *win_arr;
+ /* list of devices associated with the domain */
+ struct list_head devices;
+ /* dma_domain states:
+ * mapped - A particular mapping has been created
+ * within the configured geometry.
+ * enabled - DMA has been enabled for the given
+ * domain. This translates to setting of the
+ * valid bit for the primary PAACE in the PAMU
+ * PAACT table. Domain geometry should be set and
+ * it must have a valid mapping before DMA can be
+ * enabled for it.
+ *
+ */
+ int mapped;
+ int enabled;
+ /* stash_id obtained from the stash attribute details */
+ u32 stash_id;
+ struct pamu_stash_attribute dma_stash;
+ u32 snoop_id;
+ struct iommu_domain *iommu_domain;
+ spinlock_t domain_lock;
+};
+
+/* domain-device relationship */
+struct device_domain_info {
+ struct list_head link; /* link to domain siblings */
+ struct device *dev;
+ u32 liodn;
+ struct fsl_dma_domain *domain; /* pointer to domain */
+};
+#endif /* __FSL_PAMU_DOMAIN_H */
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index eec0d3e..15e9b57 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -890,56 +890,54 @@ static int dma_pte_clear_range(struct dmar_domain *domain,
return order;
}
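+/*
+ * Helper for dma_pte_free_pagetable(): recursively walk the page-table
+ * levels and free any page-table page fully covered by the range
+ * [start_pfn, last_pfn]; partially covered tables are descended into
+ * but kept.
+ */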
+static void dma_pte_free_level(struct dmar_domain *domain, int level,
+ struct dma_pte *pte, unsigned long pfn,
+ unsigned long start_pfn, unsigned long last_pfn)
+{
+ pfn = max(start_pfn, pfn);
+ pte = &pte[pfn_level_offset(pfn, level)];
+
+ do {
+ unsigned long level_pfn;
+ struct dma_pte *level_pte;
+
+ if (!dma_pte_present(pte) || dma_pte_superpage(pte))
+ goto next;
+
+ level_pfn = pfn & level_mask(level - 1);
+ level_pte = phys_to_virt(dma_pte_addr(pte));
+
+ if (level > 2)
+ dma_pte_free_level(domain, level - 1, level_pte,
+ level_pfn, start_pfn, last_pfn);
+
+ /* If range covers entire pagetable, free it */
+ if (start_pfn <= level_pfn &&
+ last_pfn >= level_pfn + level_size(level) - 1) {
+ dma_clear_pte(pte);
+ domain_flush_cache(domain, pte, sizeof(*pte));
+ free_pgtable_page(level_pte);
+ }
+next:
+ pfn += level_size(level);
+ } while (!first_pte_in_page(++pte) && pfn <= last_pfn);
+}
+
/* free page table pages. last level pte should already be cleared */
static void dma_pte_free_pagetable(struct dmar_domain *domain,
unsigned long start_pfn,
unsigned long last_pfn)
{
int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
- struct dma_pte *first_pte, *pte;
- int total = agaw_to_level(domain->agaw);
- int level;
- unsigned long tmp;
- int large_page = 2;
BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
BUG_ON(start_pfn > last_pfn);
/* We don't need lock here; nobody else touches the iova range */
- level = 2;
- while (level <= total) {
- tmp = align_to_level(start_pfn, level);
-
- /* If we can't even clear one PTE at this level, we're done */
- if (tmp + level_size(level) - 1 > last_pfn)
- return;
-
- do {
- large_page = level;
- first_pte = pte = dma_pfn_level_pte(domain, tmp, level, &large_page);
- if (large_page > level)
- level = large_page + 1;
- if (!pte) {
- tmp = align_to_level(tmp + 1, level + 1);
- continue;
- }
- do {
- if (dma_pte_present(pte)) {
- free_pgtable_page(phys_to_virt(dma_pte_addr(pte)));
- dma_clear_pte(pte);
- }
- pte++;
- tmp += level_size(level);
- } while (!first_pte_in_page(pte) &&
- tmp + level_size(level) - 1 <= last_pfn);
+ dma_pte_free_level(domain, agaw_to_level(domain->agaw),
+ domain->pgd, 0, start_pfn, last_pfn);
- domain_flush_cache(domain, first_pte,
- (void *)pte - (void *)first_pte);
-
- } while (tmp && tmp + level_size(level) - 1 <= last_pfn);
- level++;
- }
/* free pgd */
if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
free_pgtable_page(domain->pgd);
diff --git a/drivers/iommu/msm_iommu_dev.c b/drivers/iommu/msm_iommu_dev.c
index 0a1c962..08ba497 100644
--- a/drivers/iommu/msm_iommu_dev.c
+++ b/drivers/iommu/msm_iommu_dev.c
@@ -282,7 +282,6 @@ static int msm_iommu_remove(struct platform_device *pdev)
clk_put(drv->pclk);
memset(drv, 0, sizeof(*drv));
kfree(drv);
- platform_set_drvdata(pdev, NULL);
}
return 0;
}
@@ -366,7 +365,6 @@ static int msm_iommu_ctx_remove(struct platform_device *pdev)
if (drv) {
memset(drv, 0, sizeof(struct msm_iommu_ctx_drvdata));
kfree(drv);
- platform_set_drvdata(pdev, NULL);
}
return 0;
}
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index 0ba3766..bcd78a7 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -1008,8 +1008,6 @@ static int omap_iommu_remove(struct platform_device *pdev)
struct resource *res;
struct omap_iommu *obj = platform_get_drvdata(pdev);
- platform_set_drvdata(pdev, NULL);
-
iopgtable_clear_entry_all(obj);
irq = platform_get_irq(pdev, 0);
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index 1fea003..3792a1a 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -30,6 +30,11 @@ config ARM_VIC_NR
The maximum number of VICs available in the system, for
power management.
+config IMGPDC_IRQ
+ bool
+ select GENERIC_IRQ_CHIP
+ select IRQ_DOMAIN
+
config ORION_IRQCHIP
bool
select IRQ_DOMAIN
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index e65c41a..c60b901 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -2,6 +2,7 @@ obj-$(CONFIG_IRQCHIP) += irqchip.o
obj-$(CONFIG_ARCH_BCM2835) += irq-bcm2835.o
obj-$(CONFIG_ARCH_EXYNOS) += exynos-combiner.o
+obj-$(CONFIG_ARCH_MMP) += irq-mmp.o
obj-$(CONFIG_ARCH_MVEBU) += irq-armada-370-xp.o
obj-$(CONFIG_ARCH_MXS) += irq-mxs.o
obj-$(CONFIG_ARCH_S3C24XX) += irq-s3c24xx.o
@@ -14,6 +15,7 @@ obj-$(CONFIG_ARCH_SPEAR3XX) += spear-shirq.o
obj-$(CONFIG_ARM_GIC) += irq-gic.o
obj-$(CONFIG_ARM_NVIC) += irq-nvic.o
obj-$(CONFIG_ARM_VIC) += irq-vic.o
+obj-$(CONFIG_IMGPDC_IRQ) += irq-imgpdc.o
obj-$(CONFIG_SIRF_IRQ) += irq-sirfsoc.o
obj-$(CONFIG_RENESAS_INTC_IRQPIN) += irq-renesas-intc-irqpin.o
obj-$(CONFIG_RENESAS_IRQC) += irq-renesas-irqc.o
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index ee7c503..d0e9480 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -453,6 +453,12 @@ static void gic_cpu_init(struct gic_chip_data *gic)
writel_relaxed(1, base + GIC_CPU_CTRL);
}
+void gic_cpu_if_down(void)
+{
+ void __iomem *cpu_base = gic_data_cpu_base(&gic_data[0]);
+
+ writel_relaxed(0, cpu_base + GIC_CPU_CTRL);
+}
+
#ifdef CONFIG_CPU_PM
/*
* Saves the GIC distributor registers during suspend or idle. Must be called
diff --git a/drivers/irqchip/irq-imgpdc.c b/drivers/irqchip/irq-imgpdc.c
new file mode 100644
index 0000000..8071c2e
--- /dev/null
+++ b/drivers/irqchip/irq-imgpdc.c
@@ -0,0 +1,499 @@
+/*
+ * IMG PowerDown Controller (PDC)
+ *
+ * Copyright 2010-2013 Imagination Technologies Ltd.
+ *
+ * Exposes the syswake and PDC peripheral wake interrupts to the system.
+ *
+ */
+
+#include <linux/bitops.h>
+#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+
+/* PDC interrupt register numbers */
+
+#define PDC_IRQ_STATUS 0x310
+#define PDC_IRQ_ENABLE 0x314
+#define PDC_IRQ_CLEAR 0x318
+#define PDC_IRQ_ROUTE 0x31c
+#define PDC_SYS_WAKE_BASE 0x330
+#define PDC_SYS_WAKE_STRIDE 0x8
+#define PDC_SYS_WAKE_CONFIG_BASE 0x334
+#define PDC_SYS_WAKE_CONFIG_STRIDE 0x8
+
+/* PDC interrupt register field masks */
+
+#define PDC_IRQ_SYS3 0x08
+#define PDC_IRQ_SYS2 0x04
+#define PDC_IRQ_SYS1 0x02
+#define PDC_IRQ_SYS0 0x01
+#define PDC_IRQ_ROUTE_WU_EN_SYS3 0x08000000
+#define PDC_IRQ_ROUTE_WU_EN_SYS2 0x04000000
+#define PDC_IRQ_ROUTE_WU_EN_SYS1 0x02000000
+#define PDC_IRQ_ROUTE_WU_EN_SYS0 0x01000000
+#define PDC_IRQ_ROUTE_WU_EN_WD 0x00040000
+#define PDC_IRQ_ROUTE_WU_EN_IR 0x00020000
+#define PDC_IRQ_ROUTE_WU_EN_RTC 0x00010000
+#define PDC_IRQ_ROUTE_EXT_EN_SYS3 0x00000800
+#define PDC_IRQ_ROUTE_EXT_EN_SYS2 0x00000400
+#define PDC_IRQ_ROUTE_EXT_EN_SYS1 0x00000200
+#define PDC_IRQ_ROUTE_EXT_EN_SYS0 0x00000100
+#define PDC_IRQ_ROUTE_EXT_EN_WD 0x00000004
+#define PDC_IRQ_ROUTE_EXT_EN_IR 0x00000002
+#define PDC_IRQ_ROUTE_EXT_EN_RTC 0x00000001
+#define PDC_SYS_WAKE_RESET 0x00000010
+#define PDC_SYS_WAKE_INT_MODE 0x0000000e
+#define PDC_SYS_WAKE_INT_MODE_SHIFT 1
+#define PDC_SYS_WAKE_PIN_VAL 0x00000001
+
+/* PDC interrupt constants */
+
+#define PDC_SYS_WAKE_INT_LOW 0x0
+#define PDC_SYS_WAKE_INT_HIGH 0x1
+#define PDC_SYS_WAKE_INT_DOWN 0x2
+#define PDC_SYS_WAKE_INT_UP 0x3
+#define PDC_SYS_WAKE_INT_CHANGE 0x6
+#define PDC_SYS_WAKE_INT_NONE 0x4
+
+/**
+ * struct pdc_intc_priv - private pdc interrupt data.
+ * @nr_perips: Number of peripheral interrupt signals.
+ * @nr_syswakes: Number of syswake signals.
+ * @perip_irqs: List of peripheral IRQ numbers handled.
+ * @syswake_irq: Shared PDC syswake IRQ number.
+ * @domain: IRQ domain for PDC peripheral and syswake IRQs.
+ * @pdc_base: Base of PDC registers.
+ * @irq_route: Cached version of PDC_IRQ_ROUTE register.
+ * @lock: Lock to protect the PDC syswake registers and the cached
+ * values of those registers in this struct.
+ */
+struct pdc_intc_priv {
+ unsigned int nr_perips;
+ unsigned int nr_syswakes;
+ unsigned int *perip_irqs;
+ unsigned int syswake_irq;
+ struct irq_domain *domain;
+ void __iomem *pdc_base;
+
+ u32 irq_route;
+ raw_spinlock_t lock;
+};
+
+static void pdc_write(struct pdc_intc_priv *priv, unsigned int reg_offs,
+ unsigned int data)
+{
+ iowrite32(data, priv->pdc_base + reg_offs);
+}
+
+static unsigned int pdc_read(struct pdc_intc_priv *priv,
+ unsigned int reg_offs)
+{
+ return ioread32(priv->pdc_base + reg_offs);
+}
+
+/* Generic IRQ callbacks */
+
+#define SYS0_HWIRQ 8
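+
+/*
+ * hwirqs 0..7 within the IRQ domain are the peripheral wake interrupts;
+ * hwirqs 8..15 (SYS0_HWIRQ upwards) are the syswakes. The helpers below
+ * convert between the two numbering schemes.
+ */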
+
+static unsigned int hwirq_is_syswake(irq_hw_number_t hw)
+{
+ return hw >= SYS0_HWIRQ;
+}
+
+static unsigned int hwirq_to_syswake(irq_hw_number_t hw)
+{
+ return hw - SYS0_HWIRQ;
+}
+
+static irq_hw_number_t syswake_to_hwirq(unsigned int syswake)
+{
+ return SYS0_HWIRQ + syswake;
+}
+
+static struct pdc_intc_priv *irqd_to_priv(struct irq_data *data)
+{
+ return (struct pdc_intc_priv *)data->domain->host_data;
+}
+
+/*
+ * perip_irq_mask() and perip_irq_unmask() use IRQ_ROUTE which also contains
+ * wake bits, therefore we cannot use the generic irqchip mask callbacks as they
+ * cache the mask.
+ */
+
+static void perip_irq_mask(struct irq_data *data)
+{
+ struct pdc_intc_priv *priv = irqd_to_priv(data);
+
+ raw_spin_lock(&priv->lock);
+ priv->irq_route &= ~data->mask;
+ pdc_write(priv, PDC_IRQ_ROUTE, priv->irq_route);
+ raw_spin_unlock(&priv->lock);
+}
+
+static void perip_irq_unmask(struct irq_data *data)
+{
+ struct pdc_intc_priv *priv = irqd_to_priv(data);
+
+ raw_spin_lock(&priv->lock);
+ priv->irq_route |= data->mask;
+ pdc_write(priv, PDC_IRQ_ROUTE, priv->irq_route);
+ raw_spin_unlock(&priv->lock);
+}
+
+static int syswake_irq_set_type(struct irq_data *data, unsigned int flow_type)
+{
+ struct pdc_intc_priv *priv = irqd_to_priv(data);
+ unsigned int syswake = hwirq_to_syswake(data->hwirq);
+ unsigned int irq_mode;
+ unsigned int soc_sys_wake_regoff, soc_sys_wake;
+
+ /* translate to syswake IRQ mode */
+ switch (flow_type) {
+ case IRQ_TYPE_EDGE_BOTH:
+ irq_mode = PDC_SYS_WAKE_INT_CHANGE;
+ break;
+ case IRQ_TYPE_EDGE_RISING:
+ irq_mode = PDC_SYS_WAKE_INT_UP;
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ irq_mode = PDC_SYS_WAKE_INT_DOWN;
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ irq_mode = PDC_SYS_WAKE_INT_HIGH;
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ irq_mode = PDC_SYS_WAKE_INT_LOW;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ raw_spin_lock(&priv->lock);
+
+ /* set the IRQ mode */
+ soc_sys_wake_regoff = PDC_SYS_WAKE_BASE + syswake*PDC_SYS_WAKE_STRIDE;
+ soc_sys_wake = pdc_read(priv, soc_sys_wake_regoff);
+ soc_sys_wake &= ~PDC_SYS_WAKE_INT_MODE;
+ soc_sys_wake |= irq_mode << PDC_SYS_WAKE_INT_MODE_SHIFT;
+ pdc_write(priv, soc_sys_wake_regoff, soc_sys_wake);
+
+ /* and update the handler */
+ irq_setup_alt_chip(data, flow_type);
+
+ raw_spin_unlock(&priv->lock);
+
+ return 0;
+}
+
+/* applies to both peripheral and syswake interrupts */
+static int pdc_irq_set_wake(struct irq_data *data, unsigned int on)
+{
+ struct pdc_intc_priv *priv = irqd_to_priv(data);
+ irq_hw_number_t hw = data->hwirq;
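+ /* the WU_EN bits sit 16 bits above the EXT_EN routing bits */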
+ unsigned int mask = (1 << 16) << hw;
+ unsigned int dst_irq;
+
+ raw_spin_lock(&priv->lock);
+ if (on)
+ priv->irq_route |= mask;
+ else
+ priv->irq_route &= ~mask;
+ pdc_write(priv, PDC_IRQ_ROUTE, priv->irq_route);
+ raw_spin_unlock(&priv->lock);
+
+ /* control the destination IRQ wakeup too for standby mode */
+ if (hwirq_is_syswake(hw))
+ dst_irq = priv->syswake_irq;
+ else
+ dst_irq = priv->perip_irqs[hw];
+ irq_set_irq_wake(dst_irq, on);
+
+ return 0;
+}
+
+static void pdc_intc_perip_isr(unsigned int irq, struct irq_desc *desc)
+{
+ struct pdc_intc_priv *priv;
+ unsigned int i, irq_no;
+
+ priv = (struct pdc_intc_priv *)irq_desc_get_handler_data(desc);
+
+ /* find the peripheral number */
+ for (i = 0; i < priv->nr_perips; ++i)
+ if (irq == priv->perip_irqs[i])
+ goto found;
+
+ /* should never get here */
+ return;
+found:
+
+ /* pass on the interrupt */
+ irq_no = irq_linear_revmap(priv->domain, i);
+ generic_handle_irq(irq_no);
+}
+
+static void pdc_intc_syswake_isr(unsigned int irq, struct irq_desc *desc)
+{
+ struct pdc_intc_priv *priv;
+ unsigned int syswake, irq_no;
+ unsigned int status;
+
+ priv = (struct pdc_intc_priv *)irq_desc_get_handler_data(desc);
+
+ status = pdc_read(priv, PDC_IRQ_STATUS) &
+ pdc_read(priv, PDC_IRQ_ENABLE);
+ status &= (1 << priv->nr_syswakes) - 1;
+
+ for (syswake = 0; status; status >>= 1, ++syswake) {
+ /* Has this sys_wake triggered? */
+ if (!(status & 1))
+ continue;
+
+ irq_no = irq_linear_revmap(priv->domain,
+ syswake_to_hwirq(syswake));
+ generic_handle_irq(irq_no);
+ }
+}
+
+static void pdc_intc_setup(struct pdc_intc_priv *priv)
+{
+ int i;
+ unsigned int soc_sys_wake_regoff;
+ unsigned int soc_sys_wake;
+
+ /*
+ * Mask all syswake interrupts before routing, or we could receive an
+ * interrupt before we're ready to handle it.
+ */
+ pdc_write(priv, PDC_IRQ_ENABLE, 0);
+
+ /*
+ * Enable routing of all syswakes
+ * Disable all wake sources
+ */
+ priv->irq_route = ((PDC_IRQ_ROUTE_EXT_EN_SYS0 << priv->nr_syswakes) -
+ PDC_IRQ_ROUTE_EXT_EN_SYS0);
+ pdc_write(priv, PDC_IRQ_ROUTE, priv->irq_route);
+
+ /* Initialise syswake IRQ */
+ for (i = 0; i < priv->nr_syswakes; ++i) {
+ /* set the IRQ mode to none */
+ soc_sys_wake_regoff = PDC_SYS_WAKE_BASE + i*PDC_SYS_WAKE_STRIDE;
+ soc_sys_wake = PDC_SYS_WAKE_INT_NONE
+ << PDC_SYS_WAKE_INT_MODE_SHIFT;
+ pdc_write(priv, soc_sys_wake_regoff, soc_sys_wake);
+ }
+}
+
+static int pdc_intc_probe(struct platform_device *pdev)
+{
+ struct pdc_intc_priv *priv;
+ struct device_node *node = pdev->dev.of_node;
+ struct resource *res_regs;
+ struct irq_chip_generic *gc;
+ unsigned int i;
+ int irq, ret;
+ u32 val;
+
+ if (!node)
+ return -ENOENT;
+
+ /* Get registers */
+ res_regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res_regs == NULL) {
+ dev_err(&pdev->dev, "cannot find registers resource\n");
+ return -ENOENT;
+ }
+
+ /* Allocate driver data */
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ dev_err(&pdev->dev, "cannot allocate device data\n");
+ return -ENOMEM;
+ }
+ raw_spin_lock_init(&priv->lock);
+ platform_set_drvdata(pdev, priv);
+
+ /* Ioremap the registers */
+ priv->pdc_base = devm_ioremap(&pdev->dev, res_regs->start,
+ resource_size(res_regs));
+ if (!priv->pdc_base)
+ return -EIO;
+
+ /* Get number of peripherals */
+ ret = of_property_read_u32(node, "num-perips", &val);
+ if (ret) {
+ dev_err(&pdev->dev, "No num-perips node property found\n");
+ return -EINVAL;
+ }
+ if (val > SYS0_HWIRQ) {
+ dev_err(&pdev->dev, "num-perips (%u) out of range\n", val);
+ return -EINVAL;
+ }
+ priv->nr_perips = val;
+
+ /* Get number of syswakes */
+ ret = of_property_read_u32(node, "num-syswakes", &val);
+ if (ret) {
+ dev_err(&pdev->dev, "No num-syswakes node property found\n");
+ return -EINVAL;
+ }
+ if (val > SYS0_HWIRQ) {
+ dev_err(&pdev->dev, "num-syswakes (%u) out of range\n", val);
+ return -EINVAL;
+ }
+ priv->nr_syswakes = val;
+
+ /* Get peripheral IRQ numbers */
+ priv->perip_irqs = devm_kzalloc(&pdev->dev,
+ sizeof(*priv->perip_irqs) * priv->nr_perips,
+ GFP_KERNEL);
+ if (!priv->perip_irqs) {
+ dev_err(&pdev->dev, "cannot allocate perip IRQ list\n");
+ return -ENOMEM;
+ }
+ for (i = 0; i < priv->nr_perips; ++i) {
+ irq = platform_get_irq(pdev, 1 + i);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "cannot find perip IRQ #%u\n", i);
+ return irq;
+ }
+ priv->perip_irqs[i] = irq;
+ }
+ /* check if too many were provided */
+ if (platform_get_irq(pdev, 1 + i) >= 0) {
+ dev_err(&pdev->dev, "surplus perip IRQs detected\n");
+ return -EINVAL;
+ }
+
+ /* Get syswake IRQ number */
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "cannot find syswake IRQ\n");
+ return irq;
+ }
+ priv->syswake_irq = irq;
+
+ /* Set up an IRQ domain */
+ priv->domain = irq_domain_add_linear(node, 16, &irq_generic_chip_ops,
+ priv);
+ if (unlikely(!priv->domain)) {
+ dev_err(&pdev->dev, "cannot add IRQ domain\n");
+ return -ENOMEM;
+ }
+
+ /*
+ * Set up 2 generic irq chips with 2 chip types.
+ * The first one for peripheral irqs (only 1 chip type used)
+ * The second one for syswake irqs (edge and level chip types)
+ */
+ ret = irq_alloc_domain_generic_chips(priv->domain, 8, 2, "pdc",
+ handle_level_irq, 0, 0,
+ IRQ_GC_INIT_NESTED_LOCK);
+ if (ret)
+ goto err_generic;
+
+ /* peripheral interrupt chip */
+
+ gc = irq_get_domain_generic_chip(priv->domain, 0);
+ gc->unused = ~(BIT(priv->nr_perips) - 1);
+ gc->reg_base = priv->pdc_base;
+ /*
+ * IRQ_ROUTE contains wake bits, so we can't use the generic versions as
+ * they cache the mask
+ */
+ gc->chip_types[0].regs.mask = PDC_IRQ_ROUTE;
+ gc->chip_types[0].chip.irq_mask = perip_irq_mask;
+ gc->chip_types[0].chip.irq_unmask = perip_irq_unmask;
+ gc->chip_types[0].chip.irq_set_wake = pdc_irq_set_wake;
+
+ /* syswake interrupt chip */
+
+ gc = irq_get_domain_generic_chip(priv->domain, 8);
+ gc->unused = ~(BIT(priv->nr_syswakes) - 1);
+ gc->reg_base = priv->pdc_base;
+
+ /* edge interrupts */
+ gc->chip_types[0].type = IRQ_TYPE_EDGE_BOTH;
+ gc->chip_types[0].handler = handle_edge_irq;
+ gc->chip_types[0].regs.ack = PDC_IRQ_CLEAR;
+ gc->chip_types[0].regs.mask = PDC_IRQ_ENABLE;
+ gc->chip_types[0].chip.irq_ack = irq_gc_ack_set_bit;
+ gc->chip_types[0].chip.irq_mask = irq_gc_mask_clr_bit;
+ gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit;
+ gc->chip_types[0].chip.irq_set_type = syswake_irq_set_type;
+ gc->chip_types[0].chip.irq_set_wake = pdc_irq_set_wake;
+ /* for standby we pass on to the shared syswake IRQ */
+ gc->chip_types[0].chip.flags = IRQCHIP_MASK_ON_SUSPEND;
+
+ /* level interrupts */
+ gc->chip_types[1].type = IRQ_TYPE_LEVEL_MASK;
+ gc->chip_types[1].handler = handle_level_irq;
+ gc->chip_types[1].regs.ack = PDC_IRQ_CLEAR;
+ gc->chip_types[1].regs.mask = PDC_IRQ_ENABLE;
+ gc->chip_types[1].chip.irq_ack = irq_gc_ack_set_bit;
+ gc->chip_types[1].chip.irq_mask = irq_gc_mask_clr_bit;
+ gc->chip_types[1].chip.irq_unmask = irq_gc_mask_set_bit;
+ gc->chip_types[1].chip.irq_set_type = syswake_irq_set_type;
+ gc->chip_types[1].chip.irq_set_wake = pdc_irq_set_wake;
+ /* for standby we pass on to the shared syswake IRQ */
+ gc->chip_types[1].chip.flags = IRQCHIP_MASK_ON_SUSPEND;
+
+ /* Set up the hardware to enable interrupt routing */
+ pdc_intc_setup(priv);
+
+ /* Setup chained handlers for the peripheral IRQs */
+ for (i = 0; i < priv->nr_perips; ++i) {
+ irq = priv->perip_irqs[i];
+ irq_set_handler_data(irq, priv);
+ irq_set_chained_handler(irq, pdc_intc_perip_isr);
+ }
+
+ /* Setup chained handler for the syswake IRQ */
+ irq_set_handler_data(priv->syswake_irq, priv);
+ irq_set_chained_handler(priv->syswake_irq, pdc_intc_syswake_isr);
+
+ dev_info(&pdev->dev,
+ "PDC IRQ controller initialised (%u perip IRQs, %u syswake IRQs)\n",
+ priv->nr_perips,
+ priv->nr_syswakes);
+
+ return 0;
+err_generic:
+ irq_domain_remove(priv->domain);
+ return ret;
+}
+
+static int pdc_intc_remove(struct platform_device *pdev)
+{
+ struct pdc_intc_priv *priv = platform_get_drvdata(pdev);
+
+ irq_domain_remove(priv->domain);
+ return 0;
+}
+
+static const struct of_device_id pdc_intc_match[] = {
+ { .compatible = "img,pdc-intc" },
+ {}
+};
+
+static struct platform_driver pdc_intc_driver = {
+ .driver = {
+ .name = "pdc-intc",
+ .of_match_table = pdc_intc_match,
+ },
+ .probe = pdc_intc_probe,
+ .remove = pdc_intc_remove,
+};
+
+static int __init pdc_intc_init(void)
+{
+ return platform_driver_register(&pdc_intc_driver);
+}
+core_initcall(pdc_intc_init);
diff --git a/drivers/irqchip/irq-mmp.c b/drivers/irqchip/irq-mmp.c
new file mode 100644
index 0000000..2cb7cd0
--- /dev/null
+++ b/drivers/irqchip/irq-mmp.c
@@ -0,0 +1,495 @@
+/*
+ * linux/arch/arm/mach-mmp/irq.c
+ *
+ * Generic IRQ handling, GPIO IRQ demultiplexing, etc.
+ * Copyright (C) 2008 - 2012 Marvell Technology Group Ltd.
+ *
+ * Author: Bin Yang <bin.yang@marvell.com>
+ * Haojian Zhuang <haojian.zhuang@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#include <asm/exception.h>
+#include <asm/mach/irq.h>
+
+#include "irqchip.h"
+
+#define MAX_ICU_NR 16
+
+#define PJ1_INT_SEL 0x10c
+#define PJ4_INT_SEL 0x104
+
+/* bit fields in PJ1_INT_SEL and PJ4_INT_SEL */
+#define SEL_INT_PENDING (1 << 6)
+#define SEL_INT_NUM_MASK 0x3f
+
+struct icu_chip_data {
+ int nr_irqs;
+ unsigned int virq_base;
+ unsigned int cascade_irq;
+ void __iomem *reg_status;
+ void __iomem *reg_mask;
+ unsigned int conf_enable;
+ unsigned int conf_disable;
+ unsigned int conf_mask;
+ unsigned int clr_mfp_irq_base;
+ unsigned int clr_mfp_hwirq;
+ struct irq_domain *domain;
+};
+
+struct mmp_intc_conf {
+ unsigned int conf_enable;
+ unsigned int conf_disable;
+ unsigned int conf_mask;
+};
+
+static void __iomem *mmp_icu_base;
+static struct icu_chip_data icu_data[MAX_ICU_NR];
+static int max_icu_nr;
+
+extern void mmp2_clear_pmic_int(void);
+
+static void icu_mask_ack_irq(struct irq_data *d)
+{
+ struct irq_domain *domain = d->domain;
+ struct icu_chip_data *data = (struct icu_chip_data *)domain->host_data;
+ int hwirq;
+ u32 r;
+
+ hwirq = d->irq - data->virq_base;
+ if (data == &icu_data[0]) {
+ r = readl_relaxed(mmp_icu_base + (hwirq << 2));
+ r &= ~data->conf_mask;
+ r |= data->conf_disable;
+ writel_relaxed(r, mmp_icu_base + (hwirq << 2));
+ } else {
+#ifdef CONFIG_CPU_MMP2
+ if ((data->virq_base == data->clr_mfp_irq_base)
+ && (hwirq == data->clr_mfp_hwirq))
+ mmp2_clear_pmic_int();
+#endif
+ r = readl_relaxed(data->reg_mask) | (1 << hwirq);
+ writel_relaxed(r, data->reg_mask);
+ }
+}
+
+static void icu_mask_irq(struct irq_data *d)
+{
+ struct irq_domain *domain = d->domain;
+ struct icu_chip_data *data = (struct icu_chip_data *)domain->host_data;
+ int hwirq;
+ u32 r;
+
+ hwirq = d->irq - data->virq_base;
+ if (data == &icu_data[0]) {
+ r = readl_relaxed(mmp_icu_base + (hwirq << 2));
+ r &= ~data->conf_mask;
+ r |= data->conf_disable;
+ writel_relaxed(r, mmp_icu_base + (hwirq << 2));
+ } else {
+ r = readl_relaxed(data->reg_mask) | (1 << hwirq);
+ writel_relaxed(r, data->reg_mask);
+ }
+}
+
+static void icu_unmask_irq(struct irq_data *d)
+{
+ struct irq_domain *domain = d->domain;
+ struct icu_chip_data *data = (struct icu_chip_data *)domain->host_data;
+ int hwirq;
+ u32 r;
+
+ hwirq = d->irq - data->virq_base;
+ if (data == &icu_data[0]) {
+ r = readl_relaxed(mmp_icu_base + (hwirq << 2));
+ r &= ~data->conf_mask;
+ r |= data->conf_enable;
+ writel_relaxed(r, mmp_icu_base + (hwirq << 2));
+ } else {
+ r = readl_relaxed(data->reg_mask) & ~(1 << hwirq);
+ writel_relaxed(r, data->reg_mask);
+ }
+}
+
+struct irq_chip icu_irq_chip = {
+ .name = "icu_irq",
+ .irq_mask = icu_mask_irq,
+ .irq_mask_ack = icu_mask_ack_irq,
+ .irq_unmask = icu_unmask_irq,
+};
+
+static void icu_mux_irq_demux(unsigned int irq, struct irq_desc *desc)
+{
+ struct irq_domain *domain;
+ struct icu_chip_data *data;
+ int i;
+ unsigned long mask, status, n;
+
+ for (i = 1; i < max_icu_nr; i++) {
+ if (irq == icu_data[i].cascade_irq) {
+ domain = icu_data[i].domain;
+ data = (struct icu_chip_data *)domain->host_data;
+ break;
+ }
+ }
+ if (i >= max_icu_nr) {
+ pr_err("Spurious irq %d in MMP INTC\n", irq);
+ return;
+ }
+
+ mask = readl_relaxed(data->reg_mask);
+ while (1) {
+ status = readl_relaxed(data->reg_status) & ~mask;
+ if (status == 0)
+ break;
+ for_each_set_bit(n, &status, BITS_PER_LONG) {
+ generic_handle_irq(icu_data[i].virq_base + n);
+ }
+ }
+}
+
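icu_mux_irq_demux() above keeps re-reading the status register until no unmasked bit remains, so a source that fires while an earlier bit is being serviced is caught on the next sweep rather than lost. A hedged standalone sketch of that loop shape, with read_status() and dispatch() as hypothetical callbacks:

static void demux_until_clear(unsigned long mask,
			      unsigned long (*read_status)(void),
			      void (*dispatch)(unsigned int bit))
{
	unsigned long status;
	unsigned int n;

	/* Re-sample after each sweep; stop only when nothing is pending. */
	while ((status = read_status() & ~mask) != 0)
		for (n = 0; n < 8 * sizeof(status); n++)
			if (status & (1ul << n))
				dispatch(n);
}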
+static int mmp_irq_domain_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hw)
+{
+ irq_set_chip_and_handler(irq, &icu_irq_chip, handle_level_irq);
+ set_irq_flags(irq, IRQF_VALID);
+ return 0;
+}
+
+static int mmp_irq_domain_xlate(struct irq_domain *d, struct device_node *node,
+ const u32 *intspec, unsigned int intsize,
+ unsigned long *out_hwirq,
+ unsigned int *out_type)
+{
+ *out_hwirq = intspec[0];
+ return 0;
+}
+
+const struct irq_domain_ops mmp_irq_domain_ops = {
+ .map = mmp_irq_domain_map,
+ .xlate = mmp_irq_domain_xlate,
+};
+
+static struct mmp_intc_conf mmp_conf = {
+ .conf_enable = 0x51,
+ .conf_disable = 0x0,
+ .conf_mask = 0x7f,
+};
+
+static struct mmp_intc_conf mmp2_conf = {
+ .conf_enable = 0x20,
+ .conf_disable = 0x0,
+ .conf_mask = 0x7f,
+};
+
+static asmlinkage void __exception_irq_entry
+mmp_handle_irq(struct pt_regs *regs)
+{
+ int irq, hwirq;
+
+ hwirq = readl_relaxed(mmp_icu_base + PJ1_INT_SEL);
+ if (!(hwirq & SEL_INT_PENDING))
+ return;
+ hwirq &= SEL_INT_NUM_MASK;
+ irq = irq_find_mapping(icu_data[0].domain, hwirq);
+ handle_IRQ(irq, regs);
+}
+
+static asmlinkage void __exception_irq_entry
+mmp2_handle_irq(struct pt_regs *regs)
+{
+ int irq, hwirq;
+
+ hwirq = readl_relaxed(mmp_icu_base + PJ4_INT_SEL);
+ if (!(hwirq & SEL_INT_PENDING))
+ return;
+ hwirq &= SEL_INT_NUM_MASK;
+ irq = irq_find_mapping(icu_data[0].domain, hwirq);
+ handle_IRQ(irq, regs);
+}
+
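Both entry handlers above decode the same single-register protocol: bit 6 (SEL_INT_PENDING) says whether anything is pending, and the low six bits (SEL_INT_NUM_MASK) carry the hardware IRQ number. A minimal sketch of that decode on a plain integer, with no MMIO involved:

/* Returns the pending hwirq number, or -1 if nothing is pending. */
static int int_sel_decode(unsigned int sel)
{
	if (!(sel & (1u << 6)))		/* SEL_INT_PENDING clear */
		return -1;
	return (int)(sel & 0x3f);	/* SEL_INT_NUM_MASK */
}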
+/* MMP (ARMv5) */
+void __init icu_init_irq(void)
+{
+ int irq;
+
+ max_icu_nr = 1;
+ mmp_icu_base = ioremap(0xd4282000, 0x1000);
+ icu_data[0].conf_enable = mmp_conf.conf_enable;
+ icu_data[0].conf_disable = mmp_conf.conf_disable;
+ icu_data[0].conf_mask = mmp_conf.conf_mask;
+ icu_data[0].nr_irqs = 64;
+ icu_data[0].virq_base = 0;
+ icu_data[0].domain = irq_domain_add_legacy(NULL, 64, 0, 0,
+ &irq_domain_simple_ops,
+ &icu_data[0]);
+ for (irq = 0; irq < 64; irq++) {
+ icu_mask_irq(irq_get_irq_data(irq));
+ irq_set_chip_and_handler(irq, &icu_irq_chip, handle_level_irq);
+ set_irq_flags(irq, IRQF_VALID);
+ }
+ irq_set_default_host(icu_data[0].domain);
+ set_handle_irq(mmp_handle_irq);
+}
+
+/* MMP2 (ARMv7) */
+void __init mmp2_init_icu(void)
+{
+ int irq, end;
+
+ max_icu_nr = 8;
+ mmp_icu_base = ioremap(0xd4282000, 0x1000);
+ icu_data[0].conf_enable = mmp2_conf.conf_enable;
+ icu_data[0].conf_disable = mmp2_conf.conf_disable;
+ icu_data[0].conf_mask = mmp2_conf.conf_mask;
+ icu_data[0].nr_irqs = 64;
+ icu_data[0].virq_base = 0;
+ icu_data[0].domain = irq_domain_add_legacy(NULL, 64, 0, 0,
+ &irq_domain_simple_ops,
+ &icu_data[0]);
+ icu_data[1].reg_status = mmp_icu_base + 0x150;
+ icu_data[1].reg_mask = mmp_icu_base + 0x168;
+ icu_data[1].clr_mfp_irq_base = icu_data[0].virq_base +
+ icu_data[0].nr_irqs;
+ icu_data[1].clr_mfp_hwirq = 1; /* offset to IRQ_MMP2_PMIC_BASE */
+ icu_data[1].nr_irqs = 2;
+ icu_data[1].cascade_irq = 4;
+ icu_data[1].virq_base = icu_data[0].virq_base + icu_data[0].nr_irqs;
+ icu_data[1].domain = irq_domain_add_legacy(NULL, icu_data[1].nr_irqs,
+ icu_data[1].virq_base, 0,
+ &irq_domain_simple_ops,
+ &icu_data[1]);
+ icu_data[2].reg_status = mmp_icu_base + 0x154;
+ icu_data[2].reg_mask = mmp_icu_base + 0x16c;
+ icu_data[2].nr_irqs = 2;
+ icu_data[2].cascade_irq = 5;
+ icu_data[2].virq_base = icu_data[1].virq_base + icu_data[1].nr_irqs;
+ icu_data[2].domain = irq_domain_add_legacy(NULL, icu_data[2].nr_irqs,
+ icu_data[2].virq_base, 0,
+ &irq_domain_simple_ops,
+ &icu_data[2]);
+ icu_data[3].reg_status = mmp_icu_base + 0x180;
+ icu_data[3].reg_mask = mmp_icu_base + 0x17c;
+ icu_data[3].nr_irqs = 3;
+ icu_data[3].cascade_irq = 9;
+ icu_data[3].virq_base = icu_data[2].virq_base + icu_data[2].nr_irqs;
+ icu_data[3].domain = irq_domain_add_legacy(NULL, icu_data[3].nr_irqs,
+ icu_data[3].virq_base, 0,
+ &irq_domain_simple_ops,
+ &icu_data[3]);
+ icu_data[4].reg_status = mmp_icu_base + 0x158;
+ icu_data[4].reg_mask = mmp_icu_base + 0x170;
+ icu_data[4].nr_irqs = 5;
+ icu_data[4].cascade_irq = 17;
+ icu_data[4].virq_base = icu_data[3].virq_base + icu_data[3].nr_irqs;
+ icu_data[4].domain = irq_domain_add_legacy(NULL, icu_data[4].nr_irqs,
+ icu_data[4].virq_base, 0,
+ &irq_domain_simple_ops,
+ &icu_data[4]);
+ icu_data[5].reg_status = mmp_icu_base + 0x15c;
+ icu_data[5].reg_mask = mmp_icu_base + 0x174;
+ icu_data[5].nr_irqs = 15;
+ icu_data[5].cascade_irq = 35;
+ icu_data[5].virq_base = icu_data[4].virq_base + icu_data[4].nr_irqs;
+ icu_data[5].domain = irq_domain_add_legacy(NULL, icu_data[5].nr_irqs,
+ icu_data[5].virq_base, 0,
+ &irq_domain_simple_ops,
+ &icu_data[5]);
+ icu_data[6].reg_status = mmp_icu_base + 0x160;
+ icu_data[6].reg_mask = mmp_icu_base + 0x178;
+ icu_data[6].nr_irqs = 2;
+ icu_data[6].cascade_irq = 51;
+ icu_data[6].virq_base = icu_data[5].virq_base + icu_data[5].nr_irqs;
+ icu_data[6].domain = irq_domain_add_legacy(NULL, icu_data[6].nr_irqs,
+ icu_data[6].virq_base, 0,
+ &irq_domain_simple_ops,
+ &icu_data[6]);
+ icu_data[7].reg_status = mmp_icu_base + 0x188;
+ icu_data[7].reg_mask = mmp_icu_base + 0x184;
+ icu_data[7].nr_irqs = 2;
+ icu_data[7].cascade_irq = 55;
+ icu_data[7].virq_base = icu_data[6].virq_base + icu_data[6].nr_irqs;
+ icu_data[7].domain = irq_domain_add_legacy(NULL, icu_data[7].nr_irqs,
+ icu_data[7].virq_base, 0,
+ &irq_domain_simple_ops,
+ &icu_data[7]);
+ end = icu_data[7].virq_base + icu_data[7].nr_irqs;
+ for (irq = 0; irq < end; irq++) {
+ icu_mask_irq(irq_get_irq_data(irq));
+ if (irq == icu_data[1].cascade_irq ||
+ irq == icu_data[2].cascade_irq ||
+ irq == icu_data[3].cascade_irq ||
+ irq == icu_data[4].cascade_irq ||
+ irq == icu_data[5].cascade_irq ||
+ irq == icu_data[6].cascade_irq ||
+ irq == icu_data[7].cascade_irq) {
+ irq_set_chip(irq, &icu_irq_chip);
+ irq_set_chained_handler(irq, icu_mux_irq_demux);
+ } else {
+ irq_set_chip_and_handler(irq, &icu_irq_chip,
+ handle_level_irq);
+ }
+ set_irq_flags(irq, IRQF_VALID);
+ }
+ irq_set_default_host(icu_data[0].domain);
+ set_handle_irq(mmp2_handle_irq);
+}
+
+#ifdef CONFIG_OF
+static int __init mmp_init_bases(struct device_node *node)
+{
+ int ret, nr_irqs, irq, i = 0;
+
+ ret = of_property_read_u32(node, "mrvl,intc-nr-irqs", &nr_irqs);
+ if (ret) {
+ pr_err("Not found mrvl,intc-nr-irqs property\n");
+ return ret;
+ }
+
+ mmp_icu_base = of_iomap(node, 0);
+ if (!mmp_icu_base) {
+ pr_err("Failed to get interrupt controller register\n");
+ return -ENOMEM;
+ }
+
+ icu_data[0].virq_base = 0;
+ icu_data[0].domain = irq_domain_add_linear(node, nr_irqs,
+ &mmp_irq_domain_ops,
+ &icu_data[0]);
+ for (irq = 0; irq < nr_irqs; irq++) {
+ ret = irq_create_mapping(icu_data[0].domain, irq);
+ if (!ret) {
+ pr_err("Failed to mapping hwirq\n");
+ goto err;
+ }
+ if (!irq)
+ icu_data[0].virq_base = ret;
+ }
+ icu_data[0].nr_irqs = nr_irqs;
+ return 0;
+err:
+ if (icu_data[0].virq_base) {
+ for (i = 0; i < irq; i++)
+ irq_dispose_mapping(icu_data[0].virq_base + i);
+ }
+ irq_domain_remove(icu_data[0].domain);
+ iounmap(mmp_icu_base);
+ return -EINVAL;
+}
+
+static int __init mmp_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ int ret;
+
+ ret = mmp_init_bases(node);
+ if (ret < 0)
+ return ret;
+
+ icu_data[0].conf_enable = mmp_conf.conf_enable;
+ icu_data[0].conf_disable = mmp_conf.conf_disable;
+ icu_data[0].conf_mask = mmp_conf.conf_mask;
+ irq_set_default_host(icu_data[0].domain);
+ set_handle_irq(mmp_handle_irq);
+ max_icu_nr = 1;
+ return 0;
+}
+IRQCHIP_DECLARE(mmp_intc, "mrvl,mmp-intc", mmp_of_init);
+
+static int __init mmp2_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ int ret;
+
+ ret = mmp_init_bases(node);
+ if (ret < 0)
+ return ret;
+
+ icu_data[0].conf_enable = mmp2_conf.conf_enable;
+ icu_data[0].conf_disable = mmp2_conf.conf_disable;
+ icu_data[0].conf_mask = mmp2_conf.conf_mask;
+ irq_set_default_host(icu_data[0].domain);
+ set_handle_irq(mmp2_handle_irq);
+ max_icu_nr = 1;
+ return 0;
+}
+IRQCHIP_DECLARE(mmp2_intc, "mrvl,mmp2-intc", mmp2_of_init);
+
+static int __init mmp2_mux_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ struct resource res;
+ int i, ret, irq, j = 0;
+ u32 nr_irqs, mfp_irq;
+
+ if (!parent)
+ return -ENODEV;
+
+ i = max_icu_nr;
+ ret = of_property_read_u32(node, "mrvl,intc-nr-irqs",
+ &nr_irqs);
+ if (ret) {
+ pr_err("Not found mrvl,intc-nr-irqs property\n");
+ return -EINVAL;
+ }
+ ret = of_address_to_resource(node, 0, &res);
+ if (ret < 0) {
+ pr_err("Not found reg property\n");
+ return -EINVAL;
+ }
+ icu_data[i].reg_status = mmp_icu_base + res.start;
+ ret = of_address_to_resource(node, 1, &res);
+ if (ret < 0) {
+ pr_err("Not found reg property\n");
+ return -EINVAL;
+ }
+ icu_data[i].reg_mask = mmp_icu_base + res.start;
+ icu_data[i].cascade_irq = irq_of_parse_and_map(node, 0);
+ if (!icu_data[i].cascade_irq)
+ return -EINVAL;
+
+ icu_data[i].virq_base = 0;
+ icu_data[i].domain = irq_domain_add_linear(node, nr_irqs,
+ &mmp_irq_domain_ops,
+ &icu_data[i]);
+ for (irq = 0; irq < nr_irqs; irq++) {
+ ret = irq_create_mapping(icu_data[i].domain, irq);
+ if (!ret) {
+ pr_err("Failed to mapping hwirq\n");
+ goto err;
+ }
+ if (!irq)
+ icu_data[i].virq_base = ret;
+ }
+ icu_data[i].nr_irqs = nr_irqs;
+ if (!of_property_read_u32(node, "mrvl,clr-mfp-irq",
+ &mfp_irq)) {
+ icu_data[i].clr_mfp_irq_base = icu_data[i].virq_base;
+ icu_data[i].clr_mfp_hwirq = mfp_irq;
+ }
+ irq_set_chained_handler(icu_data[i].cascade_irq,
+ icu_mux_irq_demux);
+ max_icu_nr++;
+ return 0;
+err:
+ if (icu_data[i].virq_base) {
+ for (j = 0; j < irq; j++)
+ irq_dispose_mapping(icu_data[i].virq_base + j);
+ }
+ irq_domain_remove(icu_data[i].domain);
+ return -EINVAL;
+}
+IRQCHIP_DECLARE(mmp2_mux_intc, "mrvl,mmp2-mux-intc", mmp2_mux_of_init);
+#endif
diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c
index 7f910c7..3c92780 100644
--- a/drivers/isdn/hardware/mISDN/hfcpci.c
+++ b/drivers/isdn/hardware/mISDN/hfcpci.c
@@ -2295,8 +2295,8 @@ _hfcpci_softirq(struct device *dev, void *arg)
static void
hfcpci_softirq(void *arg)
{
- (void) driver_for_each_device(&hfc_driver.driver, NULL, arg,
- _hfcpci_softirq);
+ WARN_ON_ONCE(driver_for_each_device(&hfc_driver.driver, NULL, arg,
+ _hfcpci_softirq) != 0);
/* if next event would be in the past ... */
if ((s32)(hfc_jiffies + tics - jiffies) <= 0)
diff --git a/drivers/isdn/hisax/amd7930_fn.c b/drivers/isdn/hisax/amd7930_fn.c
index 1063bab..36817e0 100644
--- a/drivers/isdn/hisax/amd7930_fn.c
+++ b/drivers/isdn/hisax/amd7930_fn.c
@@ -314,7 +314,7 @@ Amd7930_empty_Dfifo(struct IsdnCardState *cs, int flag)
t += sprintf(t, "Amd7930: empty_Dfifo cnt: %d |", cs->rcvidx);
QuickHex(t, cs->rcvbuf, cs->rcvidx);
- debugl1(cs, cs->dlog);
+ debugl1(cs, "%s", cs->dlog);
}
/* moves received data in sk-buffer */
memcpy(skb_put(skb, cs->rcvidx), cs->rcvbuf, cs->rcvidx);
@@ -406,7 +406,7 @@ Amd7930_fill_Dfifo(struct IsdnCardState *cs)
t += sprintf(t, "Amd7930: fill_Dfifo cnt: %d |", count);
QuickHex(t, deb_ptr, count);
- debugl1(cs, cs->dlog);
+ debugl1(cs, "%s", cs->dlog);
}
/* AMD interrupts on */
AmdIrqOn(cs);
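This hunk, and the long run of HiSax hunks that follow, all apply one fix: a buffer the driver has just filled (often with frame data off the wire) must be passed as a printf argument, never as the format string itself. If the buffer happens to contain "%s" or "%n", the varargs machinery would chase, or write through, bogus pointers. A minimal illustration of the two call shapes; log_buf_*() are hypothetical printf-style sinks, not HiSax functions:

#include <stdio.h>

static void log_buf_unsafe(const char *buf)
{
	printf(buf);		/* buf == "%s%n" => undefined behaviour */
}

static void log_buf_safe(const char *buf)
{
	printf("%s", buf);	/* buf is printed verbatim, always safe */
}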
diff --git a/drivers/isdn/hisax/avm_pci.c b/drivers/isdn/hisax/avm_pci.c
index ee9b9a0..d1427bd 100644
--- a/drivers/isdn/hisax/avm_pci.c
+++ b/drivers/isdn/hisax/avm_pci.c
@@ -285,7 +285,7 @@ hdlc_empty_fifo(struct BCState *bcs, int count)
t += sprintf(t, "hdlc_empty_fifo %c cnt %d",
bcs->channel ? 'B' : 'A', count);
QuickHex(t, p, count);
- debugl1(cs, bcs->blog);
+ debugl1(cs, "%s", bcs->blog);
}
}
@@ -345,7 +345,7 @@ hdlc_fill_fifo(struct BCState *bcs)
t += sprintf(t, "hdlc_fill_fifo %c cnt %d",
bcs->channel ? 'B' : 'A', count);
QuickHex(t, p, count);
- debugl1(cs, bcs->blog);
+ debugl1(cs, "%s", bcs->blog);
}
}
diff --git a/drivers/isdn/hisax/config.c b/drivers/isdn/hisax/config.c
index bf04d2a..b33f53b 100644
--- a/drivers/isdn/hisax/config.c
+++ b/drivers/isdn/hisax/config.c
@@ -1896,7 +1896,7 @@ static void EChannel_proc_rcv(struct hisax_d_if *d_if)
ptr--;
*ptr++ = '\n';
*ptr = 0;
- HiSax_putstatus(cs, NULL, cs->dlog);
+ HiSax_putstatus(cs, NULL, "%s", cs->dlog);
} else
HiSax_putstatus(cs, "LogEcho: ",
"warning Frame too big (%d)",
diff --git a/drivers/isdn/hisax/diva.c b/drivers/isdn/hisax/diva.c
index 8d0cf6e..4fc90de 100644
--- a/drivers/isdn/hisax/diva.c
+++ b/drivers/isdn/hisax/diva.c
@@ -427,7 +427,7 @@ Memhscx_empty_fifo(struct BCState *bcs, int count)
t += sprintf(t, "hscx_empty_fifo %c cnt %d",
bcs->hw.hscx.hscx ? 'B' : 'A', count);
QuickHex(t, ptr, count);
- debugl1(cs, bcs->blog);
+ debugl1(cs, "%s", bcs->blog);
}
}
@@ -469,7 +469,7 @@ Memhscx_fill_fifo(struct BCState *bcs)
t += sprintf(t, "hscx_fill_fifo %c cnt %d",
bcs->hw.hscx.hscx ? 'B' : 'A', count);
QuickHex(t, ptr, count);
- debugl1(cs, bcs->blog);
+ debugl1(cs, "%s", bcs->blog);
}
}
diff --git a/drivers/isdn/hisax/elsa.c b/drivers/isdn/hisax/elsa.c
index 1df6f9a..2be1c8a 100644
--- a/drivers/isdn/hisax/elsa.c
+++ b/drivers/isdn/hisax/elsa.c
@@ -535,7 +535,7 @@ check_arcofi(struct IsdnCardState *cs)
t = tmp;
t += sprintf(tmp, "Arcofi data");
QuickHex(t, p, cs->dc.isac.mon_rxp);
- debugl1(cs, tmp);
+ debugl1(cs, "%s", tmp);
if ((cs->dc.isac.mon_rxp == 2) && (cs->dc.isac.mon_rx[0] == 0xa0)) {
switch (cs->dc.isac.mon_rx[1]) {
case 0x80:
diff --git a/drivers/isdn/hisax/elsa_ser.c b/drivers/isdn/hisax/elsa_ser.c
index d4c98d3..3f84dd8 100644
--- a/drivers/isdn/hisax/elsa_ser.c
+++ b/drivers/isdn/hisax/elsa_ser.c
@@ -344,7 +344,7 @@ static inline void receive_chars(struct IsdnCardState *cs,
t += sprintf(t, "modem read cnt %d", cs->hw.elsa.rcvcnt);
QuickHex(t, cs->hw.elsa.rcvbuf, cs->hw.elsa.rcvcnt);
- debugl1(cs, tmp);
+ debugl1(cs, "%s", tmp);
}
cs->hw.elsa.rcvcnt = 0;
}
diff --git a/drivers/isdn/hisax/hfc_pci.c b/drivers/isdn/hisax/hfc_pci.c
index 3ccd724..497bd02 100644
--- a/drivers/isdn/hisax/hfc_pci.c
+++ b/drivers/isdn/hisax/hfc_pci.c
@@ -901,7 +901,7 @@ Begin:
ptr--;
*ptr++ = '\n';
*ptr = 0;
- HiSax_putstatus(cs, NULL, cs->dlog);
+ HiSax_putstatus(cs, NULL, "%s", cs->dlog);
} else
HiSax_putstatus(cs, "LogEcho: ", "warning Frame too big (%d)", total - 3);
}
diff --git a/drivers/isdn/hisax/hfc_sx.c b/drivers/isdn/hisax/hfc_sx.c
index dc4574f..fa1fefd 100644
--- a/drivers/isdn/hisax/hfc_sx.c
+++ b/drivers/isdn/hisax/hfc_sx.c
@@ -674,7 +674,7 @@ receive_emsg(struct IsdnCardState *cs)
ptr--;
*ptr++ = '\n';
*ptr = 0;
- HiSax_putstatus(cs, NULL, cs->dlog);
+ HiSax_putstatus(cs, NULL, "%s", cs->dlog);
} else
HiSax_putstatus(cs, "LogEcho: ", "warning Frame too big (%d)", skb->len);
}
diff --git a/drivers/isdn/hisax/hscx_irq.c b/drivers/isdn/hisax/hscx_irq.c
index f398d48..a8d6188 100644
--- a/drivers/isdn/hisax/hscx_irq.c
+++ b/drivers/isdn/hisax/hscx_irq.c
@@ -75,7 +75,7 @@ hscx_empty_fifo(struct BCState *bcs, int count)
t += sprintf(t, "hscx_empty_fifo %c cnt %d",
bcs->hw.hscx.hscx ? 'B' : 'A', count);
QuickHex(t, ptr, count);
- debugl1(cs, bcs->blog);
+ debugl1(cs, "%s", bcs->blog);
}
}
@@ -115,7 +115,7 @@ hscx_fill_fifo(struct BCState *bcs)
t += sprintf(t, "hscx_fill_fifo %c cnt %d",
bcs->hw.hscx.hscx ? 'B' : 'A', count);
QuickHex(t, ptr, count);
- debugl1(cs, bcs->blog);
+ debugl1(cs, "%s", bcs->blog);
}
}
diff --git a/drivers/isdn/hisax/icc.c b/drivers/isdn/hisax/icc.c
index db5321f..51dae91 100644
--- a/drivers/isdn/hisax/icc.c
+++ b/drivers/isdn/hisax/icc.c
@@ -134,7 +134,7 @@ icc_empty_fifo(struct IsdnCardState *cs, int count)
t += sprintf(t, "icc_empty_fifo cnt %d", count);
QuickHex(t, ptr, count);
- debugl1(cs, cs->dlog);
+ debugl1(cs, "%s", cs->dlog);
}
}
@@ -176,7 +176,7 @@ icc_fill_fifo(struct IsdnCardState *cs)
t += sprintf(t, "icc_fill_fifo cnt %d", count);
QuickHex(t, ptr, count);
- debugl1(cs, cs->dlog);
+ debugl1(cs, "%s", cs->dlog);
}
}
diff --git a/drivers/isdn/hisax/ipacx.c b/drivers/isdn/hisax/ipacx.c
index 74feb5c..5faa5de 100644
--- a/drivers/isdn/hisax/ipacx.c
+++ b/drivers/isdn/hisax/ipacx.c
@@ -260,7 +260,7 @@ dch_empty_fifo(struct IsdnCardState *cs, int count)
t += sprintf(t, "dch_empty_fifo() cnt %d", count);
QuickHex(t, ptr, count);
- debugl1(cs, cs->dlog);
+ debugl1(cs, "%s", cs->dlog);
}
}
@@ -307,7 +307,7 @@ dch_fill_fifo(struct IsdnCardState *cs)
t += sprintf(t, "dch_fill_fifo() cnt %d", count);
QuickHex(t, ptr, count);
- debugl1(cs, cs->dlog);
+ debugl1(cs, "%s", cs->dlog);
}
}
@@ -539,7 +539,7 @@ bch_empty_fifo(struct BCState *bcs, int count)
t += sprintf(t, "bch_empty_fifo() B-%d cnt %d", hscx, count);
QuickHex(t, ptr, count);
- debugl1(cs, bcs->blog);
+ debugl1(cs, "%s", bcs->blog);
}
}
@@ -582,7 +582,7 @@ bch_fill_fifo(struct BCState *bcs)
t += sprintf(t, "chb_fill_fifo() B-%d cnt %d", hscx, count);
QuickHex(t, ptr, count);
- debugl1(cs, bcs->blog);
+ debugl1(cs, "%s", bcs->blog);
}
}
diff --git a/drivers/isdn/hisax/isac.c b/drivers/isdn/hisax/isac.c
index a365ccc..7fdf78f 100644
--- a/drivers/isdn/hisax/isac.c
+++ b/drivers/isdn/hisax/isac.c
@@ -137,7 +137,7 @@ isac_empty_fifo(struct IsdnCardState *cs, int count)
t += sprintf(t, "isac_empty_fifo cnt %d", count);
QuickHex(t, ptr, count);
- debugl1(cs, cs->dlog);
+ debugl1(cs, "%s", cs->dlog);
}
}
@@ -179,7 +179,7 @@ isac_fill_fifo(struct IsdnCardState *cs)
t += sprintf(t, "isac_fill_fifo cnt %d", count);
QuickHex(t, ptr, count);
- debugl1(cs, cs->dlog);
+ debugl1(cs, "%s", cs->dlog);
}
}
diff --git a/drivers/isdn/hisax/isar.c b/drivers/isdn/hisax/isar.c
index 7fdf347..f4956c7 100644
--- a/drivers/isdn/hisax/isar.c
+++ b/drivers/isdn/hisax/isar.c
@@ -74,7 +74,7 @@ sendmsg(struct IsdnCardState *cs, u_char his, u_char creg, u_char len,
t = tmp;
t += sprintf(t, "sendmbox cnt %d", len);
QuickHex(t, &msg[len-i], (i > 64) ? 64 : i);
- debugl1(cs, tmp);
+ debugl1(cs, "%s", tmp);
i -= 64;
}
}
@@ -105,7 +105,7 @@ rcv_mbox(struct IsdnCardState *cs, struct isar_reg *ireg, u_char *msg)
t = tmp;
t += sprintf(t, "rcv_mbox cnt %d", ireg->clsb);
QuickHex(t, &msg[ireg->clsb - i], (i > 64) ? 64 : i);
- debugl1(cs, tmp);
+ debugl1(cs, "%s", tmp);
i -= 64;
}
}
@@ -1248,7 +1248,7 @@ isar_int_main(struct IsdnCardState *cs)
tp += sprintf(debbuf, "msg iis(%x) msb(%x)",
ireg->iis, ireg->cmsb);
QuickHex(tp, (u_char *)ireg->par, ireg->clsb);
- debugl1(cs, debbuf);
+ debugl1(cs, "%s", debbuf);
}
break;
case ISAR_IIS_INVMSG:
diff --git a/drivers/isdn/hisax/jade.c b/drivers/isdn/hisax/jade.c
index f946c58..e2ae787 100644
--- a/drivers/isdn/hisax/jade.c
+++ b/drivers/isdn/hisax/jade.c
@@ -81,10 +81,7 @@ modejade(struct BCState *bcs, int mode, int bc)
int jade = bcs->hw.hscx.hscx;
if (cs->debug & L1_DEB_HSCX) {
- char tmp[40];
- sprintf(tmp, "jade %c mode %d ichan %d",
- 'A' + jade, mode, bc);
- debugl1(cs, tmp);
+ debugl1(cs, "jade %c mode %d ichan %d", 'A' + jade, mode, bc);
}
bcs->mode = mode;
bcs->channel = bc;
@@ -257,23 +254,18 @@ void
clear_pending_jade_ints(struct IsdnCardState *cs)
{
int val;
- char tmp[64];
cs->BC_Write_Reg(cs, 0, jade_HDLC_IMR, 0x00);
cs->BC_Write_Reg(cs, 1, jade_HDLC_IMR, 0x00);
val = cs->BC_Read_Reg(cs, 1, jade_HDLC_ISR);
- sprintf(tmp, "jade B ISTA %x", val);
- debugl1(cs, tmp);
+ debugl1(cs, "jade B ISTA %x", val);
val = cs->BC_Read_Reg(cs, 0, jade_HDLC_ISR);
- sprintf(tmp, "jade A ISTA %x", val);
- debugl1(cs, tmp);
+ debugl1(cs, "jade A ISTA %x", val);
val = cs->BC_Read_Reg(cs, 1, jade_HDLC_STAR);
- sprintf(tmp, "jade B STAR %x", val);
- debugl1(cs, tmp);
+ debugl1(cs, "jade B STAR %x", val);
val = cs->BC_Read_Reg(cs, 0, jade_HDLC_STAR);
- sprintf(tmp, "jade A STAR %x", val);
- debugl1(cs, tmp);
+ debugl1(cs, "jade A STAR %x", val);
/* Unmask ints */
cs->BC_Write_Reg(cs, 0, jade_HDLC_IMR, 0xF8);
cs->BC_Write_Reg(cs, 1, jade_HDLC_IMR, 0xF8);
diff --git a/drivers/isdn/hisax/jade_irq.c b/drivers/isdn/hisax/jade_irq.c
index f521fc8..b930da9 100644
--- a/drivers/isdn/hisax/jade_irq.c
+++ b/drivers/isdn/hisax/jade_irq.c
@@ -65,7 +65,7 @@ jade_empty_fifo(struct BCState *bcs, int count)
t += sprintf(t, "jade_empty_fifo %c cnt %d",
bcs->hw.hscx.hscx ? 'B' : 'A', count);
QuickHex(t, ptr, count);
- debugl1(cs, bcs->blog);
+ debugl1(cs, "%s", bcs->blog);
}
}
@@ -105,7 +105,7 @@ jade_fill_fifo(struct BCState *bcs)
t += sprintf(t, "jade_fill_fifo %c cnt %d",
bcs->hw.hscx.hscx ? 'B' : 'A', count);
QuickHex(t, ptr, count);
- debugl1(cs, bcs->blog);
+ debugl1(cs, "%s", bcs->blog);
}
}
diff --git a/drivers/isdn/hisax/l3_1tr6.c b/drivers/isdn/hisax/l3_1tr6.c
index 4c1bca5..875402e 100644
--- a/drivers/isdn/hisax/l3_1tr6.c
+++ b/drivers/isdn/hisax/l3_1tr6.c
@@ -63,7 +63,7 @@ l3_1tr6_error(struct l3_process *pc, u_char *msg, struct sk_buff *skb)
{
dev_kfree_skb(skb);
if (pc->st->l3.debug & L3_DEB_WARN)
- l3_debug(pc->st, msg);
+ l3_debug(pc->st, "%s", msg);
l3_1tr6_release_req(pc, 0, NULL);
}
@@ -161,7 +161,6 @@ l3_1tr6_setup(struct l3_process *pc, u_char pr, void *arg)
{
u_char *p;
int bcfound = 0;
- char tmp[80];
struct sk_buff *skb = arg;
/* Channel Identification */
@@ -214,10 +213,9 @@ l3_1tr6_setup(struct l3_process *pc, u_char pr, void *arg)
/* Signal all services, linklevel takes care of Service-Indicator */
if (bcfound) {
if ((pc->para.setup.si1 != 7) && (pc->st->l3.debug & L3_DEB_WARN)) {
- sprintf(tmp, "non-digital call: %s -> %s",
+ l3_debug(pc->st, "non-digital call: %s -> %s",
pc->para.setup.phone,
pc->para.setup.eazmsn);
- l3_debug(pc->st, tmp);
}
newl3state(pc, 6);
pc->st->l3.l3l4(pc->st, CC_SETUP | INDICATION, pc);
@@ -301,7 +299,7 @@ l3_1tr6_info(struct l3_process *pc, u_char pr, void *arg)
{
u_char *p;
int i, tmpcharge = 0;
- char a_charge[8], tmp[32];
+ char a_charge[8];
struct sk_buff *skb = arg;
p = skb->data;
@@ -316,8 +314,8 @@ l3_1tr6_info(struct l3_process *pc, u_char pr, void *arg)
pc->st->l3.l3l4(pc->st, CC_CHARGE | INDICATION, pc);
}
if (pc->st->l3.debug & L3_DEB_CHARGE) {
- sprintf(tmp, "charging info %d", pc->para.chargeinfo);
- l3_debug(pc->st, tmp);
+ l3_debug(pc->st, "charging info %d",
+ pc->para.chargeinfo);
}
} else if (pc->st->l3.debug & L3_DEB_CHARGE)
l3_debug(pc->st, "charging info not found");
@@ -399,7 +397,7 @@ l3_1tr6_disc(struct l3_process *pc, u_char pr, void *arg)
struct sk_buff *skb = arg;
u_char *p;
int i, tmpcharge = 0;
- char a_charge[8], tmp[32];
+ char a_charge[8];
StopAllL3Timer(pc);
p = skb->data;
@@ -414,8 +412,8 @@ l3_1tr6_disc(struct l3_process *pc, u_char pr, void *arg)
pc->st->l3.l3l4(pc->st, CC_CHARGE | INDICATION, pc);
}
if (pc->st->l3.debug & L3_DEB_CHARGE) {
- sprintf(tmp, "charging info %d", pc->para.chargeinfo);
- l3_debug(pc->st, tmp);
+ l3_debug(pc->st, "charging info %d",
+ pc->para.chargeinfo);
}
} else if (pc->st->l3.debug & L3_DEB_CHARGE)
l3_debug(pc->st, "charging info not found");
@@ -746,7 +744,6 @@ up1tr6(struct PStack *st, int pr, void *arg)
int i, mt, cr;
struct l3_process *proc;
struct sk_buff *skb = arg;
- char tmp[80];
switch (pr) {
case (DL_DATA | INDICATION):
@@ -762,26 +759,23 @@ up1tr6(struct PStack *st, int pr, void *arg)
}
if (skb->len < 4) {
if (st->l3.debug & L3_DEB_PROTERR) {
- sprintf(tmp, "up1tr6 len only %d", skb->len);
- l3_debug(st, tmp);
+ l3_debug(st, "up1tr6 len only %d", skb->len);
}
dev_kfree_skb(skb);
return;
}
if ((skb->data[0] & 0xfe) != PROTO_DIS_N0) {
if (st->l3.debug & L3_DEB_PROTERR) {
- sprintf(tmp, "up1tr6%sunexpected discriminator %x message len %d",
+ l3_debug(st, "up1tr6%sunexpected discriminator %x message len %d",
(pr == (DL_DATA | INDICATION)) ? " " : "(broadcast) ",
skb->data[0], skb->len);
- l3_debug(st, tmp);
}
dev_kfree_skb(skb);
return;
}
if (skb->data[1] != 1) {
if (st->l3.debug & L3_DEB_PROTERR) {
- sprintf(tmp, "up1tr6 CR len not 1");
- l3_debug(st, tmp);
+ l3_debug(st, "up1tr6 CR len not 1");
}
dev_kfree_skb(skb);
return;
@@ -791,9 +785,8 @@ up1tr6(struct PStack *st, int pr, void *arg)
if (skb->data[0] == PROTO_DIS_N0) {
dev_kfree_skb(skb);
if (st->l3.debug & L3_DEB_STATE) {
- sprintf(tmp, "up1tr6%s N0 mt %x unhandled",
+ l3_debug(st, "up1tr6%s N0 mt %x unhandled",
(pr == (DL_DATA | INDICATION)) ? " " : "(broadcast) ", mt);
- l3_debug(st, tmp);
}
} else if (skb->data[0] == PROTO_DIS_N1) {
if (!(proc = getl3proc(st, cr))) {
@@ -801,8 +794,7 @@ up1tr6(struct PStack *st, int pr, void *arg)
if (cr < 128) {
if (!(proc = new_l3_process(st, cr))) {
if (st->l3.debug & L3_DEB_PROTERR) {
- sprintf(tmp, "up1tr6 no roc mem");
- l3_debug(st, tmp);
+ l3_debug(st, "up1tr6 no roc mem");
}
dev_kfree_skb(skb);
return;
@@ -821,8 +813,7 @@ up1tr6(struct PStack *st, int pr, void *arg)
} else {
if (!(proc = new_l3_process(st, cr))) {
if (st->l3.debug & L3_DEB_PROTERR) {
- sprintf(tmp, "up1tr6 no roc mem");
- l3_debug(st, tmp);
+ l3_debug(st, "up1tr6 no roc mem");
}
dev_kfree_skb(skb);
return;
@@ -837,18 +828,16 @@ up1tr6(struct PStack *st, int pr, void *arg)
if (i == ARRAY_SIZE(datastln1)) {
dev_kfree_skb(skb);
if (st->l3.debug & L3_DEB_STATE) {
- sprintf(tmp, "up1tr6%sstate %d mt %x unhandled",
+ l3_debug(st, "up1tr6%sstate %d mt %x unhandled",
(pr == (DL_DATA | INDICATION)) ? " " : "(broadcast) ",
proc->state, mt);
- l3_debug(st, tmp);
}
return;
} else {
if (st->l3.debug & L3_DEB_STATE) {
- sprintf(tmp, "up1tr6%sstate %d mt %x",
+ l3_debug(st, "up1tr6%sstate %d mt %x",
(pr == (DL_DATA | INDICATION)) ? " " : "(broadcast) ",
proc->state, mt);
- l3_debug(st, tmp);
}
datastln1[i].rout(proc, pr, skb);
}
@@ -861,7 +850,6 @@ down1tr6(struct PStack *st, int pr, void *arg)
int i, cr;
struct l3_process *proc;
struct Channel *chan;
- char tmp[80];
if ((DL_ESTABLISH | REQUEST) == pr) {
l3_msg(st, pr, NULL);
@@ -888,15 +876,13 @@ down1tr6(struct PStack *st, int pr, void *arg)
break;
if (i == ARRAY_SIZE(downstl)) {
if (st->l3.debug & L3_DEB_STATE) {
- sprintf(tmp, "down1tr6 state %d prim %d unhandled",
+ l3_debug(st, "down1tr6 state %d prim %d unhandled",
proc->state, pr);
- l3_debug(st, tmp);
}
} else {
if (st->l3.debug & L3_DEB_STATE) {
- sprintf(tmp, "down1tr6 state %d prim %d",
+ l3_debug(st, "down1tr6 state %d prim %d",
proc->state, pr);
- l3_debug(st, tmp);
}
downstl[i].rout(proc, pr, arg);
}
diff --git a/drivers/isdn/hisax/netjet.c b/drivers/isdn/hisax/netjet.c
index b646eed..233e432 100644
--- a/drivers/isdn/hisax/netjet.c
+++ b/drivers/isdn/hisax/netjet.c
@@ -176,7 +176,7 @@ static void printframe(struct IsdnCardState *cs, u_char *buf, int count, char *s
else
j = i;
QuickHex(t, p, j);
- debugl1(cs, tmp);
+ debugl1(cs, "%s", tmp);
p += j;
i -= j;
t = tmp;
diff --git a/drivers/isdn/hisax/q931.c b/drivers/isdn/hisax/q931.c
index 041bf52..af1b020 100644
--- a/drivers/isdn/hisax/q931.c
+++ b/drivers/isdn/hisax/q931.c
@@ -1179,7 +1179,7 @@ LogFrame(struct IsdnCardState *cs, u_char *buf, int size)
dp--;
*dp++ = '\n';
*dp = 0;
- HiSax_putstatus(cs, NULL, cs->dlog);
+ HiSax_putstatus(cs, NULL, "%s", cs->dlog);
} else
HiSax_putstatus(cs, "LogFrame: ", "warning Frame too big (%d)", size);
}
@@ -1246,7 +1246,7 @@ dlogframe(struct IsdnCardState *cs, struct sk_buff *skb, int dir)
}
if (finish) {
*dp = 0;
- HiSax_putstatus(cs, NULL, cs->dlog);
+ HiSax_putstatus(cs, NULL, "%s", cs->dlog);
return;
}
if ((0xfe & buf[0]) == PROTO_DIS_N0) { /* 1TR6 */
@@ -1509,5 +1509,5 @@ dlogframe(struct IsdnCardState *cs, struct sk_buff *skb, int dir)
dp += sprintf(dp, "Unknown protocol %x!", buf[0]);
}
*dp = 0;
- HiSax_putstatus(cs, NULL, cs->dlog);
+ HiSax_putstatus(cs, NULL, "%s", cs->dlog);
}
diff --git a/drivers/isdn/hisax/w6692.c b/drivers/isdn/hisax/w6692.c
index d8cac69..a858955 100644
--- a/drivers/isdn/hisax/w6692.c
+++ b/drivers/isdn/hisax/w6692.c
@@ -154,7 +154,7 @@ W6692_empty_fifo(struct IsdnCardState *cs, int count)
t += sprintf(t, "W6692_empty_fifo cnt %d", count);
QuickHex(t, ptr, count);
- debugl1(cs, cs->dlog);
+ debugl1(cs, "%s", cs->dlog);
}
}
@@ -196,7 +196,7 @@ W6692_fill_fifo(struct IsdnCardState *cs)
t += sprintf(t, "W6692_fill_fifo cnt %d", count);
QuickHex(t, ptr, count);
- debugl1(cs, cs->dlog);
+ debugl1(cs, "%s", cs->dlog);
}
}
@@ -226,7 +226,7 @@ W6692B_empty_fifo(struct BCState *bcs, int count)
t += sprintf(t, "W6692B_empty_fifo %c cnt %d",
bcs->channel + '1', count);
QuickHex(t, ptr, count);
- debugl1(cs, bcs->blog);
+ debugl1(cs, "%s", bcs->blog);
}
}
@@ -264,7 +264,7 @@ W6692B_fill_fifo(struct BCState *bcs)
t += sprintf(t, "W6692B_fill_fifo %c cnt %d",
bcs->channel + '1', count);
QuickHex(t, ptr, count);
- debugl1(cs, bcs->blog);
+ debugl1(cs, "%s", bcs->blog);
}
}
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 074bcb3..875bbe4 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -194,11 +194,11 @@ config LEDS_LP3944
module will be called leds-lp3944.
config LEDS_LP55XX_COMMON
- tristate "Common Driver for TI/National LP5521, LP5523/55231 and LP5562"
- depends on LEDS_LP5521 || LEDS_LP5523 || LEDS_LP5562
+ tristate "Common Driver for TI/National LP5521/5523/55231/5562/8501"
+ depends on LEDS_LP5521 || LEDS_LP5523 || LEDS_LP5562 || LEDS_LP8501
select FW_LOADER
help
- This option supports common operations for LP5521 and LP5523/55231
+ This option supports common operations for LP5521/5523/55231/5562/8501
devices.
config LEDS_LP5521
@@ -232,6 +232,18 @@ config LEDS_LP5562
Driver provides direct control via LED class and interface for
programming the engines.
+config LEDS_LP8501
+ tristate "LED Support for TI LP8501 LED driver chip"
+ depends on LEDS_CLASS && I2C
+ select LEDS_LP55XX_COMMON
+ help
+ If you say yes here you get support for the TI LP8501 LED driver.
+ It is a 9-channel chip with programmable engines.
+ The driver provides direct control via the LED class and an
+ interface for programming the engines.
+ It is similar to the LP5523, but adds output power selection;
+ the register layout and engine program schemes also differ.
+
config LEDS_LP8788
tristate "LED support for the TI LP8788 PMIC"
depends on LEDS_CLASS
@@ -279,13 +291,14 @@ config LEDS_PCA955X
LED driver chips accessed via the I2C bus. Supported
devices include PCA9550, PCA9551, PCA9552, and PCA9553.
-config LEDS_PCA9633
- tristate "LED support for PCA9633 I2C chip"
+config LEDS_PCA963X
+ tristate "LED support for PCA963x I2C chip"
depends on LEDS_CLASS
depends on I2C
help
- This option enables support for LEDs connected to the PCA9633
- LED driver chip accessed via the I2C bus.
+ This option enables support for LEDs connected to the PCA963x
+ LED driver chip accessed via the I2C bus. Supported
+ devices include PCA9633 and PCA9634.
config LEDS_WM831X_STATUS
tristate "LED support for status LEDs on WM831x PMICs"
@@ -398,10 +411,7 @@ config LEDS_MC13783
config LEDS_NS2
tristate "LED support for Network Space v2 GPIO LEDs"
depends on LEDS_CLASS
- depends on MACH_NETSPACE_V2 || MACH_INETSPACE_V2 || \
- MACH_NETSPACE_MAX_V2 || MACH_D2NET_V2 || \
- MACH_NETSPACE_V2_DT || MACH_INETSPACE_V2_DT || \
- MACH_NETSPACE_MAX_V2_DT || MACH_NETSPACE_MINI_V2_DT
+ depends on ARCH_KIRKWOOD
default y
help
This option enables support for the dual-GPIO LED found on the
@@ -410,8 +420,8 @@ config LEDS_NS2
config LEDS_NETXBIG
tristate "LED support for Big Network series LEDs"
- depends on MACH_NET2BIG_V2 || MACH_NET5BIG_V2
depends on LEDS_CLASS
+ depends on ARCH_KIRKWOOD
default y
help
This option enable support for LEDs found on the LaCie 2Big
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index ae4b613..8979b0b 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -27,6 +27,7 @@ obj-$(CONFIG_LEDS_LP55XX_COMMON) += leds-lp55xx-common.o
obj-$(CONFIG_LEDS_LP5521) += leds-lp5521.o
obj-$(CONFIG_LEDS_LP5523) += leds-lp5523.o
obj-$(CONFIG_LEDS_LP5562) += leds-lp5562.o
+obj-$(CONFIG_LEDS_LP8501) += leds-lp8501.o
obj-$(CONFIG_LEDS_LP8788) += leds-lp8788.o
obj-$(CONFIG_LEDS_TCA6507) += leds-tca6507.o
obj-$(CONFIG_LEDS_CLEVO_MAIL) += leds-clevo-mail.o
@@ -34,7 +35,7 @@ obj-$(CONFIG_LEDS_HP6XX) += leds-hp6xx.o
obj-$(CONFIG_LEDS_OT200) += leds-ot200.o
obj-$(CONFIG_LEDS_FSG) += leds-fsg.o
obj-$(CONFIG_LEDS_PCA955X) += leds-pca955x.o
-obj-$(CONFIG_LEDS_PCA9633) += leds-pca9633.o
+obj-$(CONFIG_LEDS_PCA963X) += leds-pca963x.o
obj-$(CONFIG_LEDS_DA903X) += leds-da903x.o
obj-$(CONFIG_LEDS_DA9052) += leds-da9052.o
obj-$(CONFIG_LEDS_WM831X_STATUS) += leds-wm831x-status.o
diff --git a/drivers/leds/leds-88pm860x.c b/drivers/leds/leds-88pm860x.c
index 232b3ce..5f588c0 100644
--- a/drivers/leds/leds-88pm860x.c
+++ b/drivers/leds/leds-88pm860x.c
@@ -157,7 +157,7 @@ static int pm860x_led_dt_init(struct platform_device *pdev,
static int pm860x_led_probe(struct platform_device *pdev)
{
struct pm860x_chip *chip = dev_get_drvdata(pdev->dev.parent);
- struct pm860x_led_pdata *pdata = pdev->dev.platform_data;
+ struct pm860x_led_pdata *pdata = dev_get_platdata(&pdev->dev);
struct pm860x_led *data;
struct resource *res;
int ret = 0;
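The same one-line substitution repeats across the LED patches below: open-coded pdev->dev.platform_data reads become dev_get_platdata() calls. The helper is simply an accessor around the same field, roughly as follows (a sketch of the in-kernel inline from memory, not a verbatim copy):

static inline void *dev_get_platdata(const struct device *dev)
{
	return dev->platform_data;
}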
diff --git a/drivers/leds/leds-adp5520.c b/drivers/leds/leds-adp5520.c
index e8072ab..7e311a1 100644
--- a/drivers/leds/leds-adp5520.c
+++ b/drivers/leds/leds-adp5520.c
@@ -87,7 +87,7 @@ static int adp5520_led_setup(struct adp5520_led *led)
static int adp5520_led_prepare(struct platform_device *pdev)
{
- struct adp5520_leds_platform_data *pdata = pdev->dev.platform_data;
+ struct adp5520_leds_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct device *dev = pdev->dev.parent;
int ret = 0;
@@ -103,7 +103,7 @@ static int adp5520_led_prepare(struct platform_device *pdev)
static int adp5520_led_probe(struct platform_device *pdev)
{
- struct adp5520_leds_platform_data *pdata = pdev->dev.platform_data;
+ struct adp5520_leds_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct adp5520_led *led, *led_dat;
struct led_info *cur_led;
int ret, i;
@@ -185,7 +185,7 @@ err:
static int adp5520_led_remove(struct platform_device *pdev)
{
- struct adp5520_leds_platform_data *pdata = pdev->dev.platform_data;
+ struct adp5520_leds_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct adp5520_led *led;
int i;
diff --git a/drivers/leds/leds-asic3.c b/drivers/leds/leds-asic3.c
index cf9efe4..6de216a 100644
--- a/drivers/leds/leds-asic3.c
+++ b/drivers/leds/leds-asic3.c
@@ -94,7 +94,7 @@ static int blink_set(struct led_classdev *cdev,
static int asic3_led_probe(struct platform_device *pdev)
{
- struct asic3_led *led = pdev->dev.platform_data;
+ struct asic3_led *led = dev_get_platdata(&pdev->dev);
int ret;
ret = mfd_cell_enable(pdev);
@@ -127,7 +127,7 @@ out:
static int asic3_led_remove(struct platform_device *pdev)
{
- struct asic3_led *led = pdev->dev.platform_data;
+ struct asic3_led *led = dev_get_platdata(&pdev->dev);
led_classdev_unregister(led->cdev);
diff --git a/drivers/leds/leds-atmel-pwm.c b/drivers/leds/leds-atmel-pwm.c
index 90518f8..56cec8d 100644
--- a/drivers/leds/leds-atmel-pwm.c
+++ b/drivers/leds/leds-atmel-pwm.c
@@ -42,7 +42,7 @@ static int pwmled_probe(struct platform_device *pdev)
int i;
int status;
- pdata = pdev->dev.platform_data;
+ pdata = dev_get_platdata(&pdev->dev);
if (!pdata || pdata->num_leds < 1)
return -ENODEV;
@@ -119,7 +119,7 @@ static int pwmled_remove(struct platform_device *pdev)
struct pwmled *leds;
unsigned i;
- pdata = pdev->dev.platform_data;
+ pdata = dev_get_platdata(&pdev->dev);
leds = platform_get_drvdata(pdev);
for (i = 0; i < pdata->num_leds; i++) {
diff --git a/drivers/leds/leds-bd2802.c b/drivers/leds/leds-bd2802.c
index 2db0423..fb5a347 100644
--- a/drivers/leds/leds-bd2802.c
+++ b/drivers/leds/leds-bd2802.c
@@ -684,7 +684,7 @@ static int bd2802_probe(struct i2c_client *client,
}
led->client = client;
- pdata = led->pdata = client->dev.platform_data;
+ pdata = led->pdata = dev_get_platdata(&client->dev);
i2c_set_clientdata(client, led);
/* Configure RESET GPIO (L: RESET, H: RESET cancel) */
diff --git a/drivers/leds/leds-clevo-mail.c b/drivers/leds/leds-clevo-mail.c
index 6a8405d..d93e245 100644
--- a/drivers/leds/leds-clevo-mail.c
+++ b/drivers/leds/leds-clevo-mail.c
@@ -40,7 +40,7 @@ static int __init clevo_mail_led_dmi_callback(const struct dmi_system_id *id)
* detected as working, but in reality it is not) as low as
* possible.
*/
-static struct dmi_system_id __initdata clevo_mail_led_dmi_table[] = {
+static struct dmi_system_id clevo_mail_led_dmi_table[] __initdata = {
{
.callback = clevo_mail_led_dmi_callback,
.ident = "Clevo D410J",
diff --git a/drivers/leds/leds-da903x.c b/drivers/leds/leds-da903x.c
index c263a21..2a4b87f 100644
--- a/drivers/leds/leds-da903x.c
+++ b/drivers/leds/leds-da903x.c
@@ -93,7 +93,7 @@ static void da903x_led_set(struct led_classdev *led_cdev,
static int da903x_led_probe(struct platform_device *pdev)
{
- struct led_info *pdata = pdev->dev.platform_data;
+ struct led_info *pdata = dev_get_platdata(&pdev->dev);
struct da903x_led *led;
int id, ret;
diff --git a/drivers/leds/leds-da9052.c b/drivers/leds/leds-da9052.c
index efec433..865d4fa 100644
--- a/drivers/leds/leds-da9052.c
+++ b/drivers/leds/leds-da9052.c
@@ -112,7 +112,7 @@ static int da9052_led_probe(struct platform_device *pdev)
int i;
da9052 = dev_get_drvdata(pdev->dev.parent);
- pdata = da9052->dev->platform_data;
+ pdata = dev_get_platdata(da9052->dev);
if (pdata == NULL) {
dev_err(&pdev->dev, "No platform data\n");
goto err;
@@ -185,7 +185,7 @@ static int da9052_led_remove(struct platform_device *pdev)
int i;
da9052 = dev_get_drvdata(pdev->dev.parent);
- pdata = da9052->dev->platform_data;
+ pdata = dev_get_platdata(da9052->dev);
pled = pdata->pled;
for (i = 0; i < pled->num_leds; i++) {
diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c
index 84d74c37..e8b01e5 100644
--- a/drivers/leds/leds-gpio.c
+++ b/drivers/leds/leds-gpio.c
@@ -233,7 +233,7 @@ static struct gpio_leds_priv *gpio_leds_create_of(struct platform_device *pdev)
static int gpio_led_probe(struct platform_device *pdev)
{
- struct gpio_led_platform_data *pdata = pdev->dev.platform_data;
+ struct gpio_led_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct gpio_leds_priv *priv;
int i, ret = 0;
diff --git a/drivers/leds/leds-lm3530.c b/drivers/leds/leds-lm3530.c
index a036a19..652368c 100644
--- a/drivers/leds/leds-lm3530.c
+++ b/drivers/leds/leds-lm3530.c
@@ -403,7 +403,7 @@ static DEVICE_ATTR(mode, 0644, lm3530_mode_get, lm3530_mode_set);
static int lm3530_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- struct lm3530_platform_data *pdata = client->dev.platform_data;
+ struct lm3530_platform_data *pdata = dev_get_platdata(&client->dev);
struct lm3530_data *drvdata;
int err = 0;
diff --git a/drivers/leds/leds-lm3533.c b/drivers/leds/leds-lm3533.c
index bbf24d0..027ede7 100644
--- a/drivers/leds/leds-lm3533.c
+++ b/drivers/leds/leds-lm3533.c
@@ -671,7 +671,7 @@ static int lm3533_led_probe(struct platform_device *pdev)
if (!lm3533)
return -EINVAL;
- pdata = pdev->dev.platform_data;
+ pdata = dev_get_platdata(&pdev->dev);
if (!pdata) {
dev_err(&pdev->dev, "no platform data\n");
return -EINVAL;
diff --git a/drivers/leds/leds-lm355x.c b/drivers/leds/leds-lm355x.c
index d81a8e7..591eb5e 100644
--- a/drivers/leds/leds-lm355x.c
+++ b/drivers/leds/leds-lm355x.c
@@ -423,7 +423,7 @@ static const struct regmap_config lm355x_regmap = {
static int lm355x_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- struct lm355x_platform_data *pdata = client->dev.platform_data;
+ struct lm355x_platform_data *pdata = dev_get_platdata(&client->dev);
struct lm355x_chip_data *chip;
int err;
diff --git a/drivers/leds/leds-lm3642.c b/drivers/leds/leds-lm3642.c
index f361bbe..ceb6b3c 100644
--- a/drivers/leds/leds-lm3642.c
+++ b/drivers/leds/leds-lm3642.c
@@ -316,7 +316,7 @@ static const struct regmap_config lm3642_regmap = {
static int lm3642_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- struct lm3642_platform_data *pdata = client->dev.platform_data;
+ struct lm3642_platform_data *pdata = dev_get_platdata(&client->dev);
struct lm3642_chip_data *chip;
int err;
diff --git a/drivers/leds/leds-lp3944.c b/drivers/leds/leds-lp3944.c
index 0c4386e..8e1abdc 100644
--- a/drivers/leds/leds-lp3944.c
+++ b/drivers/leds/leds-lp3944.c
@@ -289,7 +289,7 @@ static void lp3944_led_set_brightness(struct led_classdev *led_cdev,
dev_dbg(&led->client->dev, "%s: %s, %d\n",
__func__, led_cdev->name, brightness);
- led->status = brightness;
+ led->status = !!brightness;
schedule_work(&led->work);
}
@@ -377,7 +377,8 @@ exit:
static int lp3944_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- struct lp3944_platform_data *lp3944_pdata = client->dev.platform_data;
+ struct lp3944_platform_data *lp3944_pdata =
+ dev_get_platdata(&client->dev);
struct lp3944_data *data;
int err;
@@ -413,7 +414,7 @@ static int lp3944_probe(struct i2c_client *client,
static int lp3944_remove(struct i2c_client *client)
{
- struct lp3944_platform_data *pdata = client->dev.platform_data;
+ struct lp3944_platform_data *pdata = dev_get_platdata(&client->dev);
struct lp3944_data *data = i2c_get_clientdata(client);
int i;
diff --git a/drivers/leds/leds-lp5521.c b/drivers/leds/leds-lp5521.c
index 1392feb..0518835 100644
--- a/drivers/leds/leds-lp5521.c
+++ b/drivers/leds/leds-lp5521.c
@@ -220,17 +220,11 @@ static int lp5521_update_program_memory(struct lp55xx_chip *chip,
};
unsigned cmd;
char c[3];
- int program_size;
int nrchars;
- int offset = 0;
int ret;
- int i;
-
- /* clear program memory before updating */
- for (i = 0; i < LP5521_PROGRAM_LENGTH; i++)
- lp55xx_write(chip, addr[idx] + i, 0);
+ int offset = 0;
+ int i = 0;
- i = 0;
while ((offset < size - 1) && (i < LP5521_PROGRAM_LENGTH)) {
/* separate sscanfs because length is working only for %s */
ret = sscanf(data + offset, "%2s%n ", c, &nrchars);
@@ -250,11 +244,19 @@ static int lp5521_update_program_memory(struct lp55xx_chip *chip,
if (i % 2)
goto err;
- program_size = i;
- for (i = 0; i < program_size; i++)
- lp55xx_write(chip, addr[idx] + i, pattern[i]);
+ mutex_lock(&chip->lock);
- return 0;
+ for (i = 0; i < LP5521_PROGRAM_LENGTH; i++) {
+ ret = lp55xx_write(chip, addr[idx] + i, pattern[i]);
+ if (ret) {
+ mutex_unlock(&chip->lock);
+ return -EINVAL;
+ }
+ }
+
+ mutex_unlock(&chip->lock);
+
+ return size;
err:
dev_err(&chip->cl->dev, "wrong pattern format\n");
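The rewritten parser above walks whitespace-separated hex byte pairs ("%2s" grabs at most two characters, "%n" reports how far the scan advanced) and now writes the full zero-padded pattern under the chip mutex, failing the whole store on any write error. A standalone sketch of the parsing half in plain C (standard sscanf does not count %n in its return value, so the check differs slightly from the kernel's):

#include <stdio.h>
#include <stddef.h>

/* Parse "9d80 40ff ..." into bytes; returns byte count or -1 on error. */
static int parse_hex_pattern(const char *data, size_t size,
			     unsigned char *out, int max)
{
	unsigned cmd;
	char c[3];
	int nrchars, offset = 0, i = 0;

	while (offset < (int)size - 1 && i < max) {
		if (sscanf(data + offset, "%2s%n", c, &nrchars) != 1)
			return -1;
		if (sscanf(c, "%2x", &cmd) != 1)
			return -1;
		out[i++] = (unsigned char)cmd;
		offset += nrchars;
	}
	return (i % 2) ? -1 : i;	/* engine programs are 16-bit words */
}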
@@ -365,6 +367,80 @@ static void lp5521_led_brightness_work(struct work_struct *work)
mutex_unlock(&chip->lock);
}
+static ssize_t show_engine_mode(struct device *dev,
+ struct device_attribute *attr,
+ char *buf, int nr)
+{
+ struct lp55xx_led *led = i2c_get_clientdata(to_i2c_client(dev));
+ struct lp55xx_chip *chip = led->chip;
+ enum lp55xx_engine_mode mode = chip->engines[nr - 1].mode;
+
+ switch (mode) {
+ case LP55XX_ENGINE_RUN:
+ return sprintf(buf, "run\n");
+ case LP55XX_ENGINE_LOAD:
+ return sprintf(buf, "load\n");
+ case LP55XX_ENGINE_DISABLED:
+ default:
+ return sprintf(buf, "disabled\n");
+ }
+}
+show_mode(1)
+show_mode(2)
+show_mode(3)
+
+static ssize_t store_engine_mode(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len, int nr)
+{
+ struct lp55xx_led *led = i2c_get_clientdata(to_i2c_client(dev));
+ struct lp55xx_chip *chip = led->chip;
+ struct lp55xx_engine *engine = &chip->engines[nr - 1];
+
+ mutex_lock(&chip->lock);
+
+ chip->engine_idx = nr;
+
+ if (!strncmp(buf, "run", 3)) {
+ lp5521_run_engine(chip, true);
+ engine->mode = LP55XX_ENGINE_RUN;
+ } else if (!strncmp(buf, "load", 4)) {
+ lp5521_stop_engine(chip);
+ lp5521_load_engine(chip);
+ engine->mode = LP55XX_ENGINE_LOAD;
+ } else if (!strncmp(buf, "disabled", 8)) {
+ lp5521_stop_engine(chip);
+ engine->mode = LP55XX_ENGINE_DISABLED;
+ }
+
+ mutex_unlock(&chip->lock);
+
+ return len;
+}
+store_mode(1)
+store_mode(2)
+store_mode(3)
+
+static ssize_t store_engine_load(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len, int nr)
+{
+ struct lp55xx_led *led = i2c_get_clientdata(to_i2c_client(dev));
+ struct lp55xx_chip *chip = led->chip;
+
+ mutex_lock(&chip->lock);
+
+ chip->engine_idx = nr;
+ lp5521_load_engine(chip);
+
+ mutex_unlock(&chip->lock);
+
+ return lp5521_update_program_memory(chip, buf, len);
+}
+store_load(1)
+store_load(2)
+store_load(3)
+
static ssize_t lp5521_selftest(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -381,9 +457,21 @@ static ssize_t lp5521_selftest(struct device *dev,
}
/* device attributes */
-static DEVICE_ATTR(selftest, S_IRUGO, lp5521_selftest, NULL);
+static LP55XX_DEV_ATTR_RW(engine1_mode, show_engine1_mode, store_engine1_mode);
+static LP55XX_DEV_ATTR_RW(engine2_mode, show_engine2_mode, store_engine2_mode);
+static LP55XX_DEV_ATTR_RW(engine3_mode, show_engine3_mode, store_engine3_mode);
+static LP55XX_DEV_ATTR_WO(engine1_load, store_engine1_load);
+static LP55XX_DEV_ATTR_WO(engine2_load, store_engine2_load);
+static LP55XX_DEV_ATTR_WO(engine3_load, store_engine3_load);
+static LP55XX_DEV_ATTR_RO(selftest, lp5521_selftest);
static struct attribute *lp5521_attributes[] = {
+ &dev_attr_engine1_mode.attr,
+ &dev_attr_engine2_mode.attr,
+ &dev_attr_engine3_mode.attr,
+ &dev_attr_engine1_load.attr,
+ &dev_attr_engine2_load.attr,
+ &dev_attr_engine3_load.attr,
&dev_attr_selftest.attr,
NULL
};
@@ -420,7 +508,7 @@ static int lp5521_probe(struct i2c_client *client,
struct lp55xx_platform_data *pdata;
struct device_node *np = client->dev.of_node;
- if (!client->dev.platform_data) {
+ if (!dev_get_platdata(&client->dev)) {
if (np) {
ret = lp55xx_of_populate_pdata(&client->dev, np);
if (ret < 0)
@@ -430,7 +518,7 @@ static int lp5521_probe(struct i2c_client *client,
return -EINVAL;
}
}
- pdata = client->dev.platform_data;
+ pdata = dev_get_platdata(&client->dev);
chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
if (!chip)
diff --git a/drivers/leds/leds-lp5523.c b/drivers/leds/leds-lp5523.c
index 3979428..fe3bcbb 100644
--- a/drivers/leds/leds-lp5523.c
+++ b/drivers/leds/leds-lp5523.c
@@ -49,6 +49,9 @@
#define LP5523_REG_RESET 0x3D
#define LP5523_REG_LED_TEST_CTRL 0x41
#define LP5523_REG_LED_TEST_ADC 0x42
+#define LP5523_REG_CH1_PROG_START 0x4C
+#define LP5523_REG_CH2_PROG_START 0x4D
+#define LP5523_REG_CH3_PROG_START 0x4E
#define LP5523_REG_PROG_PAGE_SEL 0x4F
#define LP5523_REG_PROG_MEM 0x50
@@ -65,11 +68,15 @@
#define LP5523_RESET 0xFF
#define LP5523_ADC_SHORTCIRC_LIM 80
#define LP5523_EXT_CLK_USED 0x08
+#define LP5523_ENG_STATUS_MASK 0x07
/* Memory Page Selection */
#define LP5523_PAGE_ENG1 0
#define LP5523_PAGE_ENG2 1
#define LP5523_PAGE_ENG3 2
+#define LP5523_PAGE_MUX1 3
+#define LP5523_PAGE_MUX2 4
+#define LP5523_PAGE_MUX3 5
/* Program Memory Operations */
#define LP5523_MODE_ENG1_M 0x30 /* Operation Mode Register */
@@ -94,11 +101,15 @@
#define LP5523_RUN_ENG2 0x08
#define LP5523_RUN_ENG3 0x02
+#define LED_ACTIVE(mux, led) (!!(mux & (0x0001 << led)))
+
enum lp5523_chip_id {
LP5523,
LP55231,
};
+static int lp5523_init_program_engine(struct lp55xx_chip *chip);
+
static inline void lp5523_wait_opmode_done(void)
{
usleep_range(1000, 2000);
@@ -134,7 +145,11 @@ static int lp5523_post_init_device(struct lp55xx_chip *chip)
if (ret)
return ret;
- return lp55xx_write(chip, LP5523_REG_ENABLE_LEDS_LSB, 0xff);
+ ret = lp55xx_write(chip, LP5523_REG_ENABLE_LEDS_LSB, 0xff);
+ if (ret)
+ return ret;
+
+ return lp5523_init_program_engine(chip);
}
static void lp5523_load_engine(struct lp55xx_chip *chip)
@@ -152,15 +167,21 @@ static void lp5523_load_engine(struct lp55xx_chip *chip)
[LP55XX_ENGINE_3] = LP5523_LOAD_ENG3,
};
+ lp55xx_update_bits(chip, LP5523_REG_OP_MODE, mask[idx], val[idx]);
+
+ lp5523_wait_opmode_done();
+}
+
+static void lp5523_load_engine_and_select_page(struct lp55xx_chip *chip)
+{
+ enum lp55xx_engine_index idx = chip->engine_idx;
u8 page_sel[] = {
[LP55XX_ENGINE_1] = LP5523_PAGE_ENG1,
[LP55XX_ENGINE_2] = LP5523_PAGE_ENG2,
[LP55XX_ENGINE_3] = LP5523_PAGE_ENG3,
};
- lp55xx_update_bits(chip, LP5523_REG_OP_MODE, mask[idx], val[idx]);
-
- lp5523_wait_opmode_done();
+ lp5523_load_engine(chip);
lp55xx_write(chip, LP5523_REG_PROG_PAGE_SEL, page_sel[idx]);
}
@@ -227,23 +248,75 @@ static void lp5523_run_engine(struct lp55xx_chip *chip, bool start)
lp55xx_update_bits(chip, LP5523_REG_ENABLE, LP5523_EXEC_M, exec);
}
+static int lp5523_init_program_engine(struct lp55xx_chip *chip)
+{
+ int i;
+ int j;
+ int ret;
+ u8 status;
+ /* one pattern per engine setting LED MUX start and stop addresses */
+ static const u8 pattern[][LP5523_PROGRAM_LENGTH] = {
+ { 0x9c, 0x30, 0x9c, 0xb0, 0x9d, 0x80, 0xd8, 0x00, 0},
+ { 0x9c, 0x40, 0x9c, 0xc0, 0x9d, 0x80, 0xd8, 0x00, 0},
+ { 0x9c, 0x50, 0x9c, 0xd0, 0x9d, 0x80, 0xd8, 0x00, 0},
+ };
+
+ /* hardcode 32 bytes of memory for each engine from program memory */
+ ret = lp55xx_write(chip, LP5523_REG_CH1_PROG_START, 0x00);
+ if (ret)
+ return ret;
+
+ ret = lp55xx_write(chip, LP5523_REG_CH2_PROG_START, 0x10);
+ if (ret)
+ return ret;
+
+ ret = lp55xx_write(chip, LP5523_REG_CH3_PROG_START, 0x20);
+ if (ret)
+ return ret;
+
+ /* write LED MUX address space for each engine */
+ for (i = LP55XX_ENGINE_1; i <= LP55XX_ENGINE_3; i++) {
+ chip->engine_idx = i;
+ lp5523_load_engine_and_select_page(chip);
+
+ for (j = 0; j < LP5523_PROGRAM_LENGTH; j++) {
+ ret = lp55xx_write(chip, LP5523_REG_PROG_MEM + j,
+ pattern[i - 1][j]);
+ if (ret)
+ goto out;
+ }
+ }
+
+ lp5523_run_engine(chip, true);
+
+ /* Let the programs run for a couple of ms and check the engine status */
+ usleep_range(3000, 6000);
+ lp55xx_read(chip, LP5523_REG_STATUS, &status);
+ status &= LP5523_ENG_STATUS_MASK;
+
+ if (status != LP5523_ENG_STATUS_MASK) {
+ dev_err(&chip->cl->dev,
+ "could not configure LED engine, status = 0x%.2x\n",
+ status);
+ ret = -EINVAL;
+ }
+
+out:
+ lp5523_stop_engine(chip);
+ return ret;
+}
+
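A bring-up aside: the check above only reports the combined status byte, so it can help to see which engine bit is missing. A hypothetical debugging helper, not part of this patch (the bit-to-engine mapping of the three low status bits is a datasheet assumption):

static void lp5523_report_engine_status(struct lp55xx_chip *chip, u8 status)
{
	int i;

	/* Assumed: bits 0..2 of LP5523_REG_STATUS each report one
	 * engine's program-done interrupt. */
	for (i = 0; i < 3; i++)
		if (!(status & BIT(i)))
			dev_warn(&chip->cl->dev,
				 "engine status bit %d did not assert\n", i);
}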
static int lp5523_update_program_memory(struct lp55xx_chip *chip,
const u8 *data, size_t size)
{
u8 pattern[LP5523_PROGRAM_LENGTH] = {0};
unsigned cmd;
char c[3];
- int update_size;
int nrchars;
- int offset = 0;
int ret;
- int i;
-
- /* clear program memory before updating */
- for (i = 0; i < LP5523_PROGRAM_LENGTH; i++)
- lp55xx_write(chip, LP5523_REG_PROG_MEM + i, 0);
+ int offset = 0;
+ int i = 0;
- i = 0;
while ((offset < size - 1) && (i < LP5523_PROGRAM_LENGTH)) {
/* separate sscanfs because length is working only for %s */
ret = sscanf(data + offset, "%2s%n ", c, &nrchars);
@@ -263,11 +336,19 @@ static int lp5523_update_program_memory(struct lp55xx_chip *chip,
if (i % 2)
goto err;
- update_size = i;
- for (i = 0; i < update_size; i++)
- lp55xx_write(chip, LP5523_REG_PROG_MEM + i, pattern[i]);
+ mutex_lock(&chip->lock);
- return 0;
+ for (i = 0; i < LP5523_PROGRAM_LENGTH; i++) {
+ ret = lp55xx_write(chip, LP5523_REG_PROG_MEM + i, pattern[i]);
+ if (ret) {
+ mutex_unlock(&chip->lock);
+ return -EINVAL;
+ }
+ }
+
+ mutex_unlock(&chip->lock);
+
+ return size;
err:
dev_err(&chip->cl->dev, "wrong pattern format\n");
@@ -290,10 +371,196 @@ static void lp5523_firmware_loaded(struct lp55xx_chip *chip)
* 2) write firmware data into program memory
*/
- lp5523_load_engine(chip);
+ lp5523_load_engine_and_select_page(chip);
lp5523_update_program_memory(chip, fw->data, fw->size);
}
+static ssize_t show_engine_mode(struct device *dev,
+ struct device_attribute *attr,
+ char *buf, int nr)
+{
+ struct lp55xx_led *led = i2c_get_clientdata(to_i2c_client(dev));
+ struct lp55xx_chip *chip = led->chip;
+ enum lp55xx_engine_mode mode = chip->engines[nr - 1].mode;
+
+ switch (mode) {
+ case LP55XX_ENGINE_RUN:
+ return sprintf(buf, "run\n");
+ case LP55XX_ENGINE_LOAD:
+ return sprintf(buf, "load\n");
+ case LP55XX_ENGINE_DISABLED:
+ default:
+ return sprintf(buf, "disabled\n");
+ }
+}
+show_mode(1)
+show_mode(2)
+show_mode(3)
+
+static ssize_t store_engine_mode(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len, int nr)
+{
+ struct lp55xx_led *led = i2c_get_clientdata(to_i2c_client(dev));
+ struct lp55xx_chip *chip = led->chip;
+ struct lp55xx_engine *engine = &chip->engines[nr - 1];
+
+ mutex_lock(&chip->lock);
+
+ chip->engine_idx = nr;
+
+ if (!strncmp(buf, "run", 3)) {
+ lp5523_run_engine(chip, true);
+ engine->mode = LP55XX_ENGINE_RUN;
+ } else if (!strncmp(buf, "load", 4)) {
+ lp5523_stop_engine(chip);
+ lp5523_load_engine(chip);
+ engine->mode = LP55XX_ENGINE_LOAD;
+ } else if (!strncmp(buf, "disabled", 8)) {
+ lp5523_stop_engine(chip);
+ engine->mode = LP55XX_ENGINE_DISABLED;
+ }
+
+ mutex_unlock(&chip->lock);
+
+ return len;
+}
+store_mode(1)
+store_mode(2)
+store_mode(3)
+
+static int lp5523_mux_parse(const char *buf, u16 *mux, size_t len)
+{
+ u16 tmp_mux = 0;
+ int i;
+
+ len = min_t(int, len, LP5523_MAX_LEDS);
+
+ for (i = 0; i < len; i++) {
+ switch (buf[i]) {
+ case '1':
+ tmp_mux |= (1 << i);
+ break;
+ case '0':
+ break;
+ case '\n':
+ i = len;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+ *mux = tmp_mux;
+
+ return 0;
+}
+
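To make the parse concrete: character position i in the written string controls LED i, so the leftmost digit sets the lowest mux bit. A minimal usage sketch of the helper above:

	u16 mux;

	/* "110000000" enables LEDs 0 and 1, so mux ends up as 0x0003. */
	if (!lp5523_mux_parse("110000000\n", &mux, 10))
		pr_info("mux = 0x%04x\n", mux);	/* prints mux = 0x0003 */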
+static void lp5523_mux_to_array(u16 led_mux, char *array)
+{
+ int i, pos = 0;
+ for (i = 0; i < LP5523_MAX_LEDS; i++)
+ pos += sprintf(array + pos, "%x", LED_ACTIVE(led_mux, i));
+
+ array[pos] = '\0';
+}
+
+static ssize_t show_engine_leds(struct device *dev,
+ struct device_attribute *attr,
+ char *buf, int nr)
+{
+ struct lp55xx_led *led = i2c_get_clientdata(to_i2c_client(dev));
+ struct lp55xx_chip *chip = led->chip;
+ char mux[LP5523_MAX_LEDS + 1];
+
+ lp5523_mux_to_array(chip->engines[nr - 1].led_mux, mux);
+
+ return sprintf(buf, "%s\n", mux);
+}
+show_leds(1)
+show_leds(2)
+show_leds(3)
+
+static int lp5523_load_mux(struct lp55xx_chip *chip, u16 mux, int nr)
+{
+ struct lp55xx_engine *engine = &chip->engines[nr - 1];
+ int ret;
+ u8 mux_page[] = {
+ [LP55XX_ENGINE_1] = LP5523_PAGE_MUX1,
+ [LP55XX_ENGINE_2] = LP5523_PAGE_MUX2,
+ [LP55XX_ENGINE_3] = LP5523_PAGE_MUX3,
+ };
+
+ lp5523_load_engine(chip);
+
+ ret = lp55xx_write(chip, LP5523_REG_PROG_PAGE_SEL, mux_page[nr]);
+ if (ret)
+ return ret;
+
+ ret = lp55xx_write(chip, LP5523_REG_PROG_MEM, (u8)(mux >> 8));
+ if (ret)
+ return ret;
+
+ ret = lp55xx_write(chip, LP5523_REG_PROG_MEM + 1, (u8)(mux));
+ if (ret)
+ return ret;
+
+ engine->led_mux = mux;
+ return 0;
+}
+
+static ssize_t store_engine_leds(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len, int nr)
+{
+ struct lp55xx_led *led = i2c_get_clientdata(to_i2c_client(dev));
+ struct lp55xx_chip *chip = led->chip;
+ struct lp55xx_engine *engine = &chip->engines[nr - 1];
+ u16 mux = 0;
+ ssize_t ret;
+
+ if (lp5523_mux_parse(buf, &mux, len))
+ return -EINVAL;
+
+ mutex_lock(&chip->lock);
+
+ chip->engine_idx = nr;
+ ret = -EINVAL;
+
+ if (engine->mode != LP55XX_ENGINE_LOAD)
+ goto leave;
+
+ if (lp5523_load_mux(chip, mux, nr))
+ goto leave;
+
+ ret = len;
+leave:
+ mutex_unlock(&chip->lock);
+ return ret;
+}
+store_leds(1)
+store_leds(2)
+store_leds(3)
+
+static ssize_t store_engine_load(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len, int nr)
+{
+ struct lp55xx_led *led = i2c_get_clientdata(to_i2c_client(dev));
+ struct lp55xx_chip *chip = led->chip;
+
+ mutex_lock(&chip->lock);
+
+ chip->engine_idx = nr;
+ lp5523_load_engine_and_select_page(chip);
+
+ mutex_unlock(&chip->lock);
+
+ return lp5523_update_program_memory(chip, buf, len);
+}
+store_load(1)
+store_load(2)
+store_load(3)
+
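Taken together, the new attributes give a load/leds/run flow per engine. A minimal user-space sketch of that flow; the sysfs path and the program bytes are illustrative assumptions, not values from this patch:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#define LP5523_SYSFS "/sys/bus/i2c/devices/1-0032" /* assumed location */

static void sysfs_write(const char *attr, const char *val)
{
	char path[128];
	int fd;

	snprintf(path, sizeof(path), LP5523_SYSFS "/%s", attr);
	fd = open(path, O_WRONLY);
	if (fd < 0)
		return;
	write(fd, val, strlen(val));
	close(fd);
}

int main(void)
{
	sysfs_write("engine1_mode", "load");		/* enter LOAD mode first */
	sysfs_write("engine1_leds", "111111111");	/* mux all nine LEDs */
	sysfs_write("engine1_load", "40ff 7e00 4000");	/* illustrative opcodes */
	sysfs_write("engine1_mode", "run");		/* start the pattern */
	return 0;
}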
static ssize_t lp5523_selftest(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -393,9 +660,27 @@ static void lp5523_led_brightness_work(struct work_struct *work)
mutex_unlock(&chip->lock);
}
-static DEVICE_ATTR(selftest, S_IRUGO, lp5523_selftest, NULL);
+static LP55XX_DEV_ATTR_RW(engine1_mode, show_engine1_mode, store_engine1_mode);
+static LP55XX_DEV_ATTR_RW(engine2_mode, show_engine2_mode, store_engine2_mode);
+static LP55XX_DEV_ATTR_RW(engine3_mode, show_engine3_mode, store_engine3_mode);
+static LP55XX_DEV_ATTR_RW(engine1_leds, show_engine1_leds, store_engine1_leds);
+static LP55XX_DEV_ATTR_RW(engine2_leds, show_engine2_leds, store_engine2_leds);
+static LP55XX_DEV_ATTR_RW(engine3_leds, show_engine3_leds, store_engine3_leds);
+static LP55XX_DEV_ATTR_WO(engine1_load, store_engine1_load);
+static LP55XX_DEV_ATTR_WO(engine2_load, store_engine2_load);
+static LP55XX_DEV_ATTR_WO(engine3_load, store_engine3_load);
+static LP55XX_DEV_ATTR_RO(selftest, lp5523_selftest);
static struct attribute *lp5523_attributes[] = {
+ &dev_attr_engine1_mode.attr,
+ &dev_attr_engine2_mode.attr,
+ &dev_attr_engine3_mode.attr,
+ &dev_attr_engine1_load.attr,
+ &dev_attr_engine2_load.attr,
+ &dev_attr_engine3_load.attr,
+ &dev_attr_engine1_leds.attr,
+ &dev_attr_engine2_leds.attr,
+ &dev_attr_engine3_leds.attr,
&dev_attr_selftest.attr,
NULL,
};
@@ -432,7 +717,7 @@ static int lp5523_probe(struct i2c_client *client,
struct lp55xx_platform_data *pdata;
struct device_node *np = client->dev.of_node;
- if (!client->dev.platform_data) {
+ if (!dev_get_platdata(&client->dev)) {
if (np) {
ret = lp55xx_of_populate_pdata(&client->dev, np);
if (ret < 0)
@@ -442,7 +727,7 @@ static int lp5523_probe(struct i2c_client *client,
return -EINVAL;
}
}
- pdata = client->dev.platform_data;
+ pdata = dev_get_platdata(&client->dev);
chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
if (!chip)
diff --git a/drivers/leds/leds-lp5562.c b/drivers/leds/leds-lp5562.c
index cbd856d..2585cfd 100644
--- a/drivers/leds/leds-lp5562.c
+++ b/drivers/leds/leds-lp5562.c
@@ -477,8 +477,8 @@ static ssize_t lp5562_store_engine_mux(struct device *dev,
return len;
}
-static DEVICE_ATTR(led_pattern, S_IWUSR, NULL, lp5562_store_pattern);
-static DEVICE_ATTR(engine_mux, S_IWUSR, NULL, lp5562_store_engine_mux);
+static LP55XX_DEV_ATTR_WO(led_pattern, lp5562_store_pattern);
+static LP55XX_DEV_ATTR_WO(engine_mux, lp5562_store_engine_mux);
static struct attribute *lp5562_attributes[] = {
&dev_attr_led_pattern.attr,
@@ -518,7 +518,7 @@ static int lp5562_probe(struct i2c_client *client,
struct lp55xx_platform_data *pdata;
struct device_node *np = client->dev.of_node;
- if (!client->dev.platform_data) {
+ if (!dev_get_platdata(&client->dev)) {
if (np) {
ret = lp55xx_of_populate_pdata(&client->dev, np);
if (ret < 0)
@@ -528,7 +528,7 @@ static int lp5562_probe(struct i2c_client *client,
return -EINVAL;
}
}
- pdata = client->dev.platform_data;
+ pdata = dev_get_platdata(&client->dev);
chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
if (!chip)
diff --git a/drivers/leds/leds-lp55xx-common.c b/drivers/leds/leds-lp55xx-common.c
index c2fecd4..351825b 100644
--- a/drivers/leds/leds-lp55xx-common.c
+++ b/drivers/leds/leds-lp55xx-common.c
@@ -593,6 +593,9 @@ int lp55xx_of_populate_pdata(struct device *dev, struct device_node *np)
of_property_read_string(np, "label", &pdata->label);
of_property_read_u8(np, "clock-mode", &pdata->clock_mode);
+ /* LP8501 specific */
+ of_property_read_u8(np, "pwr-sel", (u8 *)&pdata->pwr_sel);
+
dev->platform_data = pdata;
return 0;
diff --git a/drivers/leds/leds-lp55xx-common.h b/drivers/leds/leds-lp55xx-common.h
index dbbf86d..cceab48 100644
--- a/drivers/leds/leds-lp55xx-common.h
+++ b/drivers/leds/leds-lp55xx-common.h
@@ -20,8 +20,62 @@ enum lp55xx_engine_index {
LP55XX_ENGINE_1,
LP55XX_ENGINE_2,
LP55XX_ENGINE_3,
+ LP55XX_ENGINE_MAX = LP55XX_ENGINE_3,
};
+enum lp55xx_engine_mode {
+ LP55XX_ENGINE_DISABLED,
+ LP55XX_ENGINE_LOAD,
+ LP55XX_ENGINE_RUN,
+};
+
+#define LP55XX_DEV_ATTR_RW(name, show, store) \
+ DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show, store)
+#define LP55XX_DEV_ATTR_RO(name, show) \
+ DEVICE_ATTR(name, S_IRUGO, show, NULL)
+#define LP55XX_DEV_ATTR_WO(name, store) \
+ DEVICE_ATTR(name, S_IWUSR, NULL, store)
+
+#define show_mode(nr) \
+static ssize_t show_engine##nr##_mode(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ \
+ return show_engine_mode(dev, attr, buf, nr); \
+}
+
+#define store_mode(nr) \
+static ssize_t store_engine##nr##_mode(struct device *dev, \
+ struct device_attribute *attr, \
+ const char *buf, size_t len) \
+{ \
+ return store_engine_mode(dev, attr, buf, len, nr); \
+}
+
+#define show_leds(nr) \
+static ssize_t show_engine##nr##_leds(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ \
+ return show_engine_leds(dev, attr, buf, nr); \
+}
+
+#define store_leds(nr) \
+static ssize_t store_engine##nr##_leds(struct device *dev, \
+ struct device_attribute *attr, \
+ const char *buf, size_t len) \
+{ \
+ return store_engine_leds(dev, attr, buf, len, nr); \
+}
+
+#define store_load(nr) \
+static ssize_t store_engine##nr##_load(struct device *dev, \
+ struct device_attribute *attr, \
+ const char *buf, size_t len) \
+{ \
+ return store_engine_load(dev, attr, buf, len, nr); \
+}
+
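Each stamp macro simply pins the engine number into a callback with the standard sysfs signature; show_mode(1), for instance, expands to:

static ssize_t show_engine1_mode(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	return show_engine_mode(dev, attr, buf, 1);
}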
struct lp55xx_led;
struct lp55xx_chip;
@@ -72,6 +126,16 @@ struct lp55xx_device_config {
};
/*
+ * struct lp55xx_engine
+ * @mode : Engine mode
+ * @led_mux : Mux bits for LED selection. Only used in LP5523
+ */
+struct lp55xx_engine {
+ enum lp55xx_engine_mode mode;
+ u16 led_mux;
+};
+
+/*
* struct lp55xx_chip
* @cl : I2C communication for access registers
* @pdata : Platform specific data
@@ -79,6 +143,7 @@ struct lp55xx_device_config {
* @num_leds : Number of registered LEDs
* @cfg : Device specific configuration data
* @engine_idx : Selected engine number
+ * @engines : Engine structure for the device attribute R/W interface
* @fw : Firmware data for running a LED pattern
*/
struct lp55xx_chip {
@@ -89,6 +154,7 @@ struct lp55xx_chip {
int num_leds;
struct lp55xx_device_config *cfg;
enum lp55xx_engine_index engine_idx;
+ struct lp55xx_engine engines[LP55XX_ENGINE_MAX];
const struct firmware *fw;
};
diff --git a/drivers/leds/leds-lp8501.c b/drivers/leds/leds-lp8501.c
new file mode 100644
index 0000000..8d55a780
--- /dev/null
+++ b/drivers/leds/leds-lp8501.c
@@ -0,0 +1,410 @@
+/*
+ * TI LP8501 9 channel LED Driver
+ *
+ * Copyright (C) 2013 Texas Instruments
+ *
+ * Author: Milo(Woogyom) Kim <milo.kim@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_data/leds-lp55xx.h>
+#include <linux/slab.h>
+
+#include "leds-lp55xx-common.h"
+
+#define LP8501_PROGRAM_LENGTH 32
+#define LP8501_MAX_LEDS 9
+
+/* Registers */
+#define LP8501_REG_ENABLE 0x00
+#define LP8501_ENABLE BIT(6)
+#define LP8501_EXEC_M 0x3F
+#define LP8501_EXEC_ENG1_M 0x30
+#define LP8501_EXEC_ENG2_M 0x0C
+#define LP8501_EXEC_ENG3_M 0x03
+#define LP8501_RUN_ENG1 0x20
+#define LP8501_RUN_ENG2 0x08
+#define LP8501_RUN_ENG3 0x02
+
+#define LP8501_REG_OP_MODE 0x01
+#define LP8501_MODE_ENG1_M 0x30
+#define LP8501_MODE_ENG2_M 0x0C
+#define LP8501_MODE_ENG3_M 0x03
+#define LP8501_LOAD_ENG1 0x10
+#define LP8501_LOAD_ENG2 0x04
+#define LP8501_LOAD_ENG3 0x01
+
+#define LP8501_REG_PWR_CONFIG 0x05
+#define LP8501_PWR_CONFIG_M 0x03
+
+#define LP8501_REG_LED_PWM_BASE 0x16
+
+#define LP8501_REG_LED_CURRENT_BASE 0x26
+
+#define LP8501_REG_CONFIG 0x36
+#define LP8501_PWM_PSAVE BIT(7)
+#define LP8501_AUTO_INC BIT(6)
+#define LP8501_PWR_SAVE BIT(5)
+#define LP8501_CP_AUTO 0x18
+#define LP8501_INT_CLK BIT(0)
+#define LP8501_DEFAULT_CFG \
+ (LP8501_PWM_PSAVE | LP8501_AUTO_INC | LP8501_PWR_SAVE | LP8501_CP_AUTO)
+
+#define LP8501_REG_RESET 0x3D
+#define LP8501_RESET 0xFF
+
+#define LP8501_REG_PROG_PAGE_SEL 0x4F
+#define LP8501_PAGE_ENG1 0
+#define LP8501_PAGE_ENG2 1
+#define LP8501_PAGE_ENG3 2
+
+#define LP8501_REG_PROG_MEM 0x50
+
+#define LP8501_ENG1_IS_LOADING(mode) \
+ ((mode & LP8501_MODE_ENG1_M) == LP8501_LOAD_ENG1)
+#define LP8501_ENG2_IS_LOADING(mode) \
+ ((mode & LP8501_MODE_ENG2_M) == LP8501_LOAD_ENG2)
+#define LP8501_ENG3_IS_LOADING(mode) \
+ ((mode & LP8501_MODE_ENG3_M) == LP8501_LOAD_ENG3)
+
+static inline void lp8501_wait_opmode_done(void)
+{
+ usleep_range(1000, 2000);
+}
+
+static void lp8501_set_led_current(struct lp55xx_led *led, u8 led_current)
+{
+ led->led_current = led_current;
+ lp55xx_write(led->chip, LP8501_REG_LED_CURRENT_BASE + led->chan_nr,
+ led_current);
+}
+
+static int lp8501_post_init_device(struct lp55xx_chip *chip)
+{
+ int ret;
+ u8 val = LP8501_DEFAULT_CFG;
+
+ ret = lp55xx_write(chip, LP8501_REG_ENABLE, LP8501_ENABLE);
+ if (ret)
+ return ret;
+
+ /* Chip startup time is 500 us, 1 - 2 ms gives some margin */
+ usleep_range(1000, 2000);
+
+ if (chip->pdata->clock_mode != LP55XX_CLOCK_EXT)
+ val |= LP8501_INT_CLK;
+
+ ret = lp55xx_write(chip, LP8501_REG_CONFIG, val);
+ if (ret)
+ return ret;
+
+ /* Power selection for each output */
+ return lp55xx_update_bits(chip, LP8501_REG_PWR_CONFIG,
+ LP8501_PWR_CONFIG_M, chip->pdata->pwr_sel);
+}
+
+static void lp8501_load_engine(struct lp55xx_chip *chip)
+{
+ enum lp55xx_engine_index idx = chip->engine_idx;
+ u8 mask[] = {
+ [LP55XX_ENGINE_1] = LP8501_MODE_ENG1_M,
+ [LP55XX_ENGINE_2] = LP8501_MODE_ENG2_M,
+ [LP55XX_ENGINE_3] = LP8501_MODE_ENG3_M,
+ };
+
+ u8 val[] = {
+ [LP55XX_ENGINE_1] = LP8501_LOAD_ENG1,
+ [LP55XX_ENGINE_2] = LP8501_LOAD_ENG2,
+ [LP55XX_ENGINE_3] = LP8501_LOAD_ENG3,
+ };
+
+ u8 page_sel[] = {
+ [LP55XX_ENGINE_1] = LP8501_PAGE_ENG1,
+ [LP55XX_ENGINE_2] = LP8501_PAGE_ENG2,
+ [LP55XX_ENGINE_3] = LP8501_PAGE_ENG3,
+ };
+
+ lp55xx_update_bits(chip, LP8501_REG_OP_MODE, mask[idx], val[idx]);
+
+ lp8501_wait_opmode_done();
+
+ lp55xx_write(chip, LP8501_REG_PROG_PAGE_SEL, page_sel[idx]);
+}
+
+static void lp8501_stop_engine(struct lp55xx_chip *chip)
+{
+ lp55xx_write(chip, LP8501_REG_OP_MODE, 0);
+ lp8501_wait_opmode_done();
+}
+
+static void lp8501_turn_off_channels(struct lp55xx_chip *chip)
+{
+ int i;
+
+ for (i = 0; i < LP8501_MAX_LEDS; i++)
+ lp55xx_write(chip, LP8501_REG_LED_PWM_BASE + i, 0);
+}
+
+static void lp8501_run_engine(struct lp55xx_chip *chip, bool start)
+{
+ int ret;
+ u8 mode;
+ u8 exec;
+
+ /* stop engine */
+ if (!start) {
+ lp8501_stop_engine(chip);
+ lp8501_turn_off_channels(chip);
+ return;
+ }
+
+ /*
+ * To run the engine,
+ * operation mode and enable register should be updated at the same time
+ */
+
+ ret = lp55xx_read(chip, LP8501_REG_OP_MODE, &mode);
+ if (ret)
+ return;
+
+ ret = lp55xx_read(chip, LP8501_REG_ENABLE, &exec);
+ if (ret)
+ return;
+
+ /* change operation mode to RUN only when each engine is loading */
+ if (LP8501_ENG1_IS_LOADING(mode)) {
+ mode = (mode & ~LP8501_MODE_ENG1_M) | LP8501_RUN_ENG1;
+ exec = (exec & ~LP8501_EXEC_ENG1_M) | LP8501_RUN_ENG1;
+ }
+
+ if (LP8501_ENG2_IS_LOADING(mode)) {
+ mode = (mode & ~LP8501_MODE_ENG2_M) | LP8501_RUN_ENG2;
+ exec = (exec & ~LP8501_EXEC_ENG2_M) | LP8501_RUN_ENG2;
+ }
+
+ if (LP8501_ENG3_IS_LOADING(mode)) {
+ mode = (mode & ~LP8501_MODE_ENG3_M) | LP8501_RUN_ENG3;
+ exec = (exec & ~LP8501_EXEC_ENG3_M) | LP8501_RUN_ENG3;
+ }
+
+ lp55xx_write(chip, LP8501_REG_OP_MODE, mode);
+ lp8501_wait_opmode_done();
+
+ lp55xx_update_bits(chip, LP8501_REG_ENABLE, LP8501_EXEC_M, exec);
+}
+
+static int lp8501_update_program_memory(struct lp55xx_chip *chip,
+ const u8 *data, size_t size)
+{
+ u8 pattern[LP8501_PROGRAM_LENGTH] = {0};
+ unsigned cmd;
+ char c[3];
+ int update_size;
+ int nrchars;
+ int offset = 0;
+ int ret;
+ int i;
+
+ /* clear program memory before updating */
+ for (i = 0; i < LP8501_PROGRAM_LENGTH; i++)
+ lp55xx_write(chip, LP8501_REG_PROG_MEM + i, 0);
+
+ i = 0;
+ while ((offset < size - 1) && (i < LP8501_PROGRAM_LENGTH)) {
+ /* separate sscanfs because the length modifier only works for %s */
+ ret = sscanf(data + offset, "%2s%n ", c, &nrchars);
+ if (ret != 1)
+ goto err;
+
+ ret = sscanf(c, "%2x", &cmd);
+ if (ret != 1)
+ goto err;
+
+ pattern[i] = (u8)cmd;
+ offset += nrchars;
+ i++;
+ }
+
+ /* Each instruction is 16 bits long. Check that the length is even */
+ if (i % 2)
+ goto err;
+
+ update_size = i;
+ for (i = 0; i < update_size; i++)
+ lp55xx_write(chip, LP8501_REG_PROG_MEM + i, pattern[i]);
+
+ return 0;
+
+err:
+ dev_err(&chip->cl->dev, "wrong pattern format\n");
+ return -EINVAL;
+}
+
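The parser accepts ASCII hex, two characters per byte with optional whitespace between pairs, and the even-count check enforces whole 16-bit instructions. A format sketch with illustrative opcodes:

/*
 *	fw->data = "40ff 7e00 4000"
 *		-> pattern[] = { 0x40, 0xff, 0x7e, 0x00, 0x40, 0x00 }
 *		-> three 16-bit engine instructions
 */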
+static void lp8501_firmware_loaded(struct lp55xx_chip *chip)
+{
+ const struct firmware *fw = chip->fw;
+
+ if (fw->size > LP8501_PROGRAM_LENGTH) {
+ dev_err(&chip->cl->dev, "firmware data size overflow: %zu\n",
+ fw->size);
+ return;
+ }
+
+ /*
+ * Program memory sequence
+ * 1) set engine mode to "LOAD"
+ * 2) write firmware data into program memory
+ */
+
+ lp8501_load_engine(chip);
+ lp8501_update_program_memory(chip, fw->data, fw->size);
+}
+
+static void lp8501_led_brightness_work(struct work_struct *work)
+{
+ struct lp55xx_led *led = container_of(work, struct lp55xx_led,
+ brightness_work);
+ struct lp55xx_chip *chip = led->chip;
+
+ mutex_lock(&chip->lock);
+ lp55xx_write(chip, LP8501_REG_LED_PWM_BASE + led->chan_nr,
+ led->brightness);
+ mutex_unlock(&chip->lock);
+}
+
+/* Chip specific configurations */
+static struct lp55xx_device_config lp8501_cfg = {
+ .reset = {
+ .addr = LP8501_REG_RESET,
+ .val = LP8501_RESET,
+ },
+ .enable = {
+ .addr = LP8501_REG_ENABLE,
+ .val = LP8501_ENABLE,
+ },
+ .max_channel = LP8501_MAX_LEDS,
+ .post_init_device = lp8501_post_init_device,
+ .brightness_work_fn = lp8501_led_brightness_work,
+ .set_led_current = lp8501_set_led_current,
+ .firmware_cb = lp8501_firmware_loaded,
+ .run_engine = lp8501_run_engine,
+};
+
+static int lp8501_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int ret;
+ struct lp55xx_chip *chip;
+ struct lp55xx_led *led;
+ struct lp55xx_platform_data *pdata;
+ struct device_node *np = client->dev.of_node;
+
+ if (!dev_get_platdata(&client->dev)) {
+ if (np) {
+ ret = lp55xx_of_populate_pdata(&client->dev, np);
+ if (ret < 0)
+ return ret;
+ } else {
+ dev_err(&client->dev, "no platform data\n");
+ return -EINVAL;
+ }
+ }
+ pdata = dev_get_platdata(&client->dev);
+
+ chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ led = devm_kzalloc(&client->dev,
+ sizeof(*led) * pdata->num_channels, GFP_KERNEL);
+ if (!led)
+ return -ENOMEM;
+
+ chip->cl = client;
+ chip->pdata = pdata;
+ chip->cfg = &lp8501_cfg;
+
+ mutex_init(&chip->lock);
+
+ i2c_set_clientdata(client, led);
+
+ ret = lp55xx_init_device(chip);
+ if (ret)
+ goto err_init;
+
+ dev_info(&client->dev, "%s programmable LED chip found\n", id->name);
+
+ ret = lp55xx_register_leds(led, chip);
+ if (ret)
+ goto err_register_leds;
+
+ ret = lp55xx_register_sysfs(chip);
+ if (ret) {
+ dev_err(&client->dev, "registering sysfs failed\n");
+ goto err_register_sysfs;
+ }
+
+ return 0;
+
+err_register_sysfs:
+ lp55xx_unregister_leds(led, chip);
+err_register_leds:
+ lp55xx_deinit_device(chip);
+err_init:
+ return ret;
+}
+
+static int lp8501_remove(struct i2c_client *client)
+{
+ struct lp55xx_led *led = i2c_get_clientdata(client);
+ struct lp55xx_chip *chip = led->chip;
+
+ lp8501_stop_engine(chip);
+ lp55xx_unregister_sysfs(chip);
+ lp55xx_unregister_leds(led, chip);
+ lp55xx_deinit_device(chip);
+
+ return 0;
+}
+
+static const struct i2c_device_id lp8501_id[] = {
+ { "lp8501", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, lp8501_id);
+
+#ifdef CONFIG_OF
+static const struct of_device_id of_lp8501_leds_match[] = {
+ { .compatible = "ti,lp8501", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, of_lp8501_leds_match);
+#endif
+
+static struct i2c_driver lp8501_driver = {
+ .driver = {
+ .name = "lp8501",
+ .of_match_table = of_match_ptr(of_lp8501_leds_match),
+ },
+ .probe = lp8501_probe,
+ .remove = lp8501_remove,
+ .id_table = lp8501_id,
+};
+
+module_i2c_driver(lp8501_driver);
+
+MODULE_DESCRIPTION("Texas Instruments LP8501 LED driver");
+MODULE_AUTHOR("Milo Kim");
+MODULE_LICENSE("GPL");
diff --git a/drivers/leds/leds-lt3593.c b/drivers/leds/leds-lt3593.c
index ca48a7d5..3417e5b 100644
--- a/drivers/leds/leds-lt3593.c
+++ b/drivers/leds/leds-lt3593.c
@@ -135,7 +135,7 @@ static void delete_lt3593_led(struct lt3593_led_data *led)
static int lt3593_led_probe(struct platform_device *pdev)
{
- struct gpio_led_platform_data *pdata = pdev->dev.platform_data;
+ struct gpio_led_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct lt3593_led_data *leds_data;
int i, ret = 0;
@@ -169,7 +169,7 @@ err:
static int lt3593_led_remove(struct platform_device *pdev)
{
int i;
- struct gpio_led_platform_data *pdata = pdev->dev.platform_data;
+ struct gpio_led_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct lt3593_led_data *leds_data;
leds_data = platform_get_drvdata(pdev);
diff --git a/drivers/leds/leds-netxbig.c b/drivers/leds/leds-netxbig.c
index c61c5eb..2f9f141 100644
--- a/drivers/leds/leds-netxbig.c
+++ b/drivers/leds/leds-netxbig.c
@@ -306,7 +306,7 @@ create_netxbig_led(struct platform_device *pdev,
struct netxbig_led_data *led_dat,
const struct netxbig_led *template)
{
- struct netxbig_led_platform_data *pdata = pdev->dev.platform_data;
+ struct netxbig_led_platform_data *pdata = dev_get_platdata(&pdev->dev);
int ret;
spin_lock_init(&led_dat->lock);
@@ -354,7 +354,7 @@ create_netxbig_led(struct platform_device *pdev,
static int netxbig_led_probe(struct platform_device *pdev)
{
- struct netxbig_led_platform_data *pdata = pdev->dev.platform_data;
+ struct netxbig_led_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct netxbig_led_data *leds_data;
int i;
int ret;
@@ -391,7 +391,7 @@ err_free_leds:
static int netxbig_led_remove(struct platform_device *pdev)
{
- struct netxbig_led_platform_data *pdata = pdev->dev.platform_data;
+ struct netxbig_led_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct netxbig_led_data *leds_data;
int i;
diff --git a/drivers/leds/leds-ns2.c b/drivers/leds/leds-ns2.c
index e7df987..141f134 100644
--- a/drivers/leds/leds-ns2.c
+++ b/drivers/leds/leds-ns2.c
@@ -321,7 +321,7 @@ static inline int sizeof_ns2_led_priv(int num_leds)
static int ns2_led_probe(struct platform_device *pdev)
{
- struct ns2_led_platform_data *pdata = pdev->dev.platform_data;
+ struct ns2_led_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct ns2_led_priv *priv;
int i;
int ret;
diff --git a/drivers/leds/leds-pca9532.c b/drivers/leds/leds-pca9532.c
index 0c597bd..4a0e786 100644
--- a/drivers/leds/leds-pca9532.c
+++ b/drivers/leds/leds-pca9532.c
@@ -446,7 +446,8 @@ static int pca9532_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct pca9532_data *data = i2c_get_clientdata(client);
- struct pca9532_platform_data *pca9532_pdata = client->dev.platform_data;
+ struct pca9532_platform_data *pca9532_pdata =
+ dev_get_platdata(&client->dev);
if (!pca9532_pdata)
return -EIO;
diff --git a/drivers/leds/leds-pca955x.c b/drivers/leds/leds-pca955x.c
index edf485b..c3a08b6 100644
--- a/drivers/leds/leds-pca955x.c
+++ b/drivers/leds/leds-pca955x.c
@@ -267,7 +267,7 @@ static int pca955x_probe(struct i2c_client *client,
chip = &pca955x_chipdefs[id->driver_data];
adapter = to_i2c_adapter(client->dev.parent);
- pdata = client->dev.platform_data;
+ pdata = dev_get_platdata(&client->dev);
/* Make sure the slave address / chip type combo given is possible */
if ((client->addr & ~((1 << chip->slv_addr_shift) - 1)) !=
diff --git a/drivers/leds/leds-pca9633.c b/drivers/leds/leds-pca9633.c
deleted file mode 100644
index 9aae567..0000000
--- a/drivers/leds/leds-pca9633.c
+++ /dev/null
@@ -1,194 +0,0 @@
-/*
- * Copyright 2011 bct electronic GmbH
- *
- * Author: Peter Meerwald <p.meerwald@bct-electronic.com>
- *
- * Based on leds-pca955x.c
- *
- * This file is subject to the terms and conditions of version 2 of
- * the GNU General Public License. See the file COPYING in the main
- * directory of this archive for more details.
- *
- * LED driver for the PCA9633 I2C LED driver (7-bit slave address 0x62)
- *
- */
-
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/string.h>
-#include <linux/ctype.h>
-#include <linux/leds.h>
-#include <linux/err.h>
-#include <linux/i2c.h>
-#include <linux/workqueue.h>
-#include <linux/slab.h>
-#include <linux/platform_data/leds-pca9633.h>
-
-/* LED select registers determine the source that drives LED outputs */
-#define PCA9633_LED_OFF 0x0 /* LED driver off */
-#define PCA9633_LED_ON 0x1 /* LED driver on */
-#define PCA9633_LED_PWM 0x2 /* Controlled through PWM */
-#define PCA9633_LED_GRP_PWM 0x3 /* Controlled through PWM/GRPPWM */
-
-#define PCA9633_MODE1 0x00
-#define PCA9633_MODE2 0x01
-#define PCA9633_PWM_BASE 0x02
-#define PCA9633_LEDOUT 0x08
-
-static const struct i2c_device_id pca9633_id[] = {
- { "pca9633", 0 },
- { }
-};
-MODULE_DEVICE_TABLE(i2c, pca9633_id);
-
-struct pca9633_led {
- struct i2c_client *client;
- struct work_struct work;
- enum led_brightness brightness;
- struct led_classdev led_cdev;
- int led_num; /* 0 .. 3 potentially */
- char name[32];
-};
-
-static void pca9633_led_work(struct work_struct *work)
-{
- struct pca9633_led *pca9633 = container_of(work,
- struct pca9633_led, work);
- u8 ledout = i2c_smbus_read_byte_data(pca9633->client, PCA9633_LEDOUT);
- int shift = 2 * pca9633->led_num;
- u8 mask = 0x3 << shift;
-
- switch (pca9633->brightness) {
- case LED_FULL:
- i2c_smbus_write_byte_data(pca9633->client, PCA9633_LEDOUT,
- (ledout & ~mask) | (PCA9633_LED_ON << shift));
- break;
- case LED_OFF:
- i2c_smbus_write_byte_data(pca9633->client, PCA9633_LEDOUT,
- ledout & ~mask);
- break;
- default:
- i2c_smbus_write_byte_data(pca9633->client,
- PCA9633_PWM_BASE + pca9633->led_num,
- pca9633->brightness);
- i2c_smbus_write_byte_data(pca9633->client, PCA9633_LEDOUT,
- (ledout & ~mask) | (PCA9633_LED_PWM << shift));
- break;
- }
-}
-
-static void pca9633_led_set(struct led_classdev *led_cdev,
- enum led_brightness value)
-{
- struct pca9633_led *pca9633;
-
- pca9633 = container_of(led_cdev, struct pca9633_led, led_cdev);
-
- pca9633->brightness = value;
-
- /*
- * Must use workqueue for the actual I/O since I2C operations
- * can sleep.
- */
- schedule_work(&pca9633->work);
-}
-
-static int pca9633_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
-{
- struct pca9633_led *pca9633;
- struct pca9633_platform_data *pdata;
- int i, err;
-
- pdata = client->dev.platform_data;
-
- if (pdata) {
- if (pdata->leds.num_leds <= 0 || pdata->leds.num_leds > 4) {
- dev_err(&client->dev, "board info must claim at most 4 LEDs");
- return -EINVAL;
- }
- }
-
- pca9633 = devm_kzalloc(&client->dev, 4 * sizeof(*pca9633), GFP_KERNEL);
- if (!pca9633)
- return -ENOMEM;
-
- i2c_set_clientdata(client, pca9633);
-
- for (i = 0; i < 4; i++) {
- pca9633[i].client = client;
- pca9633[i].led_num = i;
-
- /* Platform data can specify LED names and default triggers */
- if (pdata && i < pdata->leds.num_leds) {
- if (pdata->leds.leds[i].name)
- snprintf(pca9633[i].name,
- sizeof(pca9633[i].name), "pca9633:%s",
- pdata->leds.leds[i].name);
- if (pdata->leds.leds[i].default_trigger)
- pca9633[i].led_cdev.default_trigger =
- pdata->leds.leds[i].default_trigger;
- } else {
- snprintf(pca9633[i].name, sizeof(pca9633[i].name),
- "pca9633:%d", i);
- }
-
- pca9633[i].led_cdev.name = pca9633[i].name;
- pca9633[i].led_cdev.brightness_set = pca9633_led_set;
-
- INIT_WORK(&pca9633[i].work, pca9633_led_work);
-
- err = led_classdev_register(&client->dev, &pca9633[i].led_cdev);
- if (err < 0)
- goto exit;
- }
-
- /* Disable LED all-call address and set normal mode */
- i2c_smbus_write_byte_data(client, PCA9633_MODE1, 0x00);
-
- /* Configure output: open-drain or totem pole (push-pull) */
- if (pdata && pdata->outdrv == PCA9633_OPEN_DRAIN)
- i2c_smbus_write_byte_data(client, PCA9633_MODE2, 0x01);
-
- /* Turn off LEDs */
- i2c_smbus_write_byte_data(client, PCA9633_LEDOUT, 0x00);
-
- return 0;
-
-exit:
- while (i--) {
- led_classdev_unregister(&pca9633[i].led_cdev);
- cancel_work_sync(&pca9633[i].work);
- }
-
- return err;
-}
-
-static int pca9633_remove(struct i2c_client *client)
-{
- struct pca9633_led *pca9633 = i2c_get_clientdata(client);
- int i;
-
- for (i = 0; i < 4; i++) {
- led_classdev_unregister(&pca9633[i].led_cdev);
- cancel_work_sync(&pca9633[i].work);
- }
-
- return 0;
-}
-
-static struct i2c_driver pca9633_driver = {
- .driver = {
- .name = "leds-pca9633",
- .owner = THIS_MODULE,
- },
- .probe = pca9633_probe,
- .remove = pca9633_remove,
- .id_table = pca9633_id,
-};
-
-module_i2c_driver(pca9633_driver);
-
-MODULE_AUTHOR("Peter Meerwald <p.meerwald@bct-electronic.com>");
-MODULE_DESCRIPTION("PCA9633 LED driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/leds/leds-pca963x.c b/drivers/leds/leds-pca963x.c
new file mode 100644
index 0000000..82589c0
--- /dev/null
+++ b/drivers/leds/leds-pca963x.c
@@ -0,0 +1,461 @@
+/*
+ * Copyright 2011 bct electronic GmbH
+ * Copyright 2013 Qtechnology/AS
+ *
+ * Author: Peter Meerwald <p.meerwald@bct-electronic.com>
+ * Author: Ricardo Ribalda <ricardo.ribalda@gmail.com>
+ *
+ * Based on leds-pca955x.c
+ *
+ * This file is subject to the terms and conditions of version 2 of
+ * the GNU General Public License. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * LED driver for the PCA9633 I2C LED driver (7-bit slave address 0x62)
+ * LED driver for the PCA9634 I2C LED driver (7-bit slave address set by hw.)
+ *
+ * Note that hardware blinking violates the leds infrastructure driver
+ * interface since the hardware only supports blinking all LEDs with the
+ * same delay_on/delay_off rates. That is, only the LEDs that are set to
+ * blink will actually blink but all LEDs that are set to blink will blink
+ * in identical fashion. The delay_on/delay_off values of the last LED
+ * that is set to blink will be used for all of the blinking LEDs.
+ * Hardware blinking is disabled by default but can be enabled by setting
+ * the 'blink_type' member in the platform_data struct to 'PCA963X_HW_BLINK'
+ * or by adding the 'nxp,hw-blink' property to the DTS.
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/string.h>
+#include <linux/ctype.h>
+#include <linux/leds.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/workqueue.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/platform_data/leds-pca963x.h>
+
+/* LED select registers determine the source that drives LED outputs */
+#define PCA963X_LED_OFF 0x0 /* LED driver off */
+#define PCA963X_LED_ON 0x1 /* LED driver on */
+#define PCA963X_LED_PWM 0x2 /* Controlled through PWM */
+#define PCA963X_LED_GRP_PWM 0x3 /* Controlled through PWM/GRPPWM */
+
+#define PCA963X_MODE2_DMBLNK 0x20 /* Enable blinking */
+
+#define PCA963X_MODE1 0x00
+#define PCA963X_MODE2 0x01
+#define PCA963X_PWM_BASE 0x02
+
+enum pca963x_type {
+ pca9633,
+ pca9634,
+};
+
+struct pca963x_chipdef {
+ u8 grppwm;
+ u8 grpfreq;
+ u8 ledout_base;
+ int n_leds;
+};
+
+static struct pca963x_chipdef pca963x_chipdefs[] = {
+ [pca9633] = {
+ .grppwm = 0x6,
+ .grpfreq = 0x7,
+ .ledout_base = 0x8,
+ .n_leds = 4,
+ },
+ [pca9634] = {
+ .grppwm = 0xa,
+ .grpfreq = 0xb,
+ .ledout_base = 0xc,
+ .n_leds = 8,
+ },
+};
+
+/* Total blink period in milliseconds */
+#define PCA963X_BLINK_PERIOD_MIN 42
+#define PCA963X_BLINK_PERIOD_MAX 10667
+
+static const struct i2c_device_id pca963x_id[] = {
+ { "pca9632", pca9633 },
+ { "pca9633", pca9633 },
+ { "pca9634", pca9634 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, pca963x_id);
+
+enum pca963x_cmd {
+ BRIGHTNESS_SET,
+ BLINK_SET,
+};
+
+struct pca963x_led;
+
+struct pca963x {
+ struct pca963x_chipdef *chipdef;
+ struct mutex mutex;
+ struct i2c_client *client;
+ struct pca963x_led *leds;
+};
+
+struct pca963x_led {
+ struct pca963x *chip;
+ struct work_struct work;
+ enum led_brightness brightness;
+ struct led_classdev led_cdev;
+ int led_num; /* 0 .. 7 potentially */
+ enum pca963x_cmd cmd;
+ char name[32];
+ u8 gdc;
+ u8 gfrq;
+};
+
+static void pca963x_brightness_work(struct pca963x_led *pca963x)
+{
+ u8 ledout_addr = pca963x->chip->chipdef->ledout_base
+ + (pca963x->led_num / 4);
+ u8 ledout;
+ int shift = 2 * (pca963x->led_num % 4);
+ u8 mask = 0x3 << shift;
+
+ mutex_lock(&pca963x->chip->mutex);
+ ledout = i2c_smbus_read_byte_data(pca963x->chip->client, ledout_addr);
+ switch (pca963x->brightness) {
+ case LED_FULL:
+ i2c_smbus_write_byte_data(pca963x->chip->client, ledout_addr,
+ (ledout & ~mask) | (PCA963X_LED_ON << shift));
+ break;
+ case LED_OFF:
+ i2c_smbus_write_byte_data(pca963x->chip->client, ledout_addr,
+ ledout & ~mask);
+ break;
+ default:
+ i2c_smbus_write_byte_data(pca963x->chip->client,
+ PCA963X_PWM_BASE + pca963x->led_num,
+ pca963x->brightness);
+ i2c_smbus_write_byte_data(pca963x->chip->client, ledout_addr,
+ (ledout & ~mask) | (PCA963X_LED_PWM << shift));
+ break;
+ }
+ mutex_unlock(&pca963x->chip->mutex);
+}
+
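The LEDOUT layout packs four LEDs per register, two control bits each. A worked sketch of the addressing above for LED 5 on a PCA9634 (ledout_base 0xc, per the chipdef):

	u8 ledout_addr = 0xc + (5 / 4);	/* 0x0d: second LEDOUT register */
	int shift = 2 * (5 % 4);	/* 2: LED 5 uses bits 3:2 */
	u8 mask = 0x3 << shift;		/* 0x0c */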
+static void pca963x_blink_work(struct pca963x_led *pca963x)
+{
+ u8 ledout_addr = pca963x->chip->chipdef->ledout_base +
+ (pca963x->led_num / 4);
+ u8 ledout;
+ u8 mode2 = i2c_smbus_read_byte_data(pca963x->chip->client,
+ PCA963X_MODE2);
+ int shift = 2 * (pca963x->led_num % 4);
+ u8 mask = 0x3 << shift;
+
+ i2c_smbus_write_byte_data(pca963x->chip->client,
+ pca963x->chip->chipdef->grppwm, pca963x->gdc);
+
+ i2c_smbus_write_byte_data(pca963x->chip->client,
+ pca963x->chip->chipdef->grpfreq, pca963x->gfrq);
+
+ if (!(mode2 & PCA963X_MODE2_DMBLNK))
+ i2c_smbus_write_byte_data(pca963x->chip->client, PCA963X_MODE2,
+ mode2 | PCA963X_MODE2_DMBLNK);
+
+ mutex_lock(&pca963x->chip->mutex);
+ ledout = i2c_smbus_read_byte_data(pca963x->chip->client, ledout_addr);
+ if ((ledout & mask) != (PCA963X_LED_GRP_PWM << shift))
+ i2c_smbus_write_byte_data(pca963x->chip->client, ledout_addr,
+ (ledout & ~mask) | (PCA963X_LED_GRP_PWM << shift));
+ mutex_unlock(&pca963x->chip->mutex);
+}
+
+static void pca963x_work(struct work_struct *work)
+{
+ struct pca963x_led *pca963x = container_of(work,
+ struct pca963x_led, work);
+
+ switch (pca963x->cmd) {
+ case BRIGHTNESS_SET:
+ pca963x_brightness_work(pca963x);
+ break;
+ case BLINK_SET:
+ pca963x_blink_work(pca963x);
+ break;
+ }
+}
+
+static void pca963x_led_set(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ struct pca963x_led *pca963x;
+
+ pca963x = container_of(led_cdev, struct pca963x_led, led_cdev);
+
+ pca963x->cmd = BRIGHTNESS_SET;
+ pca963x->brightness = value;
+
+ /*
+ * Must use workqueue for the actual I/O since I2C operations
+ * can sleep.
+ */
+ schedule_work(&pca963x->work);
+}
+
+static int pca963x_blink_set(struct led_classdev *led_cdev,
+ unsigned long *delay_on, unsigned long *delay_off)
+{
+ struct pca963x_led *pca963x;
+ unsigned long time_on, time_off, period;
+ u8 gdc, gfrq;
+
+ pca963x = container_of(led_cdev, struct pca963x_led, led_cdev);
+
+ time_on = *delay_on;
+ time_off = *delay_off;
+
+ /* If both zero, pick reasonable defaults of 500ms each */
+ if (!time_on && !time_off) {
+ time_on = 500;
+ time_off = 500;
+ }
+
+ period = time_on + time_off;
+
+ /* If period not supported by hardware, default to something sane. */
+ if ((period < PCA963X_BLINK_PERIOD_MIN) ||
+ (period > PCA963X_BLINK_PERIOD_MAX)) {
+ time_on = 500;
+ time_off = 500;
+ period = time_on + time_off;
+ }
+
+ /*
+ * From manual: duty cycle = (GDC / 256) ->
+ * (time_on / period) = (GDC / 256) ->
+ * GDC = ((time_on * 256) / period)
+ */
+ gdc = (time_on * 256) / period;
+
+ /*
+ * From manual: period = ((GFRQ + 1) / 24) in seconds.
+ * So, period (in ms) = (((GFRQ + 1) / 24) * 1000) ->
+ * GFRQ = ((period * 24 / 1000) - 1)
+ */
+ gfrq = (period * 24 / 1000) - 1;
+
+ pca963x->cmd = BLINK_SET;
+ pca963x->gdc = gdc;
+ pca963x->gfrq = gfrq;
+
+ /*
+ * Must use workqueue for the actual I/O since I2C operations
+ * can sleep.
+ */
+ schedule_work(&pca963x->work);
+
+ *delay_on = time_on;
+ *delay_off = time_off;
+
+ return 0;
+}
+
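Plugging the 500 ms / 500 ms default into the two formulas above gives concrete register values:

/*
 *	period = 500 + 500              = 1000 ms
 *	gdc    = (500 * 256) / 1000     = 128	(50% duty cycle)
 *	gfrq   = (1000 * 24) / 1000 - 1 = 23	(~1 s blink period)
 */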
+#if IS_ENABLED(CONFIG_OF)
+static struct pca963x_platform_data *
+pca963x_dt_init(struct i2c_client *client, struct pca963x_chipdef *chip)
+{
+ struct device_node *np = client->dev.of_node, *child;
+ struct pca963x_platform_data *pdata;
+ struct led_info *pca963x_leds;
+ int count;
+
+ count = of_get_child_count(np);
+ if (!count || count > chip->n_leds)
+ return ERR_PTR(-ENODEV);
+
+ pca963x_leds = devm_kzalloc(&client->dev,
+ sizeof(struct led_info) * chip->n_leds, GFP_KERNEL);
+ if (!pca963x_leds)
+ return ERR_PTR(-ENOMEM);
+
+ for_each_child_of_node(np, child) {
+ struct led_info led;
+ u32 reg;
+ int res;
+
+ res = of_property_read_u32(child, "reg", &reg);
+ if ((res != 0) || (reg >= chip->n_leds))
+ continue;
+ led.name =
+ of_get_property(child, "label", NULL) ? : child->name;
+ led.default_trigger =
+ of_get_property(child, "linux,default-trigger", NULL);
+ pca963x_leds[reg] = led;
+ }
+ pdata = devm_kzalloc(&client->dev,
+ sizeof(struct pca963x_platform_data), GFP_KERNEL);
+ if (!pdata)
+ return ERR_PTR(-ENOMEM);
+
+ pdata->leds.leds = pca963x_leds;
+ pdata->leds.num_leds = chip->n_leds;
+
+ /* default to open-drain unless totem pole (push-pull) is specified */
+ if (of_property_read_bool(np, "nxp,totem-pole"))
+ pdata->outdrv = PCA963X_TOTEM_POLE;
+ else
+ pdata->outdrv = PCA963X_OPEN_DRAIN;
+
+ /* default to software blinking unless hardware blinking is specified */
+ if (of_property_read_bool(np, "nxp,hw-blink"))
+ pdata->blink_type = PCA963X_HW_BLINK;
+ else
+ pdata->blink_type = PCA963X_SW_BLINK;
+
+ return pdata;
+}
+
+static const struct of_device_id of_pca963x_match[] = {
+ { .compatible = "nxp,pca9632", },
+ { .compatible = "nxp,pca9633", },
+ { .compatible = "nxp,pca9634", },
+ {},
+};
+#else
+static struct pca963x_platform_data *
+pca963x_dt_init(struct i2c_client *client, struct pca963x_chipdef *chip)
+{
+ return ERR_PTR(-ENODEV);
+}
+#endif
+
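A device tree fragment this parser would accept might look as follows; the node names, unit address, and labels are illustrative assumptions, not from a real board file:

/*
 *	pca9634@20 {
 *		compatible = "nxp,pca9634";
 *		reg = <0x20>;
 *		nxp,totem-pole;
 *
 *		led@0 {
 *			reg = <0>;
 *			label = "front:red";
 *			linux,default-trigger = "heartbeat";
 *		};
 *	};
 */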
+static int pca963x_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct pca963x *pca963x_chip;
+ struct pca963x_led *pca963x;
+ struct pca963x_platform_data *pdata;
+ struct pca963x_chipdef *chip;
+ int i, err;
+
+ chip = &pca963x_chipdefs[id->driver_data];
+ pdata = dev_get_platdata(&client->dev);
+
+ if (!pdata) {
+ pdata = pca963x_dt_init(client, chip);
+ if (IS_ERR(pdata)) {
+ dev_warn(&client->dev, "could not parse configuration\n");
+ pdata = NULL;
+ }
+ }
+
+ if (pdata && (pdata->leds.num_leds < 1 ||
+ pdata->leds.num_leds > chip->n_leds)) {
+ dev_err(&client->dev, "board info must claim 1-%d LEDs",
+ chip->n_leds);
+ return -EINVAL;
+ }
+
+ pca963x_chip = devm_kzalloc(&client->dev, sizeof(*pca963x_chip),
+ GFP_KERNEL);
+ if (!pca963x_chip)
+ return -ENOMEM;
+ pca963x = devm_kzalloc(&client->dev, chip->n_leds * sizeof(*pca963x),
+ GFP_KERNEL);
+ if (!pca963x)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, pca963x_chip);
+
+ mutex_init(&pca963x_chip->mutex);
+ pca963x_chip->chipdef = chip;
+ pca963x_chip->client = client;
+ pca963x_chip->leds = pca963x;
+
+ /* Turn off LEDs by default */
+ i2c_smbus_write_byte_data(client, chip->ledout_base, 0x00);
+ if (chip->n_leds > 4)
+ i2c_smbus_write_byte_data(client, chip->ledout_base + 1, 0x00);
+
+ for (i = 0; i < chip->n_leds; i++) {
+ pca963x[i].led_num = i;
+ pca963x[i].chip = pca963x_chip;
+
+ /* Platform data can specify LED names and default triggers */
+ if (pdata && i < pdata->leds.num_leds) {
+ if (pdata->leds.leds[i].name)
+ snprintf(pca963x[i].name,
+ sizeof(pca963x[i].name), "pca963x:%s",
+ pdata->leds.leds[i].name);
+ if (pdata->leds.leds[i].default_trigger)
+ pca963x[i].led_cdev.default_trigger =
+ pdata->leds.leds[i].default_trigger;
+ }
+ if (!pdata || i >= pdata->leds.num_leds ||
+ !pdata->leds.leds[i].name)
+ snprintf(pca963x[i].name, sizeof(pca963x[i].name),
+ "pca963x:%d:%.2x:%d", client->adapter->nr,
+ client->addr, i);
+
+ pca963x[i].led_cdev.name = pca963x[i].name;
+ pca963x[i].led_cdev.brightness_set = pca963x_led_set;
+
+ if (pdata && pdata->blink_type == PCA963X_HW_BLINK)
+ pca963x[i].led_cdev.blink_set = pca963x_blink_set;
+
+ INIT_WORK(&pca963x[i].work, pca963x_work);
+
+ err = led_classdev_register(&client->dev, &pca963x[i].led_cdev);
+ if (err < 0)
+ goto exit;
+ }
+
+ /* Disable LED all-call address and set normal mode */
+ i2c_smbus_write_byte_data(client, PCA963X_MODE1, 0x00);
+
+ /* Configure output: open-drain or totem pole (push-pull) */
+ if (pdata && pdata->outdrv == PCA963X_OPEN_DRAIN)
+ i2c_smbus_write_byte_data(client, PCA963X_MODE2, 0x01);
+
+ return 0;
+
+exit:
+ while (i--) {
+ led_classdev_unregister(&pca963x[i].led_cdev);
+ cancel_work_sync(&pca963x[i].work);
+ }
+
+ return err;
+}
+
+static int pca963x_remove(struct i2c_client *client)
+{
+ struct pca963x *pca963x = i2c_get_clientdata(client);
+ int i;
+
+ for (i = 0; i < pca963x->chipdef->n_leds; i++) {
+ led_classdev_unregister(&pca963x->leds[i].led_cdev);
+ cancel_work_sync(&pca963x->leds[i].work);
+ }
+
+ return 0;
+}
+
+static struct i2c_driver pca963x_driver = {
+ .driver = {
+ .name = "leds-pca963x",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(of_pca963x_match),
+ },
+ .probe = pca963x_probe,
+ .remove = pca963x_remove,
+ .id_table = pca963x_id,
+};
+
+module_i2c_driver(pca963x_driver);
+
+MODULE_AUTHOR("Peter Meerwald <p.meerwald@bct-electronic.com>");
+MODULE_DESCRIPTION("PCA963X LED driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/leds/leds-pwm.c b/drivers/leds/leds-pwm.c
index faf52c0..bb6f948 100644
--- a/drivers/leds/leds-pwm.c
+++ b/drivers/leds/leds-pwm.c
@@ -147,7 +147,7 @@ err:
static int led_pwm_probe(struct platform_device *pdev)
{
- struct led_pwm_platform_data *pdata = pdev->dev.platform_data;
+ struct led_pwm_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct led_pwm_priv *priv;
int i, ret = 0;
diff --git a/drivers/leds/leds-regulator.c b/drivers/leds/leds-regulator.c
index 4253a9b..358430d 100644
--- a/drivers/leds/leds-regulator.c
+++ b/drivers/leds/leds-regulator.c
@@ -142,7 +142,8 @@ static void regulator_led_brightness_set(struct led_classdev *led_cdev,
static int regulator_led_probe(struct platform_device *pdev)
{
- struct led_regulator_platform_data *pdata = pdev->dev.platform_data;
+ struct led_regulator_platform_data *pdata =
+ dev_get_platdata(&pdev->dev);
struct regulator_led *led;
struct regulator *vcc;
int ret = 0;
diff --git a/drivers/leds/leds-s3c24xx.c b/drivers/leds/leds-s3c24xx.c
index e1a0df6..76483fb 100644
--- a/drivers/leds/leds-s3c24xx.c
+++ b/drivers/leds/leds-s3c24xx.c
@@ -71,7 +71,7 @@ static int s3c24xx_led_remove(struct platform_device *dev)
static int s3c24xx_led_probe(struct platform_device *dev)
{
- struct s3c24xx_led_platdata *pdata = dev->dev.platform_data;
+ struct s3c24xx_led_platdata *pdata = dev_get_platdata(&dev->dev);
struct s3c24xx_gpio_led *led;
int ret;
diff --git a/drivers/leds/leds-ss4200.c b/drivers/leds/leds-ss4200.c
index 64e204e..5b8f938 100644
--- a/drivers/leds/leds-ss4200.c
+++ b/drivers/leds/leds-ss4200.c
@@ -91,7 +91,7 @@ MODULE_PARM_DESC(nodetect, "Skip DMI-based hardware detection");
* detected as working, but in reality it is not) as low as
* possible.
*/
-static struct dmi_system_id __initdata nas_led_whitelist[] = {
+static struct dmi_system_id nas_led_whitelist[] __initdata = {
{
.callback = ss4200_led_dmi_callback,
.ident = "Intel SS4200-E",
@@ -197,7 +197,7 @@ static void nasgpio_led_set_attr(struct led_classdev *led_cdev,
spin_unlock(&nasgpio_gpio_lock);
}
-u32 nasgpio_led_get_attr(struct led_classdev *led_cdev, u32 port)
+static u32 nasgpio_led_get_attr(struct led_classdev *led_cdev, u32 port)
{
struct nasgpio_led *led = led_classdev_to_nasgpio_led(led_cdev);
u32 gpio_in;
diff --git a/drivers/leds/leds-tca6507.c b/drivers/leds/leds-tca6507.c
index 98fe021..8cc304f 100644
--- a/drivers/leds/leds-tca6507.c
+++ b/drivers/leds/leds-tca6507.c
@@ -737,7 +737,7 @@ static int tca6507_probe(struct i2c_client *client,
int i = 0;
adapter = to_i2c_adapter(client->dev.parent);
- pdata = client->dev.platform_data;
+ pdata = dev_get_platdata(&client->dev);
if (!i2c_check_functionality(adapter, I2C_FUNC_I2C))
return -EIO;
diff --git a/drivers/leds/leds-wm831x-status.c b/drivers/leds/leds-wm831x-status.c
index 120815a..0a1a13f 100644
--- a/drivers/leds/leds-wm831x-status.c
+++ b/drivers/leds/leds-wm831x-status.c
@@ -230,9 +230,9 @@ static int wm831x_status_probe(struct platform_device *pdev)
int id = pdev->id % ARRAY_SIZE(chip_pdata->status);
int ret;
- res = platform_get_resource(pdev, IORESOURCE_IO, 0);
+ res = platform_get_resource(pdev, IORESOURCE_REG, 0);
if (res == NULL) {
- dev_err(&pdev->dev, "No I/O resource\n");
+ dev_err(&pdev->dev, "No register resource\n");
ret = -EINVAL;
goto err;
}
@@ -246,8 +246,8 @@ static int wm831x_status_probe(struct platform_device *pdev)
drvdata->wm831x = wm831x;
drvdata->reg = res->start;
- if (wm831x->dev->platform_data)
- chip_pdata = wm831x->dev->platform_data;
+ if (dev_get_platdata(wm831x->dev))
+ chip_pdata = dev_get_platdata(wm831x->dev);
else
chip_pdata = NULL;
diff --git a/drivers/leds/leds-wm8350.c b/drivers/leds/leds-wm8350.c
index 8a181d5..3f75fd2 100644
--- a/drivers/leds/leds-wm8350.c
+++ b/drivers/leds/leds-wm8350.c
@@ -203,7 +203,7 @@ static int wm8350_led_probe(struct platform_device *pdev)
{
struct regulator *isink, *dcdc;
struct wm8350_led *led;
- struct wm8350_led_platform_data *pdata = pdev->dev.platform_data;
+ struct wm8350_led_platform_data *pdata = dev_get_platdata(&pdev->dev);
int i;
if (pdata == NULL) {
diff --git a/drivers/leds/trigger/ledtrig-backlight.c b/drivers/leds/trigger/ledtrig-backlight.c
index 3c9c88a..47e55aa 100644
--- a/drivers/leds/trigger/ledtrig-backlight.c
+++ b/drivers/leds/trigger/ledtrig-backlight.c
@@ -36,26 +36,28 @@ static int fb_notifier_callback(struct notifier_block *p,
struct bl_trig_notifier, notifier);
struct led_classdev *led = n->led;
struct fb_event *fb_event = data;
- int *blank = fb_event->data;
- int new_status = *blank ? BLANK : UNBLANK;
+ int *blank;
+ int new_status;
- switch (event) {
- case FB_EVENT_BLANK:
- if (new_status == n->old_status)
- break;
+ /* If we aren't interested in this event, skip it immediately ... */
+ if (event != FB_EVENT_BLANK)
+ return 0;
- if ((n->old_status == UNBLANK) ^ n->invert) {
- n->brightness = led->brightness;
- __led_set_brightness(led, LED_OFF);
- } else {
- __led_set_brightness(led, n->brightness);
- }
+ blank = fb_event->data;
+ new_status = *blank ? BLANK : UNBLANK;
- n->old_status = new_status;
+ if (new_status == n->old_status)
+ return 0;
- break;
+ if ((n->old_status == UNBLANK) ^ n->invert) {
+ n->brightness = led->brightness;
+ __led_set_brightness(led, LED_OFF);
+ } else {
+ __led_set_brightness(led, n->brightness);
}
+ n->old_status = new_status;
+
return 0;
}
diff --git a/drivers/lguest/interrupts_and_traps.c b/drivers/lguest/interrupts_and_traps.c
index 28433a1..70dfcdc 100644
--- a/drivers/lguest/interrupts_and_traps.c
+++ b/drivers/lguest/interrupts_and_traps.c
@@ -140,6 +140,16 @@ static void set_guest_interrupt(struct lg_cpu *cpu, u32 lo, u32 hi,
cpu->regs->eip = idt_address(lo, hi);
/*
+ * Trapping always clears these flags:
+ * TF: Trap flag
+ * VM: Virtual 8086 mode
+ * RF: Resume
+ * NT: Nested task.
+ */
+ cpu->regs->eflags &=
+ ~(X86_EFLAGS_TF|X86_EFLAGS_VM|X86_EFLAGS_RF|X86_EFLAGS_NT);
+
+ /*
* There are two kinds of interrupt handlers: 0xE is an "interrupt
* gate" which expects interrupts to be disabled on entry.
*/
diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c
index a35d8d1..bfb39bb 100644
--- a/drivers/lguest/page_tables.c
+++ b/drivers/lguest/page_tables.c
@@ -669,8 +669,10 @@ unsigned long guest_pa(struct lg_cpu *cpu, unsigned long vaddr)
#ifdef CONFIG_X86_PAE
gpmd = lgread(cpu, gpmd_addr(gpgd, vaddr), pmd_t);
- if (!(pmd_flags(gpmd) & _PAGE_PRESENT))
+ if (!(pmd_flags(gpmd) & _PAGE_PRESENT)) {
kill_guest(cpu, "Bad address %#lx", vaddr);
+ return -1UL;
+ }
gpte = lgread(cpu, gpte_addr(cpu, gpmd, vaddr), pte_t);
#else
gpte = lgread(cpu, gpte_addr(cpu, gpgd, vaddr), pte_t);
diff --git a/drivers/md/Makefile b/drivers/md/Makefile
index 5ef78ef..2acc43f 100644
--- a/drivers/md/Makefile
+++ b/drivers/md/Makefile
@@ -3,7 +3,7 @@
#
dm-mod-y += dm.o dm-table.o dm-target.o dm-linear.o dm-stripe.o \
- dm-ioctl.o dm-io.o dm-kcopyd.o dm-sysfs.o
+ dm-ioctl.o dm-io.o dm-kcopyd.o dm-sysfs.o dm-stats.o
dm-multipath-y += dm-path-selector.o dm-mpath.o
dm-snapshot-y += dm-snap.o dm-exception-store.o dm-snap-transient.o \
dm-snap-persistent.o
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index ee37288..f9764e6 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -597,24 +597,19 @@ static int mca_reap(struct btree *b, struct closure *cl, unsigned min_order)
return 0;
}
-static int bch_mca_shrink(struct shrinker *shrink, struct shrink_control *sc)
+static unsigned long bch_mca_scan(struct shrinker *shrink,
+ struct shrink_control *sc)
{
struct cache_set *c = container_of(shrink, struct cache_set, shrink);
struct btree *b, *t;
unsigned long i, nr = sc->nr_to_scan;
+ unsigned long freed = 0;
if (c->shrinker_disabled)
- return 0;
+ return SHRINK_STOP;
if (c->try_harder)
- return 0;
-
- /*
- * If nr == 0, we're supposed to return the number of items we have
- * cached. Not allowed to return -1.
- */
- if (!nr)
- return mca_can_free(c) * c->btree_pages;
+ return SHRINK_STOP;
/* Return -1 if we can't do anything right now */
if (sc->gfp_mask & __GFP_WAIT)
@@ -634,14 +629,14 @@ static int bch_mca_shrink(struct shrinker *shrink, struct shrink_control *sc)
i = 0;
list_for_each_entry_safe(b, t, &c->btree_cache_freeable, list) {
- if (!nr)
+ if (freed >= nr)
break;
if (++i > 3 &&
!mca_reap(b, NULL, 0)) {
mca_data_free(b);
rw_unlock(true, b);
- --nr;
+ freed++;
}
}
@@ -652,7 +647,7 @@ static int bch_mca_shrink(struct shrinker *shrink, struct shrink_control *sc)
if (list_empty(&c->btree_cache))
goto out;
- for (i = 0; nr && i < c->bucket_cache_used; i++) {
+ for (i = 0; (nr--) && i < c->bucket_cache_used; i++) {
b = list_first_entry(&c->btree_cache, struct btree, list);
list_rotate_left(&c->btree_cache);
@@ -661,14 +656,27 @@ static int bch_mca_shrink(struct shrinker *shrink, struct shrink_control *sc)
mca_bucket_free(b);
mca_data_free(b);
rw_unlock(true, b);
- --nr;
+ freed++;
} else
b->accessed = 0;
}
out:
- nr = mca_can_free(c) * c->btree_pages;
mutex_unlock(&c->bucket_lock);
- return nr;
+ return freed;
+}
+
+static unsigned long bch_mca_count(struct shrinker *shrink,
+ struct shrink_control *sc)
+{
+ struct cache_set *c = container_of(shrink, struct cache_set, shrink);
+
+ if (c->shrinker_disabled)
+ return 0;
+
+ if (c->try_harder)
+ return 0;
+
+ return mca_can_free(c) * c->btree_pages;
}
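For context on the conversion (a summary of the split shrinker API, not text from this patch): ->count_objects() only reports how many objects are freeable, while ->scan_objects() does the freeing and returns the count actually freed, or SHRINK_STOP when it cannot make progress. A minimal shape, with hypothetical helpers nr_freeable(), can_reclaim_now() and try_free():

static unsigned long example_count(struct shrinker *s,
				   struct shrink_control *sc)
{
	return nr_freeable();			/* report only, never free */
}

static unsigned long example_scan(struct shrinker *s,
				  struct shrink_control *sc)
{
	if (!can_reclaim_now(sc->gfp_mask))	/* e.g. would deadlock */
		return SHRINK_STOP;

	return try_free(sc->nr_to_scan);	/* number actually freed */
}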
void bch_btree_cache_free(struct cache_set *c)
@@ -737,7 +745,8 @@ int bch_btree_cache_alloc(struct cache_set *c)
c->verify_data = NULL;
#endif
- c->shrink.shrink = bch_mca_shrink;
+ c->shrink.count_objects = bch_mca_count;
+ c->shrink.scan_objects = bch_mca_scan;
c->shrink.seeks = 4;
c->shrink.batch = c->btree_pages * 2;
register_shrinker(&c->shrink);
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index 12a2c28..4fe6ab2 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -556,7 +556,7 @@ STORE(__bch_cache_set)
struct shrink_control sc;
sc.gfp_mask = GFP_KERNEL;
sc.nr_to_scan = strtoul_or_return(buf);
- c->shrink.shrink(&c->shrink, &sc);
+ c->shrink.scan_objects(&c->shrink, &sc);
}
sysfs_strtoul(congested_read_threshold_us,
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 5227e07..173cbb2 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -1425,62 +1425,75 @@ static int __cleanup_old_buffer(struct dm_buffer *b, gfp_t gfp,
unsigned long max_jiffies)
{
if (jiffies - b->last_accessed < max_jiffies)
- return 1;
+ return 0;
if (!(gfp & __GFP_IO)) {
if (test_bit(B_READING, &b->state) ||
test_bit(B_WRITING, &b->state) ||
test_bit(B_DIRTY, &b->state))
- return 1;
+ return 0;
}
if (b->hold_count)
- return 1;
+ return 0;
__make_buffer_clean(b);
__unlink_buffer(b);
__free_buffer_wake(b);
- return 0;
+ return 1;
}
-static void __scan(struct dm_bufio_client *c, unsigned long nr_to_scan,
- struct shrink_control *sc)
+static long __scan(struct dm_bufio_client *c, unsigned long nr_to_scan,
+ gfp_t gfp_mask)
{
int l;
struct dm_buffer *b, *tmp;
+ long freed = 0;
for (l = 0; l < LIST_SIZE; l++) {
- list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list)
- if (!__cleanup_old_buffer(b, sc->gfp_mask, 0) &&
- !--nr_to_scan)
- return;
+ list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list) {
+ freed += __cleanup_old_buffer(b, gfp_mask, 0);
+ if (!--nr_to_scan)
+ break;
+ }
dm_bufio_cond_resched();
}
+ return freed;
}
-static int shrink(struct shrinker *shrinker, struct shrink_control *sc)
+static unsigned long
+dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
- struct dm_bufio_client *c =
- container_of(shrinker, struct dm_bufio_client, shrinker);
- unsigned long r;
- unsigned long nr_to_scan = sc->nr_to_scan;
+ struct dm_bufio_client *c;
+ unsigned long freed;
+ c = container_of(shrink, struct dm_bufio_client, shrinker);
if (sc->gfp_mask & __GFP_IO)
dm_bufio_lock(c);
else if (!dm_bufio_trylock(c))
- return !nr_to_scan ? 0 : -1;
+ return SHRINK_STOP;
- if (nr_to_scan)
- __scan(c, nr_to_scan, sc);
+ freed = __scan(c, sc->nr_to_scan, sc->gfp_mask);
+ dm_bufio_unlock(c);
+ return freed;
+}
- r = c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY];
- if (r > INT_MAX)
- r = INT_MAX;
+static unsigned long
+dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
+{
+ struct dm_bufio_client *c;
+ unsigned long count;
- dm_bufio_unlock(c);
+ c = container_of(shrink, struct dm_bufio_client, shrinker);
+ if (sc->gfp_mask & __GFP_IO)
+ dm_bufio_lock(c);
+ else if (!dm_bufio_trylock(c))
+ return 0;
- return r;
+ count = c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY];
+ dm_bufio_unlock(c);
+ return count;
}
/*
@@ -1582,7 +1595,8 @@ struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsign
__cache_size_refresh();
mutex_unlock(&dm_bufio_clients_lock);
- c->shrinker.shrink = shrink;
+ c->shrinker.count_objects = dm_bufio_shrink_count;
+ c->shrinker.scan_objects = dm_bufio_shrink_scan;
c->shrinker.seeks = 1;
c->shrinker.batch = 0;
register_shrinker(&c->shrinker);
@@ -1669,7 +1683,7 @@ static void cleanup_old_buffers(void)
struct dm_buffer *b;
b = list_entry(c->lru[LIST_CLEAN].prev,
struct dm_buffer, lru_list);
- if (__cleanup_old_buffer(b, 0, max_age * HZ))
+ if (!__cleanup_old_buffer(b, 0, max_age * HZ))
break;
dm_bufio_cond_resched();
}
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 0df3ec0..2956976 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -67,9 +67,11 @@ static void free_bitset(unsigned long *bits)
#define MIGRATION_COUNT_WINDOW 10
/*
- * The block size of the device holding cache data must be >= 32KB
+ * The block size of the device holding cache data must be
+ * between 32KB and 1GB.
*/
#define DATA_DEV_BLOCK_SIZE_MIN_SECTORS (32 * 1024 >> SECTOR_SHIFT)
+#define DATA_DEV_BLOCK_SIZE_MAX_SECTORS (1024 * 1024 * 1024 >> SECTOR_SHIFT)
/*
* FIXME: the cache is read/write for the time being.
@@ -101,6 +103,8 @@ struct cache {
struct dm_target *ti;
struct dm_target_callbacks callbacks;
+ struct dm_cache_metadata *cmd;
+
/*
* Metadata is written to this device.
*/
@@ -117,11 +121,6 @@ struct cache {
struct dm_dev *cache_dev;
/*
- * Cache features such as write-through.
- */
- struct cache_features features;
-
- /*
* Size of the origin device in _complete_ blocks and native sectors.
*/
dm_oblock_t origin_blocks;
@@ -138,8 +137,6 @@ struct cache {
uint32_t sectors_per_block;
int sectors_per_block_shift;
- struct dm_cache_metadata *cmd;
-
spinlock_t lock;
struct bio_list deferred_bios;
struct bio_list deferred_flush_bios;
@@ -148,8 +145,8 @@ struct cache {
struct list_head completed_migrations;
struct list_head need_commit_migrations;
sector_t migration_threshold;
- atomic_t nr_migrations;
wait_queue_head_t migration_wait;
+ atomic_t nr_migrations;
/*
* cache_size entries, dirty if set
@@ -160,9 +157,16 @@ struct cache {
/*
* origin_blocks entries, discarded if set.
*/
- uint32_t discard_block_size; /* a power of 2 times sectors per block */
dm_dblock_t discard_nr_blocks;
unsigned long *discard_bitset;
+ uint32_t discard_block_size; /* a power of 2 times sectors per block */
+
+ /*
+ * Rather than reconstructing the table line for the status we just
+ * save it and regurgitate.
+ */
+ unsigned nr_ctr_args;
+ const char **ctr_args;
struct dm_kcopyd_client *copier;
struct workqueue_struct *wq;
@@ -187,14 +191,12 @@ struct cache {
bool loaded_mappings:1;
bool loaded_discards:1;
- struct cache_stats stats;
-
/*
- * Rather than reconstructing the table line for the status we just
- * save it and regurgitate.
+ * Cache features such as write-through.
*/
- unsigned nr_ctr_args;
- const char **ctr_args;
+ struct cache_features features;
+
+ struct cache_stats stats;
};
struct per_bio_data {
@@ -1687,24 +1689,25 @@ static int parse_origin_dev(struct cache_args *ca, struct dm_arg_set *as,
static int parse_block_size(struct cache_args *ca, struct dm_arg_set *as,
char **error)
{
- unsigned long tmp;
+ unsigned long block_size;
if (!at_least_one_arg(as, error))
return -EINVAL;
- if (kstrtoul(dm_shift_arg(as), 10, &tmp) || !tmp ||
- tmp < DATA_DEV_BLOCK_SIZE_MIN_SECTORS ||
- tmp & (DATA_DEV_BLOCK_SIZE_MIN_SECTORS - 1)) {
+ if (kstrtoul(dm_shift_arg(as), 10, &block_size) || !block_size ||
+ block_size < DATA_DEV_BLOCK_SIZE_MIN_SECTORS ||
+ block_size > DATA_DEV_BLOCK_SIZE_MAX_SECTORS ||
+ block_size & (DATA_DEV_BLOCK_SIZE_MIN_SECTORS - 1)) {
*error = "Invalid data block size";
return -EINVAL;
}
- if (tmp > ca->cache_sectors) {
+ if (block_size > ca->cache_sectors) {
*error = "Data block size is larger than the cache device";
return -EINVAL;
}
- ca->block_size = tmp;
+ ca->block_size = block_size;
return 0;
}
@@ -2609,9 +2612,17 @@ static void set_discard_limits(struct cache *cache, struct queue_limits *limits)
static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
struct cache *cache = ti->private;
+ uint64_t io_opt_sectors = limits->io_opt >> SECTOR_SHIFT;
- blk_limits_io_min(limits, 0);
- blk_limits_io_opt(limits, cache->sectors_per_block << SECTOR_SHIFT);
+ /*
+ * If the system-determined stacked limits are compatible with the
+ * cache's blocksize (io_opt is a factor) do not override them.
+ */
+ if (io_opt_sectors < cache->sectors_per_block ||
+ do_div(io_opt_sectors, cache->sectors_per_block)) {
+ blk_limits_io_min(limits, 0);
+ blk_limits_io_opt(limits, cache->sectors_per_block << SECTOR_SHIFT);
+ }
set_discard_limits(cache, limits);
}
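cache_io_hints now keeps the stacked io_opt whenever it is already a multiple of the cache's block size, and only overrides otherwise; pool_io_hints in dm-thin below applies the same rule. The test reduces to a divisibility check (sketch in plain C; do_div is kernel-only, so ordinary modulo stands in):

#include <stdio.h>
#include <stdint.h>

/* Returns 1 if the target should override the stacked hints. */
static int must_override_io_opt(uint64_t io_opt_sectors,
				uint64_t sectors_per_block)
{
	return io_opt_sectors < sectors_per_block ||
	       io_opt_sectors % sectors_per_block != 0;
}

int main(void)
{
	/* 1024-sector blocks: 4096 is a clean multiple, 1536 is not. */
	printf("%d\n", must_override_io_opt(4096, 1024)); /* 0: keep hints */
	printf("%d\n", must_override_io_opt(1536, 1024)); /* 1: override  */
	printf("%d\n", must_override_io_opt(512, 1024));  /* 1: too small */
	return 0;
}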
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 6d2d41a..0fce0bc 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -1645,20 +1645,14 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
}
ret = -ENOMEM;
- cc->io_queue = alloc_workqueue("kcryptd_io",
- WQ_NON_REENTRANT|
- WQ_MEM_RECLAIM,
- 1);
+ cc->io_queue = alloc_workqueue("kcryptd_io", WQ_MEM_RECLAIM, 1);
if (!cc->io_queue) {
ti->error = "Couldn't create kcryptd io queue";
goto bad;
}
cc->crypt_queue = alloc_workqueue("kcryptd",
- WQ_NON_REENTRANT|
- WQ_CPU_INTENSIVE|
- WQ_MEM_RECLAIM,
- 1);
+ WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 1);
if (!cc->crypt_queue) {
ti->error = "Couldn't create kcryptd queue";
goto bad;
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index f1b7586..afe0814 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -877,7 +877,7 @@ static int dev_rename(struct dm_ioctl *param, size_t param_size)
unsigned change_uuid = (param->flags & DM_UUID_FLAG) ? 1 : 0;
if (new_data < param->data ||
- invalid_str(new_data, (void *) param + param_size) ||
+ invalid_str(new_data, (void *) param + param_size) || !*new_data ||
strlen(new_data) > (change_uuid ? DM_UUID_LEN - 1 : DM_NAME_LEN - 1)) {
DMWARN("Invalid new mapped device name or uuid string supplied.");
return -EINVAL;
@@ -1262,44 +1262,37 @@ static int table_load(struct dm_ioctl *param, size_t param_size)
r = dm_table_create(&t, get_mode(param), param->target_count, md);
if (r)
- goto out;
+ goto err;
+ /* Protect md->type and md->queue against concurrent table loads. */
+ dm_lock_md_type(md);
r = populate_table(t, param, param_size);
- if (r) {
- dm_table_destroy(t);
- goto out;
- }
+ if (r)
+ goto err_unlock_md_type;
immutable_target_type = dm_get_immutable_target_type(md);
if (immutable_target_type &&
(immutable_target_type != dm_table_get_immutable_target_type(t))) {
DMWARN("can't replace immutable target type %s",
immutable_target_type->name);
- dm_table_destroy(t);
r = -EINVAL;
- goto out;
+ goto err_unlock_md_type;
}
- /* Protect md->type and md->queue against concurrent table loads. */
- dm_lock_md_type(md);
if (dm_get_md_type(md) == DM_TYPE_NONE)
/* Initial table load: acquire type of table. */
dm_set_md_type(md, dm_table_get_type(t));
else if (dm_get_md_type(md) != dm_table_get_type(t)) {
DMWARN("can't change device type after initial table load.");
- dm_table_destroy(t);
- dm_unlock_md_type(md);
r = -EINVAL;
- goto out;
+ goto err_unlock_md_type;
}
/* setup md->queue to reflect md's type (may block) */
r = dm_setup_md_queue(md);
if (r) {
DMWARN("unable to set up device queue for new table.");
- dm_table_destroy(t);
- dm_unlock_md_type(md);
- goto out;
+ goto err_unlock_md_type;
}
dm_unlock_md_type(md);
@@ -1309,9 +1302,8 @@ static int table_load(struct dm_ioctl *param, size_t param_size)
if (!hc || hc->md != md) {
DMWARN("device has been removed from the dev hash table.");
up_write(&_hash_lock);
- dm_table_destroy(t);
r = -ENXIO;
- goto out;
+ goto err_destroy_table;
}
if (hc->new_map)
@@ -1322,7 +1314,6 @@ static int table_load(struct dm_ioctl *param, size_t param_size)
param->flags |= DM_INACTIVE_PRESENT_FLAG;
__dev_status(md, param);
-out:
if (old_map) {
dm_sync_table(md);
dm_table_destroy(old_map);
@@ -1330,6 +1321,15 @@ out:
dm_put(md);
+ return 0;
+
+err_unlock_md_type:
+ dm_unlock_md_type(md);
+err_destroy_table:
+ dm_table_destroy(t);
+err:
+ dm_put(md);
+
return r;
}
@@ -1455,20 +1455,26 @@ static int table_status(struct dm_ioctl *param, size_t param_size)
return 0;
}
-static bool buffer_test_overflow(char *result, unsigned maxlen)
-{
- return !maxlen || strlen(result) + 1 >= maxlen;
-}
-
/*
- * Process device-mapper dependent messages.
+ * Process device-mapper dependent messages. Messages prefixed with '@'
+ * are processed by the DM core. All others are delivered to the target.
* Returns a number <= 1 if message was processed by device mapper.
* Returns 2 if message should be delivered to the target.
*/
static int message_for_md(struct mapped_device *md, unsigned argc, char **argv,
char *result, unsigned maxlen)
{
- return 2;
+ int r;
+
+ if (**argv != '@')
+ return 2; /* no '@' prefix, deliver to target */
+
+ r = dm_stats_message(md, argc, argv, result, maxlen);
+ if (r < 2)
+ return r;
+
+ DMERR("Unsupported message sent to DM core: %s", argv[0]);
+ return -EINVAL;
}
/*
@@ -1542,7 +1548,7 @@ static int target_message(struct dm_ioctl *param, size_t param_size)
if (r == 1) {
param->flags |= DM_DATA_OUT_FLAG;
- if (buffer_test_overflow(result, maxlen))
+ if (dm_message_test_buffer_overflow(result, maxlen))
param->flags |= DM_BUFFER_FULL_FLAG;
else
param->data_size = param->data_start + strlen(result) + 1;
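The table_load rework replaces per-site cleanup with ordered error labels: each label undoes one setup step and falls through the rest, so every failure path unwinds exactly what was built. A condensed sketch of the pattern with hypothetical stand-ins for the dm calls:

#include <stdlib.h>

/*
 * Each error label undoes exactly one setup step and falls through the
 * rest, mirroring table_load(). All names here are hypothetical.
 */
static int load(void)
{
	void *dev, *table;
	int r = -1;

	dev = malloc(1);                  /* find_device()       */
	if (!dev)
		return -1;
	table = malloc(1);                /* dm_table_create()   */
	if (!table)
		goto err;
	/* ... dm_lock_md_type(), populate, validate ... */
	if (0)                            /* validation failed?  */
		goto err_unlock;
	/* ... dm_unlock_md_type() ... */
	if (0)                            /* device vanished?    */
		goto err_destroy_table;

	free(table);
	free(dev);
	return 0;

err_unlock:
	/* dm_unlock_md_type() would go here */
err_destroy_table:
	free(table);
err:
	free(dev);
	return r;
}

int main(void)
{
	return load() ? 1 : 0;
}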
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index d581fe5..3a7cade 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -833,8 +833,7 @@ struct dm_kcopyd_client *dm_kcopyd_client_create(struct dm_kcopyd_throttle *thro
goto bad_slab;
INIT_WORK(&kc->kcopyd_work, do_work);
- kc->kcopyd_wq = alloc_workqueue("kcopyd",
- WQ_NON_REENTRANT | WQ_MEM_RECLAIM, 0);
+ kc->kcopyd_wq = alloc_workqueue("kcopyd", WQ_MEM_RECLAIM, 0);
if (!kc->kcopyd_wq)
goto bad_workqueue;
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 699b5be..9584443 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -1080,8 +1080,7 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
ti->per_bio_data_size = sizeof(struct dm_raid1_bio_record);
ti->discard_zeroes_data_unsupported = true;
- ms->kmirrord_wq = alloc_workqueue("kmirrord",
- WQ_NON_REENTRANT | WQ_MEM_RECLAIM, 0);
+ ms->kmirrord_wq = alloc_workqueue("kmirrord", WQ_MEM_RECLAIM, 0);
if (!ms->kmirrord_wq) {
DMERR("couldn't start kmirrord");
r = -ENOMEM;
diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
new file mode 100644
index 0000000..8ae31e8
--- /dev/null
+++ b/drivers/md/dm-stats.c
@@ -0,0 +1,969 @@
+#include <linux/errno.h>
+#include <linux/numa.h>
+#include <linux/slab.h>
+#include <linux/rculist.h>
+#include <linux/threads.h>
+#include <linux/preempt.h>
+#include <linux/irqflags.h>
+#include <linux/vmalloc.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/device-mapper.h>
+
+#include "dm.h"
+#include "dm-stats.h"
+
+#define DM_MSG_PREFIX "stats"
+
+static int dm_stat_need_rcu_barrier;
+
+/*
+ * Using 64-bit values to avoid overflow (which is a
+ * problem that block/genhd.c's IO accounting has).
+ */
+struct dm_stat_percpu {
+ unsigned long long sectors[2];
+ unsigned long long ios[2];
+ unsigned long long merges[2];
+ unsigned long long ticks[2];
+ unsigned long long io_ticks[2];
+ unsigned long long io_ticks_total;
+ unsigned long long time_in_queue;
+};
+
+struct dm_stat_shared {
+ atomic_t in_flight[2];
+ unsigned long stamp;
+ struct dm_stat_percpu tmp;
+};
+
+struct dm_stat {
+ struct list_head list_entry;
+ int id;
+ size_t n_entries;
+ sector_t start;
+ sector_t end;
+ sector_t step;
+ const char *program_id;
+ const char *aux_data;
+ struct rcu_head rcu_head;
+ size_t shared_alloc_size;
+ size_t percpu_alloc_size;
+ struct dm_stat_percpu *stat_percpu[NR_CPUS];
+ struct dm_stat_shared stat_shared[0];
+};
+
+struct dm_stats_last_position {
+ sector_t last_sector;
+ unsigned last_rw;
+};
+
+/*
+ * A typo on the command line could possibly make the kernel run out of memory
+ * and crash. To prevent the crash we account all used memory. We fail if we
+ * exhaust 1/4 of all memory or 1/2 of vmalloc space.
+ */
+#define DM_STATS_MEMORY_FACTOR 4
+#define DM_STATS_VMALLOC_FACTOR 2
+
+static DEFINE_SPINLOCK(shared_memory_lock);
+
+static unsigned long shared_memory_amount;
+
+static bool __check_shared_memory(size_t alloc_size)
+{
+ size_t a;
+
+ a = shared_memory_amount + alloc_size;
+ if (a < shared_memory_amount)
+ return false;
+ if (a >> PAGE_SHIFT > totalram_pages / DM_STATS_MEMORY_FACTOR)
+ return false;
+#ifdef CONFIG_MMU
+ if (a > (VMALLOC_END - VMALLOC_START) / DM_STATS_VMALLOC_FACTOR)
+ return false;
+#endif
+ return true;
+}
+
+static bool check_shared_memory(size_t alloc_size)
+{
+ bool ret;
+
+ spin_lock_irq(&shared_memory_lock);
+
+ ret = __check_shared_memory(alloc_size);
+
+ spin_unlock_irq(&shared_memory_lock);
+
+ return ret;
+}
+
+static bool claim_shared_memory(size_t alloc_size)
+{
+ spin_lock_irq(&shared_memory_lock);
+
+ if (!__check_shared_memory(alloc_size)) {
+ spin_unlock_irq(&shared_memory_lock);
+ return false;
+ }
+
+ shared_memory_amount += alloc_size;
+
+ spin_unlock_irq(&shared_memory_lock);
+
+ return true;
+}
+
+static void free_shared_memory(size_t alloc_size)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&shared_memory_lock, flags);
+
+ if (WARN_ON_ONCE(shared_memory_amount < alloc_size)) {
+ spin_unlock_irqrestore(&shared_memory_lock, flags);
+ DMCRIT("Memory usage accounting bug.");
+ return;
+ }
+
+ shared_memory_amount -= alloc_size;
+
+ spin_unlock_irqrestore(&shared_memory_lock, flags);
+}
+
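+/*
+ * Allocate zeroed memory: kmalloc for sizes the slab allocator can
+ * serve, vmalloc otherwise; both paths are charged against the
+ * shared-memory limits enforced above.
+ */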
+static void *dm_kvzalloc(size_t alloc_size, int node)
+{
+ void *p;
+
+ if (!claim_shared_memory(alloc_size))
+ return NULL;
+
+ if (alloc_size <= KMALLOC_MAX_SIZE) {
+ p = kzalloc_node(alloc_size, GFP_KERNEL | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN, node);
+ if (p)
+ return p;
+ }
+ p = vzalloc_node(alloc_size, node);
+ if (p)
+ return p;
+
+ free_shared_memory(alloc_size);
+
+ return NULL;
+}
+
+static void dm_kvfree(void *ptr, size_t alloc_size)
+{
+ if (!ptr)
+ return;
+
+ free_shared_memory(alloc_size);
+
+ if (is_vmalloc_addr(ptr))
+ vfree(ptr);
+ else
+ kfree(ptr);
+}
+
+static void dm_stat_free(struct rcu_head *head)
+{
+ int cpu;
+ struct dm_stat *s = container_of(head, struct dm_stat, rcu_head);
+
+ kfree(s->program_id);
+ kfree(s->aux_data);
+ for_each_possible_cpu(cpu)
+ dm_kvfree(s->stat_percpu[cpu], s->percpu_alloc_size);
+ dm_kvfree(s, s->shared_alloc_size);
+}
+
+static int dm_stat_in_flight(struct dm_stat_shared *shared)
+{
+ return atomic_read(&shared->in_flight[READ]) +
+ atomic_read(&shared->in_flight[WRITE]);
+}
+
+void dm_stats_init(struct dm_stats *stats)
+{
+ int cpu;
+ struct dm_stats_last_position *last;
+
+ mutex_init(&stats->mutex);
+ INIT_LIST_HEAD(&stats->list);
+ stats->last = alloc_percpu(struct dm_stats_last_position);
+ for_each_possible_cpu(cpu) {
+ last = per_cpu_ptr(stats->last, cpu);
+ last->last_sector = (sector_t)ULLONG_MAX;
+ last->last_rw = UINT_MAX;
+ }
+}
+
+void dm_stats_cleanup(struct dm_stats *stats)
+{
+ size_t ni;
+ struct dm_stat *s;
+ struct dm_stat_shared *shared;
+
+ while (!list_empty(&stats->list)) {
+ s = container_of(stats->list.next, struct dm_stat, list_entry);
+ list_del(&s->list_entry);
+ for (ni = 0; ni < s->n_entries; ni++) {
+ shared = &s->stat_shared[ni];
+ if (WARN_ON(dm_stat_in_flight(shared))) {
+ DMCRIT("leaked in-flight counter at index %lu "
+ "(start %llu, end %llu, step %llu): reads %d, writes %d",
+ (unsigned long)ni,
+ (unsigned long long)s->start,
+ (unsigned long long)s->end,
+ (unsigned long long)s->step,
+ atomic_read(&shared->in_flight[READ]),
+ atomic_read(&shared->in_flight[WRITE]));
+ }
+ }
+ dm_stat_free(&s->rcu_head);
+ }
+ free_percpu(stats->last);
+}
+
+static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end,
+ sector_t step, const char *program_id, const char *aux_data,
+ void (*suspend_callback)(struct mapped_device *),
+ void (*resume_callback)(struct mapped_device *),
+ struct mapped_device *md)
+{
+ struct list_head *l;
+ struct dm_stat *s, *tmp_s;
+ sector_t n_entries;
+ size_t ni;
+ size_t shared_alloc_size;
+ size_t percpu_alloc_size;
+ struct dm_stat_percpu *p;
+ int cpu;
+ int ret_id;
+ int r;
+
+ if (end < start || !step)
+ return -EINVAL;
+
+ n_entries = end - start;
+ if (dm_sector_div64(n_entries, step))
+ n_entries++;
+
+ if (n_entries != (size_t)n_entries || !(size_t)(n_entries + 1))
+ return -EOVERFLOW;
+
+ shared_alloc_size = sizeof(struct dm_stat) + (size_t)n_entries * sizeof(struct dm_stat_shared);
+ if ((shared_alloc_size - sizeof(struct dm_stat)) / sizeof(struct dm_stat_shared) != n_entries)
+ return -EOVERFLOW;
+
+ percpu_alloc_size = (size_t)n_entries * sizeof(struct dm_stat_percpu);
+ if (percpu_alloc_size / sizeof(struct dm_stat_percpu) != n_entries)
+ return -EOVERFLOW;
+
+ if (!check_shared_memory(shared_alloc_size + num_possible_cpus() * percpu_alloc_size))
+ return -ENOMEM;
+
+ s = dm_kvzalloc(shared_alloc_size, NUMA_NO_NODE);
+ if (!s)
+ return -ENOMEM;
+
+ s->n_entries = n_entries;
+ s->start = start;
+ s->end = end;
+ s->step = step;
+ s->shared_alloc_size = shared_alloc_size;
+ s->percpu_alloc_size = percpu_alloc_size;
+
+ s->program_id = kstrdup(program_id, GFP_KERNEL);
+ if (!s->program_id) {
+ r = -ENOMEM;
+ goto out;
+ }
+ s->aux_data = kstrdup(aux_data, GFP_KERNEL);
+ if (!s->aux_data) {
+ r = -ENOMEM;
+ goto out;
+ }
+
+ for (ni = 0; ni < n_entries; ni++) {
+ atomic_set(&s->stat_shared[ni].in_flight[READ], 0);
+ atomic_set(&s->stat_shared[ni].in_flight[WRITE], 0);
+ }
+
+ for_each_possible_cpu(cpu) {
+ p = dm_kvzalloc(percpu_alloc_size, cpu_to_node(cpu));
+ if (!p) {
+ r = -ENOMEM;
+ goto out;
+ }
+ s->stat_percpu[cpu] = p;
+ }
+
+ /*
+ * Suspend/resume to make sure there is no i/o in flight,
+ * so that newly created statistics will be exact.
+ *
+ * (note: we couldn't suspend earlier because we must not
+ * allocate memory while suspended)
+ */
+ suspend_callback(md);
+
+ mutex_lock(&stats->mutex);
+ s->id = 0;
+ list_for_each(l, &stats->list) {
+ tmp_s = container_of(l, struct dm_stat, list_entry);
+ if (WARN_ON(tmp_s->id < s->id)) {
+ r = -EINVAL;
+ goto out_unlock_resume;
+ }
+ if (tmp_s->id > s->id)
+ break;
+ if (unlikely(s->id == INT_MAX)) {
+ r = -ENFILE;
+ goto out_unlock_resume;
+ }
+ s->id++;
+ }
+ ret_id = s->id;
+ list_add_tail_rcu(&s->list_entry, l);
+ mutex_unlock(&stats->mutex);
+
+ resume_callback(md);
+
+ return ret_id;
+
+out_unlock_resume:
+ mutex_unlock(&stats->mutex);
+ resume_callback(md);
+out:
+ dm_stat_free(&s->rcu_head);
+ return r;
+}
+
+static struct dm_stat *__dm_stats_find(struct dm_stats *stats, int id)
+{
+ struct dm_stat *s;
+
+ list_for_each_entry(s, &stats->list, list_entry) {
+ if (s->id > id)
+ break;
+ if (s->id == id)
+ return s;
+ }
+
+ return NULL;
+}
+
+static int dm_stats_delete(struct dm_stats *stats, int id)
+{
+ struct dm_stat *s;
+ int cpu;
+
+ mutex_lock(&stats->mutex);
+
+ s = __dm_stats_find(stats, id);
+ if (!s) {
+ mutex_unlock(&stats->mutex);
+ return -ENOENT;
+ }
+
+ list_del_rcu(&s->list_entry);
+ mutex_unlock(&stats->mutex);
+
+ /*
+ * vfree can't be called from RCU callback
+ */
+ for_each_possible_cpu(cpu)
+ if (is_vmalloc_addr(s->stat_percpu[cpu]))
+ goto do_sync_free;
+ if (is_vmalloc_addr(s)) {
+do_sync_free:
+ synchronize_rcu_expedited();
+ dm_stat_free(&s->rcu_head);
+ } else {
+ ACCESS_ONCE(dm_stat_need_rcu_barrier) = 1;
+ call_rcu(&s->rcu_head, dm_stat_free);
+ }
+ return 0;
+}
+
+static int dm_stats_list(struct dm_stats *stats, const char *program,
+ char *result, unsigned maxlen)
+{
+ struct dm_stat *s;
+ sector_t len;
+ unsigned sz = 0;
+
+ /*
+ * Output format:
+ * <region_id>: <start_sector>+<length> <step> <program_id> <aux_data>
+ */
+
+ mutex_lock(&stats->mutex);
+ list_for_each_entry(s, &stats->list, list_entry) {
+ if (!program || !strcmp(program, s->program_id)) {
+ len = s->end - s->start;
+ DMEMIT("%d: %llu+%llu %llu %s %s\n", s->id,
+ (unsigned long long)s->start,
+ (unsigned long long)len,
+ (unsigned long long)s->step,
+ s->program_id,
+ s->aux_data);
+ }
+ }
+ mutex_unlock(&stats->mutex);
+
+ return 1;
+}
+
+static void dm_stat_round(struct dm_stat_shared *shared, struct dm_stat_percpu *p)
+{
+ /*
+ * This is racy, but so is part_round_stats_single.
+ */
+ unsigned long now = jiffies;
+ unsigned in_flight_read;
+ unsigned in_flight_write;
+ unsigned long difference = now - shared->stamp;
+
+ if (!difference)
+ return;
+ in_flight_read = (unsigned)atomic_read(&shared->in_flight[READ]);
+ in_flight_write = (unsigned)atomic_read(&shared->in_flight[WRITE]);
+ if (in_flight_read)
+ p->io_ticks[READ] += difference;
+ if (in_flight_write)
+ p->io_ticks[WRITE] += difference;
+ if (in_flight_read + in_flight_write) {
+ p->io_ticks_total += difference;
+ p->time_in_queue += (in_flight_read + in_flight_write) * difference;
+ }
+ shared->stamp = now;
+}
+
+static void dm_stat_for_entry(struct dm_stat *s, size_t entry,
+ unsigned long bi_rw, sector_t len, bool merged,
+ bool end, unsigned long duration)
+{
+ unsigned long idx = bi_rw & REQ_WRITE;
+ struct dm_stat_shared *shared = &s->stat_shared[entry];
+ struct dm_stat_percpu *p;
+
+ /*
+ * For strict correctness we should use local_irq_disable/enable
+ * instead of preempt_disable/enable.
+ *
+ * This is racy if the driver finishes bios from non-interrupt
+ * context as well as from interrupt context or from more different
+ * interrupts.
+ *
+ * However, the race only results in not counting some events,
+ * so it is acceptable.
+ *
+ * part_stat_lock()/part_stat_unlock() have this race too.
+ */
+ preempt_disable();
+ p = &s->stat_percpu[smp_processor_id()][entry];
+
+ if (!end) {
+ dm_stat_round(shared, p);
+ atomic_inc(&shared->in_flight[idx]);
+ } else {
+ dm_stat_round(shared, p);
+ atomic_dec(&shared->in_flight[idx]);
+ p->sectors[idx] += len;
+ p->ios[idx] += 1;
+ p->merges[idx] += merged;
+ p->ticks[idx] += duration;
+ }
+
+ preempt_enable();
+}
+
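+/*
+ * Account one bio against a single region: clip the bio to
+ * [s->start, s->end) and feed each "step"-sized area it overlaps its
+ * own fragment.
+ */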
+static void __dm_stat_bio(struct dm_stat *s, unsigned long bi_rw,
+ sector_t bi_sector, sector_t end_sector,
+ bool end, unsigned long duration,
+ struct dm_stats_aux *stats_aux)
+{
+ sector_t rel_sector, offset, todo, fragment_len;
+ size_t entry;
+
+ if (end_sector <= s->start || bi_sector >= s->end)
+ return;
+ if (unlikely(bi_sector < s->start)) {
+ rel_sector = 0;
+ todo = end_sector - s->start;
+ } else {
+ rel_sector = bi_sector - s->start;
+ todo = end_sector - bi_sector;
+ }
+ if (unlikely(end_sector > s->end))
+ todo -= (end_sector - s->end);
+
+ offset = dm_sector_div64(rel_sector, s->step);
+ entry = rel_sector;
+ do {
+ if (WARN_ON_ONCE(entry >= s->n_entries)) {
+ DMCRIT("Invalid area access in region id %d", s->id);
+ return;
+ }
+ fragment_len = todo;
+ if (fragment_len > s->step - offset)
+ fragment_len = s->step - offset;
+ dm_stat_for_entry(s, entry, bi_rw, fragment_len,
+ stats_aux->merged, end, duration);
+ todo -= fragment_len;
+ entry++;
+ offset = 0;
+ } while (unlikely(todo != 0));
+}
+
+void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw,
+ sector_t bi_sector, unsigned bi_sectors, bool end,
+ unsigned long duration, struct dm_stats_aux *stats_aux)
+{
+ struct dm_stat *s;
+ sector_t end_sector;
+ struct dm_stats_last_position *last;
+
+ if (unlikely(!bi_sectors))
+ return;
+
+ end_sector = bi_sector + bi_sectors;
+
+ if (!end) {
+ /*
+ * A race condition can at worst result in the merged flag being
+ * misrepresented, so we don't have to disable preemption here.
+ */
+ last = __this_cpu_ptr(stats->last);
+ stats_aux->merged =
+ (bi_sector == ACCESS_ONCE(last->last_sector) &&
+ ((bi_rw & (REQ_WRITE | REQ_DISCARD)) ==
+ (ACCESS_ONCE(last->last_rw) & (REQ_WRITE | REQ_DISCARD))));
+ ACCESS_ONCE(last->last_sector) = end_sector;
+ ACCESS_ONCE(last->last_rw) = bi_rw;
+ }
+
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(s, &stats->list, list_entry)
+ __dm_stat_bio(s, bi_rw, bi_sector, end_sector, end, duration, stats_aux);
+
+ rcu_read_unlock();
+}
+
+static void __dm_stat_init_temporary_percpu_totals(struct dm_stat_shared *shared,
+ struct dm_stat *s, size_t x)
+{
+ int cpu;
+ struct dm_stat_percpu *p;
+
+ local_irq_disable();
+ p = &s->stat_percpu[smp_processor_id()][x];
+ dm_stat_round(shared, p);
+ local_irq_enable();
+
+ memset(&shared->tmp, 0, sizeof(shared->tmp));
+ for_each_possible_cpu(cpu) {
+ p = &s->stat_percpu[cpu][x];
+ shared->tmp.sectors[READ] += ACCESS_ONCE(p->sectors[READ]);
+ shared->tmp.sectors[WRITE] += ACCESS_ONCE(p->sectors[WRITE]);
+ shared->tmp.ios[READ] += ACCESS_ONCE(p->ios[READ]);
+ shared->tmp.ios[WRITE] += ACCESS_ONCE(p->ios[WRITE]);
+ shared->tmp.merges[READ] += ACCESS_ONCE(p->merges[READ]);
+ shared->tmp.merges[WRITE] += ACCESS_ONCE(p->merges[WRITE]);
+ shared->tmp.ticks[READ] += ACCESS_ONCE(p->ticks[READ]);
+ shared->tmp.ticks[WRITE] += ACCESS_ONCE(p->ticks[WRITE]);
+ shared->tmp.io_ticks[READ] += ACCESS_ONCE(p->io_ticks[READ]);
+ shared->tmp.io_ticks[WRITE] += ACCESS_ONCE(p->io_ticks[WRITE]);
+ shared->tmp.io_ticks_total += ACCESS_ONCE(p->io_ticks_total);
+ shared->tmp.time_in_queue += ACCESS_ONCE(p->time_in_queue);
+ }
+}
+
+static void __dm_stat_clear(struct dm_stat *s, size_t idx_start, size_t idx_end,
+ bool init_tmp_percpu_totals)
+{
+ size_t x;
+ struct dm_stat_shared *shared;
+ struct dm_stat_percpu *p;
+
+ for (x = idx_start; x < idx_end; x++) {
+ shared = &s->stat_shared[x];
+ if (init_tmp_percpu_totals)
+ __dm_stat_init_temporary_percpu_totals(shared, s, x);
+ local_irq_disable();
+ p = &s->stat_percpu[smp_processor_id()][x];
+ p->sectors[READ] -= shared->tmp.sectors[READ];
+ p->sectors[WRITE] -= shared->tmp.sectors[WRITE];
+ p->ios[READ] -= shared->tmp.ios[READ];
+ p->ios[WRITE] -= shared->tmp.ios[WRITE];
+ p->merges[READ] -= shared->tmp.merges[READ];
+ p->merges[WRITE] -= shared->tmp.merges[WRITE];
+ p->ticks[READ] -= shared->tmp.ticks[READ];
+ p->ticks[WRITE] -= shared->tmp.ticks[WRITE];
+ p->io_ticks[READ] -= shared->tmp.io_ticks[READ];
+ p->io_ticks[WRITE] -= shared->tmp.io_ticks[WRITE];
+ p->io_ticks_total -= shared->tmp.io_ticks_total;
+ p->time_in_queue -= shared->tmp.time_in_queue;
+ local_irq_enable();
+ }
+}
+
+static int dm_stats_clear(struct dm_stats *stats, int id)
+{
+ struct dm_stat *s;
+
+ mutex_lock(&stats->mutex);
+
+ s = __dm_stats_find(stats, id);
+ if (!s) {
+ mutex_unlock(&stats->mutex);
+ return -ENOENT;
+ }
+
+ __dm_stat_clear(s, 0, s->n_entries, true);
+
+ mutex_unlock(&stats->mutex);
+
+ return 1;
+}
+
+/*
+ * This is like jiffies_to_msecs, but works for 64-bit values.
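+ * (jiffies_to_msecs only takes an unsigned long, so the value is
+ * converted in 22-bit chunks to avoid overflow on 32-bit systems.)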
+ */
+static unsigned long long dm_jiffies_to_msec64(unsigned long long j)
+{
+ unsigned long long result = 0;
+ unsigned mult;
+
+ if (j)
+ result = jiffies_to_msecs(j & 0x3fffff);
+ if (j >= 1 << 22) {
+ mult = jiffies_to_msecs(1 << 22);
+ result += (unsigned long long)mult * (unsigned long long)jiffies_to_msecs((j >> 22) & 0x3fffff);
+ }
+ if (j >= 1ULL << 44)
+ result += (unsigned long long)mult * (unsigned long long)mult * (unsigned long long)jiffies_to_msecs(j >> 44);
+
+ return result;
+}
+
+static int dm_stats_print(struct dm_stats *stats, int id,
+ size_t idx_start, size_t idx_len,
+ bool clear, char *result, unsigned maxlen)
+{
+ unsigned sz = 0;
+ struct dm_stat *s;
+ size_t x;
+ sector_t start, end, step;
+ size_t idx_end;
+ struct dm_stat_shared *shared;
+
+ /*
+ * Output format:
+ * <start_sector>+<length> counters
+ */
+
+ mutex_lock(&stats->mutex);
+
+ s = __dm_stats_find(stats, id);
+ if (!s) {
+ mutex_unlock(&stats->mutex);
+ return -ENOENT;
+ }
+
+ idx_end = idx_start + idx_len;
+ if (idx_end < idx_start ||
+ idx_end > s->n_entries)
+ idx_end = s->n_entries;
+
+ if (idx_start > idx_end)
+ idx_start = idx_end;
+
+ step = s->step;
+ start = s->start + (step * idx_start);
+
+ for (x = idx_start; x < idx_end; x++, start = end) {
+ shared = &s->stat_shared[x];
+ end = start + step;
+ if (unlikely(end > s->end))
+ end = s->end;
+
+ __dm_stat_init_temporary_percpu_totals(shared, s, x);
+
+ DMEMIT("%llu+%llu %llu %llu %llu %llu %llu %llu %llu %llu %d %llu %llu %llu %llu\n",
+ (unsigned long long)start,
+ (unsigned long long)step,
+ shared->tmp.ios[READ],
+ shared->tmp.merges[READ],
+ shared->tmp.sectors[READ],
+ dm_jiffies_to_msec64(shared->tmp.ticks[READ]),
+ shared->tmp.ios[WRITE],
+ shared->tmp.merges[WRITE],
+ shared->tmp.sectors[WRITE],
+ dm_jiffies_to_msec64(shared->tmp.ticks[WRITE]),
+ dm_stat_in_flight(shared),
+ dm_jiffies_to_msec64(shared->tmp.io_ticks_total),
+ dm_jiffies_to_msec64(shared->tmp.time_in_queue),
+ dm_jiffies_to_msec64(shared->tmp.io_ticks[READ]),
+ dm_jiffies_to_msec64(shared->tmp.io_ticks[WRITE]));
+
+ if (unlikely(sz + 1 >= maxlen))
+ goto buffer_overflow;
+ }
+
+ if (clear)
+ __dm_stat_clear(s, idx_start, idx_end, false);
+
+buffer_overflow:
+ mutex_unlock(&stats->mutex);
+
+ return 1;
+}
+
+static int dm_stats_set_aux(struct dm_stats *stats, int id, const char *aux_data)
+{
+ struct dm_stat *s;
+ const char *new_aux_data;
+
+ mutex_lock(&stats->mutex);
+
+ s = __dm_stats_find(stats, id);
+ if (!s) {
+ mutex_unlock(&stats->mutex);
+ return -ENOENT;
+ }
+
+ new_aux_data = kstrdup(aux_data, GFP_KERNEL);
+ if (!new_aux_data) {
+ mutex_unlock(&stats->mutex);
+ return -ENOMEM;
+ }
+
+ kfree(s->aux_data);
+ s->aux_data = new_aux_data;
+
+ mutex_unlock(&stats->mutex);
+
+ return 0;
+}
+
+static int message_stats_create(struct mapped_device *md,
+ unsigned argc, char **argv,
+ char *result, unsigned maxlen)
+{
+ int id;
+ char dummy;
+ unsigned long long start, end, len, step;
+ unsigned divisor;
+ const char *program_id, *aux_data;
+
+ /*
+ * Input format:
+ * <range> <step> [<program_id> [<aux_data>]]
+ */
+
+ if (argc < 3 || argc > 5)
+ return -EINVAL;
+
+ if (!strcmp(argv[1], "-")) {
+ start = 0;
+ len = dm_get_size(md);
+ if (!len)
+ len = 1;
+ } else if (sscanf(argv[1], "%llu+%llu%c", &start, &len, &dummy) != 2 ||
+ start != (sector_t)start || len != (sector_t)len)
+ return -EINVAL;
+
+ end = start + len;
+ if (start >= end)
+ return -EINVAL;
+
+ if (sscanf(argv[2], "/%u%c", &divisor, &dummy) == 1) {
+ step = end - start;
+ if (do_div(step, divisor))
+ step++;
+ if (!step)
+ step = 1;
+ } else if (sscanf(argv[2], "%llu%c", &step, &dummy) != 1 ||
+ step != (sector_t)step || !step)
+ return -EINVAL;
+
+ program_id = "-";
+ aux_data = "-";
+
+ if (argc > 3)
+ program_id = argv[3];
+
+ if (argc > 4)
+ aux_data = argv[4];
+
+ /*
+ * If a buffer overflow happens after we created the region,
+ * it's too late (the userspace would retry with a larger
+ * buffer, but the region id that caused the overflow is already
+ * leaked). So we must detect buffer overflow in advance.
+ */
+ snprintf(result, maxlen, "%d", INT_MAX);
+ if (dm_message_test_buffer_overflow(result, maxlen))
+ return 1;
+
+ id = dm_stats_create(dm_get_stats(md), start, end, step, program_id, aux_data,
+ dm_internal_suspend, dm_internal_resume, md);
+ if (id < 0)
+ return id;
+
+ snprintf(result, maxlen, "%d", id);
+
+ return 1;
+}
+
+static int message_stats_delete(struct mapped_device *md,
+ unsigned argc, char **argv)
+{
+ int id;
+ char dummy;
+
+ if (argc != 2)
+ return -EINVAL;
+
+ if (sscanf(argv[1], "%d%c", &id, &dummy) != 1 || id < 0)
+ return -EINVAL;
+
+ return dm_stats_delete(dm_get_stats(md), id);
+}
+
+static int message_stats_clear(struct mapped_device *md,
+ unsigned argc, char **argv)
+{
+ int id;
+ char dummy;
+
+ if (argc != 2)
+ return -EINVAL;
+
+ if (sscanf(argv[1], "%d%c", &id, &dummy) != 1 || id < 0)
+ return -EINVAL;
+
+ return dm_stats_clear(dm_get_stats(md), id);
+}
+
+static int message_stats_list(struct mapped_device *md,
+ unsigned argc, char **argv,
+ char *result, unsigned maxlen)
+{
+ int r;
+ const char *program = NULL;
+
+ if (argc < 1 || argc > 2)
+ return -EINVAL;
+
+ if (argc > 1) {
+ program = kstrdup(argv[1], GFP_KERNEL);
+ if (!program)
+ return -ENOMEM;
+ }
+
+ r = dm_stats_list(dm_get_stats(md), program, result, maxlen);
+
+ kfree(program);
+
+ return r;
+}
+
+static int message_stats_print(struct mapped_device *md,
+ unsigned argc, char **argv, bool clear,
+ char *result, unsigned maxlen)
+{
+ int id;
+ char dummy;
+ unsigned long idx_start = 0, idx_len = ULONG_MAX;
+
+ if (argc != 2 && argc != 4)
+ return -EINVAL;
+
+ if (sscanf(argv[1], "%d%c", &id, &dummy) != 1 || id < 0)
+ return -EINVAL;
+
+ if (argc > 3) {
+ if (strcmp(argv[2], "-") &&
+ sscanf(argv[2], "%lu%c", &idx_start, &dummy) != 1)
+ return -EINVAL;
+ if (strcmp(argv[3], "-") &&
+ sscanf(argv[3], "%lu%c", &idx_len, &dummy) != 1)
+ return -EINVAL;
+ }
+
+ return dm_stats_print(dm_get_stats(md), id, idx_start, idx_len, clear,
+ result, maxlen);
+}
+
+static int message_stats_set_aux(struct mapped_device *md,
+ unsigned argc, char **argv)
+{
+ int id;
+ char dummy;
+
+ if (argc != 3)
+ return -EINVAL;
+
+ if (sscanf(argv[1], "%d%c", &id, &dummy) != 1 || id < 0)
+ return -EINVAL;
+
+ return dm_stats_set_aux(dm_get_stats(md), id, argv[2]);
+}
+
+int dm_stats_message(struct mapped_device *md, unsigned argc, char **argv,
+ char *result, unsigned maxlen)
+{
+ int r;
+
+ if (dm_request_based(md)) {
+ DMWARN("Statistics are only supported for bio-based devices");
+ return -EOPNOTSUPP;
+ }
+
+ /* All messages here must start with '@' */
+ if (!strcasecmp(argv[0], "@stats_create"))
+ r = message_stats_create(md, argc, argv, result, maxlen);
+ else if (!strcasecmp(argv[0], "@stats_delete"))
+ r = message_stats_delete(md, argc, argv);
+ else if (!strcasecmp(argv[0], "@stats_clear"))
+ r = message_stats_clear(md, argc, argv);
+ else if (!strcasecmp(argv[0], "@stats_list"))
+ r = message_stats_list(md, argc, argv, result, maxlen);
+ else if (!strcasecmp(argv[0], "@stats_print"))
+ r = message_stats_print(md, argc, argv, false, result, maxlen);
+ else if (!strcasecmp(argv[0], "@stats_print_clear"))
+ r = message_stats_print(md, argc, argv, true, result, maxlen);
+ else if (!strcasecmp(argv[0], "@stats_set_aux"))
+ r = message_stats_set_aux(md, argc, argv);
+ else
+ return 2; /* this wasn't a stats message */
+
+ if (r == -EINVAL)
+ DMWARN("Invalid parameters for message %s", argv[0]);
+
+ return r;
+}
+
+int __init dm_statistics_init(void)
+{
+ dm_stat_need_rcu_barrier = 0;
+ return 0;
+}
+
+void dm_statistics_exit(void)
+{
+ if (dm_stat_need_rcu_barrier)
+ rcu_barrier();
+ if (WARN_ON(shared_memory_amount))
+ DMCRIT("shared_memory_amount leaked: %lu", shared_memory_amount);
+}
+
+module_param_named(stats_current_allocated_bytes, shared_memory_amount, ulong, S_IRUGO);
+MODULE_PARM_DESC(stats_current_allocated_bytes, "Memory currently used by statistics");
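__dm_stat_bio above splits every bio across the fixed-size areas of a region before the per-area counters are bumped. The clipping and fragmenting arithmetic can be exercised on its own; a userspace sketch with assumed names and no kernel dependencies:

#include <stdio.h>
#include <stdint.h>

/* Split [sector, sector + len) over areas of "step" sectors in the
 * region [start, end), printing the fragment accounted to each area,
 * the way __dm_stat_bio walks its entries. */
static void account(uint64_t start, uint64_t end, uint64_t step,
		    uint64_t sector, uint64_t len)
{
	uint64_t bio_end = sector + len, rel, offset, todo, frag, entry;

	if (bio_end <= start || sector >= end)
		return;			/* no overlap with the region */
	rel = sector < start ? 0 : sector - start;
	todo = (bio_end > end ? end : bio_end) - (start + rel);
	offset = rel % step;
	entry = rel / step;
	do {
		frag = todo < step - offset ? todo : step - offset;
		printf("area %llu: %llu sectors\n",
		       (unsigned long long)entry, (unsigned long long)frag);
		todo -= frag;
		entry++;
		offset = 0;
	} while (todo);
}

int main(void)
{
	/* Region covering sectors [0, 1024) in 256-sector areas; a bio of
	 * 300 sectors at offset 200 leaves 56 sectors in area 0 and 244
	 * in area 1. */
	account(0, 1024, 256, 200, 300);
	return 0;
}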
diff --git a/drivers/md/dm-stats.h b/drivers/md/dm-stats.h
new file mode 100644
index 0000000..e7c4984
--- /dev/null
+++ b/drivers/md/dm-stats.h
@@ -0,0 +1,40 @@
+#ifndef DM_STATS_H
+#define DM_STATS_H
+
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/list.h>
+
+int dm_statistics_init(void);
+void dm_statistics_exit(void);
+
+struct dm_stats {
+ struct mutex mutex;
+ struct list_head list; /* list of struct dm_stat */
+ struct dm_stats_last_position __percpu *last;
+ sector_t last_sector;
+ unsigned last_rw;
+};
+
+struct dm_stats_aux {
+ bool merged;
+};
+
+void dm_stats_init(struct dm_stats *st);
+void dm_stats_cleanup(struct dm_stats *st);
+
+struct mapped_device;
+
+int dm_stats_message(struct mapped_device *md, unsigned argc, char **argv,
+ char *result, unsigned maxlen);
+
+void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw,
+ sector_t bi_sector, unsigned bi_sectors, bool end,
+ unsigned long duration, struct dm_stats_aux *aux);
+
+static inline bool dm_stats_used(struct dm_stats *st)
+{
+ return !list_empty(&st->list);
+}
+
+#endif
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index d907ca6..73c1712 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -4,6 +4,7 @@
* This file is released under the GPL.
*/
+#include "dm.h"
#include <linux/device-mapper.h>
#include <linux/module.h>
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index f221812..8f87835 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -860,14 +860,17 @@ EXPORT_SYMBOL(dm_consume_args);
static int dm_table_set_type(struct dm_table *t)
{
unsigned i;
- unsigned bio_based = 0, request_based = 0;
+ unsigned bio_based = 0, request_based = 0, hybrid = 0;
struct dm_target *tgt;
struct dm_dev_internal *dd;
struct list_head *devices;
+ unsigned live_md_type;
for (i = 0; i < t->num_targets; i++) {
tgt = t->targets + i;
- if (dm_target_request_based(tgt))
+ if (dm_target_hybrid(tgt))
+ hybrid = 1;
+ else if (dm_target_request_based(tgt))
request_based = 1;
else
bio_based = 1;
@@ -879,6 +882,19 @@ static int dm_table_set_type(struct dm_table *t)
}
}
+ if (hybrid && !bio_based && !request_based) {
+ /*
+ * The targets can work either way.
+ * Determine the type from the live device.
+ * Default to bio-based if device is new.
+ */
+ live_md_type = dm_get_md_type(t->md);
+ if (live_md_type == DM_TYPE_REQUEST_BASED)
+ request_based = 1;
+ else
+ bio_based = 1;
+ }
+
if (bio_based) {
/* We must use this table as bio-based */
t->type = DM_TYPE_BIO_BASED;
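A hybrid target advertises both .map and .map_rq (the reworked error target in the next file is the first such target), and dm_table_set_type resolves the table's type from the live device, defaulting to bio-based for new devices. The decision reduces to a small function; an illustrative sketch:

#include <stdio.h>

enum dm_type { DM_TYPE_NONE, DM_TYPE_BIO_BASED, DM_TYPE_REQUEST_BASED };

struct target_ops { int has_map, has_map_rq; };

static int hybrid(const struct target_ops *t)
{
	return t->has_map && t->has_map_rq;
}

/* Resolve a table made of a single hybrid target against the type the
 * live device already has; new devices default to bio-based. */
static enum dm_type resolve(const struct target_ops *t, enum dm_type live)
{
	if (hybrid(t))
		return live == DM_TYPE_REQUEST_BASED ?
		       DM_TYPE_REQUEST_BASED : DM_TYPE_BIO_BASED;
	return t->has_map_rq ? DM_TYPE_REQUEST_BASED : DM_TYPE_BIO_BASED;
}

int main(void)
{
	struct target_ops error_tgt = { 1, 1 };

	printf("%d\n", resolve(&error_tgt, DM_TYPE_NONE));          /* bio */
	printf("%d\n", resolve(&error_tgt, DM_TYPE_REQUEST_BASED)); /* rq  */
	return 0;
}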
diff --git a/drivers/md/dm-target.c b/drivers/md/dm-target.c
index 37ba5db..242e3ce 100644
--- a/drivers/md/dm-target.c
+++ b/drivers/md/dm-target.c
@@ -131,12 +131,19 @@ static int io_err_map(struct dm_target *tt, struct bio *bio)
return -EIO;
}
+static int io_err_map_rq(struct dm_target *ti, struct request *clone,
+ union map_info *map_context)
+{
+ return -EIO;
+}
+
static struct target_type error_target = {
.name = "error",
- .version = {1, 1, 0},
+ .version = {1, 2, 0},
.ctr = io_err_ctr,
.dtr = io_err_dtr,
.map = io_err_map,
+ .map_rq = io_err_map_rq,
};
int __init dm_target_init(void)
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 88f2f80..ed06342 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -887,7 +887,8 @@ static int commit(struct pool *pool)
r = dm_pool_commit_metadata(pool->pmd);
if (r)
- DMERR_LIMIT("commit failed: error = %d", r);
+ DMERR_LIMIT("%s: commit failed: error = %d",
+ dm_device_name(pool->pool_md), r);
return r;
}
@@ -917,6 +918,13 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
unsigned long flags;
struct pool *pool = tc->pool;
+ /*
+ * Once no_free_space is set we must not allow allocation to succeed.
+ * Otherwise it is difficult to explain, debug, test and support.
+ */
+ if (pool->no_free_space)
+ return -ENOSPC;
+
r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
if (r)
return r;
@@ -931,31 +939,30 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
}
if (!free_blocks) {
- if (pool->no_free_space)
- return -ENOSPC;
- else {
- /*
- * Try to commit to see if that will free up some
- * more space.
- */
- (void) commit_or_fallback(pool);
+ /*
+ * Try to commit to see if that will free up some
+ * more space.
+ */
+ (void) commit_or_fallback(pool);
- r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
- if (r)
- return r;
+ r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
+ if (r)
+ return r;
- /*
- * If we still have no space we set a flag to avoid
- * doing all this checking and return -ENOSPC.
- */
- if (!free_blocks) {
- DMWARN("%s: no free space available.",
- dm_device_name(pool->pool_md));
- spin_lock_irqsave(&pool->lock, flags);
- pool->no_free_space = 1;
- spin_unlock_irqrestore(&pool->lock, flags);
- return -ENOSPC;
- }
+ /*
+ * If we still have no space we set a flag to avoid
+ * doing all this checking and return -ENOSPC. This
+ * flag serves as a latch that disallows allocations from
+ * this pool until the admin takes action (e.g. resize or
+ * table reload).
+ */
+ if (!free_blocks) {
+ DMWARN("%s: no free space available.",
+ dm_device_name(pool->pool_md));
+ spin_lock_irqsave(&pool->lock, flags);
+ pool->no_free_space = 1;
+ spin_unlock_irqrestore(&pool->lock, flags);
+ return -ENOSPC;
}
}
@@ -1085,6 +1092,7 @@ static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block,
{
int r;
dm_block_t data_block;
+ struct pool *pool = tc->pool;
r = alloc_data_block(tc, &data_block);
switch (r) {
@@ -1094,13 +1102,14 @@ static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block,
break;
case -ENOSPC:
- no_space(tc->pool, cell);
+ no_space(pool, cell);
break;
default:
DMERR_LIMIT("%s: alloc_data_block() failed: error = %d",
__func__, r);
- cell_error(tc->pool, cell);
+ set_pool_mode(pool, PM_READ_ONLY);
+ cell_error(pool, cell);
break;
}
}
@@ -1386,7 +1395,8 @@ static void set_pool_mode(struct pool *pool, enum pool_mode mode)
switch (mode) {
case PM_FAIL:
- DMERR("switching pool to failure mode");
+ DMERR("%s: switching pool to failure mode",
+ dm_device_name(pool->pool_md));
pool->process_bio = process_bio_fail;
pool->process_discard = process_bio_fail;
pool->process_prepared_mapping = process_prepared_mapping_fail;
@@ -1394,10 +1404,12 @@ static void set_pool_mode(struct pool *pool, enum pool_mode mode)
break;
case PM_READ_ONLY:
- DMERR("switching pool to read-only mode");
+ DMERR("%s: switching pool to read-only mode",
+ dm_device_name(pool->pool_md));
r = dm_pool_abort_metadata(pool->pmd);
if (r) {
- DMERR("aborting transaction failed");
+ DMERR("%s: aborting transaction failed",
+ dm_device_name(pool->pool_md));
set_pool_mode(pool, PM_FAIL);
} else {
dm_pool_metadata_read_only(pool->pmd);
@@ -2156,19 +2168,22 @@ static int maybe_resize_data_dev(struct dm_target *ti, bool *need_commit)
r = dm_pool_get_data_dev_size(pool->pmd, &sb_data_size);
if (r) {
- DMERR("failed to retrieve data device size");
+ DMERR("%s: failed to retrieve data device size",
+ dm_device_name(pool->pool_md));
return r;
}
if (data_size < sb_data_size) {
- DMERR("pool target (%llu blocks) too small: expected %llu",
+ DMERR("%s: pool target (%llu blocks) too small: expected %llu",
+ dm_device_name(pool->pool_md),
(unsigned long long)data_size, sb_data_size);
return -EINVAL;
} else if (data_size > sb_data_size) {
r = dm_pool_resize_data_dev(pool->pmd, data_size);
if (r) {
- DMERR("failed to resize data device");
+ DMERR("%s: failed to resize data device",
+ dm_device_name(pool->pool_md));
set_pool_mode(pool, PM_READ_ONLY);
return r;
}
@@ -2192,19 +2207,22 @@ static int maybe_resize_metadata_dev(struct dm_target *ti, bool *need_commit)
r = dm_pool_get_metadata_dev_size(pool->pmd, &sb_metadata_dev_size);
if (r) {
- DMERR("failed to retrieve data device size");
+ DMERR("%s: failed to retrieve metadata device size",
+ dm_device_name(pool->pool_md));
return r;
}
if (metadata_dev_size < sb_metadata_dev_size) {
- DMERR("metadata device (%llu blocks) too small: expected %llu",
+ DMERR("%s: metadata device (%llu blocks) too small: expected %llu",
+ dm_device_name(pool->pool_md),
metadata_dev_size, sb_metadata_dev_size);
return -EINVAL;
} else if (metadata_dev_size > sb_metadata_dev_size) {
r = dm_pool_resize_metadata_dev(pool->pmd, metadata_dev_size);
if (r) {
- DMERR("failed to resize metadata device");
+ DMERR("%s: failed to resize metadata device",
+ dm_device_name(pool->pool_md));
return r;
}
@@ -2530,37 +2548,43 @@ static void pool_status(struct dm_target *ti, status_type_t type,
r = dm_pool_get_metadata_transaction_id(pool->pmd, &transaction_id);
if (r) {
- DMERR("dm_pool_get_metadata_transaction_id returned %d", r);
+ DMERR("%s: dm_pool_get_metadata_transaction_id returned %d",
+ dm_device_name(pool->pool_md), r);
goto err;
}
r = dm_pool_get_free_metadata_block_count(pool->pmd, &nr_free_blocks_metadata);
if (r) {
- DMERR("dm_pool_get_free_metadata_block_count returned %d", r);
+ DMERR("%s: dm_pool_get_free_metadata_block_count returned %d",
+ dm_device_name(pool->pool_md), r);
goto err;
}
r = dm_pool_get_metadata_dev_size(pool->pmd, &nr_blocks_metadata);
if (r) {
- DMERR("dm_pool_get_metadata_dev_size returned %d", r);
+ DMERR("%s: dm_pool_get_metadata_dev_size returned %d",
+ dm_device_name(pool->pool_md), r);
goto err;
}
r = dm_pool_get_free_block_count(pool->pmd, &nr_free_blocks_data);
if (r) {
- DMERR("dm_pool_get_free_block_count returned %d", r);
+ DMERR("%s: dm_pool_get_free_block_count returned %d",
+ dm_device_name(pool->pool_md), r);
goto err;
}
r = dm_pool_get_data_dev_size(pool->pmd, &nr_blocks_data);
if (r) {
- DMERR("dm_pool_get_data_dev_size returned %d", r);
+ DMERR("%s: dm_pool_get_data_dev_size returned %d",
+ dm_device_name(pool->pool_md), r);
goto err;
}
r = dm_pool_get_metadata_snap(pool->pmd, &held_root);
if (r) {
- DMERR("dm_pool_get_metadata_snap returned %d", r);
+ DMERR("%s: dm_pool_get_metadata_snap returned %d",
+ dm_device_name(pool->pool_md), r);
goto err;
}
@@ -2648,9 +2672,17 @@ static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
struct pool_c *pt = ti->private;
struct pool *pool = pt->pool;
+ uint64_t io_opt_sectors = limits->io_opt >> SECTOR_SHIFT;
- blk_limits_io_min(limits, 0);
- blk_limits_io_opt(limits, pool->sectors_per_block << SECTOR_SHIFT);
+ /*
+ * If the system-determined stacked limits are compatible with the
+ * pool's blocksize (io_opt is a factor) do not override them.
+ */
+ if (io_opt_sectors < pool->sectors_per_block ||
+ do_div(io_opt_sectors, pool->sectors_per_block)) {
+ blk_limits_io_min(limits, 0);
+ blk_limits_io_opt(limits, pool->sectors_per_block << SECTOR_SHIFT);
+ }
/*
* pt->adjusted_pf is a staging area for the actual features to use.
@@ -2669,7 +2701,7 @@ static struct target_type pool_target = {
.name = "thin-pool",
.features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
DM_TARGET_IMMUTABLE,
- .version = {1, 8, 0},
+ .version = {1, 9, 0},
.module = THIS_MODULE,
.ctr = pool_ctr,
.dtr = pool_dtr,
@@ -2956,7 +2988,7 @@ static int thin_iterate_devices(struct dm_target *ti,
static struct target_type thin_target = {
.name = "thin",
- .version = {1, 8, 0},
+ .version = {1, 9, 0},
.module = THIS_MODULE,
.ctr = thin_ctr,
.dtr = thin_dtr,
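With the hunk above, alloc_data_block checks the no_free_space latch before doing any work, so a pool that has returned -ENOSPC keeps failing until the admin resizes it or reloads the table. The behaviour is a sticky flag; a toy model:

#include <stdio.h>

struct pool { unsigned long free_blocks; int no_free_space; };

/* Returns 0 on success, -1 once space is exhausted; the flag latches
 * so later calls fail immediately, as in alloc_data_block(). */
static int alloc_block(struct pool *p)
{
	if (p->no_free_space)
		return -1;
	if (!p->free_blocks) {
		/* a real pool would try a commit here to reclaim space */
		p->no_free_space = 1;
		return -1;
	}
	p->free_blocks--;
	return 0;
}

/* An admin action (resize/table reload) is what clears the latch. */
static void pool_resized(struct pool *p, unsigned long new_free)
{
	p->free_blocks = new_free;
	p->no_free_space = 0;
}

int main(void)
{
	struct pool p = { 1, 0 };
	int i;

	for (i = 0; i < 3; i++)
		printf("alloc -> %d\n", alloc_block(&p)); /* 0, -1, -1 */
	pool_resized(&p, 2);
	printf("after resize -> %d\n", alloc_block(&p));  /* 0 */
	return 0;
}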
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 9e39d2b..6a5e9ed 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -60,6 +60,7 @@ struct dm_io {
struct bio *bio;
unsigned long start_time;
spinlock_t endio_lock;
+ struct dm_stats_aux stats_aux;
};
/*
@@ -198,6 +199,8 @@ struct mapped_device {
/* zero-length flush that will be cloned and submitted to targets */
struct bio flush_bio;
+
+ struct dm_stats stats;
};
/*
@@ -269,6 +272,7 @@ static int (*_inits[])(void) __initdata = {
dm_io_init,
dm_kcopyd_init,
dm_interface_init,
+ dm_statistics_init,
};
static void (*_exits[])(void) = {
@@ -279,6 +283,7 @@ static void (*_exits[])(void) = {
dm_io_exit,
dm_kcopyd_exit,
dm_interface_exit,
+ dm_statistics_exit,
};
static int __init dm_init(void)
@@ -384,6 +389,16 @@ int dm_lock_for_deletion(struct mapped_device *md)
return r;
}
+sector_t dm_get_size(struct mapped_device *md)
+{
+ return get_capacity(md->disk);
+}
+
+struct dm_stats *dm_get_stats(struct mapped_device *md)
+{
+ return &md->stats;
+}
+
static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
struct mapped_device *md = bdev->bd_disk->private_data;
@@ -466,8 +481,9 @@ static int md_in_flight(struct mapped_device *md)
static void start_io_acct(struct dm_io *io)
{
struct mapped_device *md = io->md;
+ struct bio *bio = io->bio;
int cpu;
- int rw = bio_data_dir(io->bio);
+ int rw = bio_data_dir(bio);
io->start_time = jiffies;
@@ -476,6 +492,10 @@ static void start_io_acct(struct dm_io *io)
part_stat_unlock();
atomic_set(&dm_disk(md)->part0.in_flight[rw],
atomic_inc_return(&md->pending[rw]));
+
+ if (unlikely(dm_stats_used(&md->stats)))
+ dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_sector,
+ bio_sectors(bio), false, 0, &io->stats_aux);
}
static void end_io_acct(struct dm_io *io)
@@ -491,6 +511,10 @@ static void end_io_acct(struct dm_io *io)
part_stat_add(cpu, &dm_disk(md)->part0, ticks[rw], duration);
part_stat_unlock();
+ if (unlikely(dm_stats_used(&md->stats)))
+ dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_sector,
+ bio_sectors(bio), true, duration, &io->stats_aux);
+
/*
* After this is decremented the bio must not be touched if it is
* a flush.
@@ -1519,7 +1543,7 @@ static void _dm_request(struct request_queue *q, struct bio *bio)
return;
}
-static int dm_request_based(struct mapped_device *md)
+int dm_request_based(struct mapped_device *md)
{
return blk_queue_stackable(md->queue);
}
@@ -1946,8 +1970,7 @@ static struct mapped_device *alloc_dev(int minor)
add_disk(md->disk);
format_dev_t(md->name, MKDEV(_major, minor));
- md->wq = alloc_workqueue("kdmflush",
- WQ_NON_REENTRANT | WQ_MEM_RECLAIM, 0);
+ md->wq = alloc_workqueue("kdmflush", WQ_MEM_RECLAIM, 0);
if (!md->wq)
goto bad_thread;
@@ -1959,6 +1982,8 @@ static struct mapped_device *alloc_dev(int minor)
md->flush_bio.bi_bdev = md->bdev;
md->flush_bio.bi_rw = WRITE_FLUSH;
+ dm_stats_init(&md->stats);
+
/* Populate the mapping, nobody knows we exist yet */
spin_lock(&_minor_lock);
old_md = idr_replace(&_minor_idr, md, minor);
@@ -2010,6 +2035,7 @@ static void free_dev(struct mapped_device *md)
put_disk(md->disk);
blk_cleanup_queue(md->queue);
+ dm_stats_cleanup(&md->stats);
module_put(THIS_MODULE);
kfree(md);
}
@@ -2151,7 +2177,7 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
/*
* Wipe any geometry if the size of the table changed.
*/
- if (size != get_capacity(md->disk))
+ if (size != dm_get_size(md))
memset(&md->geometry, 0, sizeof(md->geometry));
__set_size(md, size);
@@ -2236,11 +2262,13 @@ void dm_unlock_md_type(struct mapped_device *md)
void dm_set_md_type(struct mapped_device *md, unsigned type)
{
+ BUG_ON(!mutex_is_locked(&md->type_lock));
md->type = type;
}
unsigned dm_get_md_type(struct mapped_device *md)
{
+ BUG_ON(!mutex_is_locked(&md->type_lock));
return md->type;
}
@@ -2695,6 +2723,38 @@ out:
return r;
}
+/*
+ * Internal suspend/resume works like userspace-driven suspend. It waits
+ * until all bios finish and prevents issuing new bios to the target drivers.
+ * It may be used only from the kernel.
+ *
+ * Internal suspend holds md->suspend_lock, which prevents interaction with
+ * userspace-driven suspend.
+ */
+
+void dm_internal_suspend(struct mapped_device *md)
+{
+ mutex_lock(&md->suspend_lock);
+ if (dm_suspended_md(md))
+ return;
+
+ set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
+ synchronize_srcu(&md->io_barrier);
+ flush_workqueue(md->wq);
+ dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
+}
+
+void dm_internal_resume(struct mapped_device *md)
+{
+ if (dm_suspended_md(md))
+ goto done;
+
+ dm_queue_flush(md);
+
+done:
+ mutex_unlock(&md->suspend_lock);
+}
+
/*-----------------------------------------------------------------
* Event notification.
*---------------------------------------------------------------*/
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index 45b97da..5e604cc 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -16,6 +16,8 @@
#include <linux/blkdev.h>
#include <linux/hdreg.h>
+#include "dm-stats.h"
+
/*
* Suspend feature flags
*/
@@ -89,10 +91,21 @@ int dm_setup_md_queue(struct mapped_device *md);
#define dm_target_is_valid(t) ((t)->table)
/*
+ * To check whether the target type is bio-based or not (request-based).
+ */
+#define dm_target_bio_based(t) ((t)->type->map != NULL)
+
+/*
* To check whether the target type is request-based or not (bio-based).
*/
#define dm_target_request_based(t) ((t)->type->map_rq != NULL)
+/*
+ * To check whether the target type is a hybrid (capable of being
+ * either request-based or bio-based).
+ */
+#define dm_target_hybrid(t) (dm_target_bio_based(t) && dm_target_request_based(t))
+
/*-----------------------------------------------------------------
* A registry of target types.
*---------------------------------------------------------------*/
@@ -146,10 +159,16 @@ void dm_destroy(struct mapped_device *md);
void dm_destroy_immediate(struct mapped_device *md);
int dm_open_count(struct mapped_device *md);
int dm_lock_for_deletion(struct mapped_device *md);
+int dm_request_based(struct mapped_device *md);
+sector_t dm_get_size(struct mapped_device *md);
+struct dm_stats *dm_get_stats(struct mapped_device *md);
int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
unsigned cookie);
+void dm_internal_suspend(struct mapped_device *md);
+void dm_internal_resume(struct mapped_device *md);
+
int dm_io_init(void);
void dm_io_exit(void);
@@ -162,4 +181,12 @@ void dm_kcopyd_exit(void);
struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity, unsigned per_bio_data_size);
void dm_free_md_mempools(struct dm_md_mempools *pools);
+/*
+ * Helpers that are used by DM core
+ */
+static inline bool dm_message_test_buffer_overflow(char *result, unsigned maxlen)
+{
+ return !maxlen || strlen(result) + 1 >= maxlen;
+}
+
#endif
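dm_message_test_buffer_overflow considers a result buffer full once the string leaves no room for another character, which is how target_message decides between setting DM_BUFFER_FULL_FLAG and reporting a data_size. A userspace sketch of that flow (DMEMIT-style appends approximated with snprintf):

#include <stdio.h>
#include <string.h>
#include <stdbool.h>

static bool test_buffer_overflow(char *result, unsigned maxlen)
{
	return !maxlen || strlen(result) + 1 >= maxlen;
}

int main(void)
{
	char result[16];
	unsigned sz = 0;

	/* Emit a line the way DMEMIT does: append at offset sz. */
	sz += snprintf(result + sz, sizeof(result) - sz, "7: 0+1024\n");
	if (test_buffer_overflow(result, sizeof(result)))
		printf("DM_BUFFER_FULL_FLAG would be set\n");
	else
		printf("data_size = %zu\n", strlen(result) + 1);
	return 0;
}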
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 9f13e13..adf4d7e 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -1180,7 +1180,7 @@ static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev)
mddev->bitmap_info.offset =
mddev->bitmap_info.default_offset;
mddev->bitmap_info.space =
- mddev->bitmap_info.space;
+ mddev->bitmap_info.default_space;
}
} else if (mddev->pers == NULL) {
@@ -3429,7 +3429,7 @@ safe_delay_store(struct mddev *mddev, const char *cbuf, size_t len)
mddev->safemode_delay = (msec*HZ)/1000;
if (mddev->safemode_delay == 0)
mddev->safemode_delay = 1;
- if (mddev->safemode_delay < old_delay)
+ if (mddev->safemode_delay < old_delay || old_delay == 0)
md_safemode_timeout((unsigned long)mddev);
}
return len;
@@ -5144,7 +5144,7 @@ int md_run(struct mddev *mddev)
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
- if (mddev->flags)
+ if (mddev->flags & MD_UPDATE_SB_FLAGS)
md_update_sb(mddev, 0);
md_new_event(mddev);
@@ -5289,7 +5289,7 @@ static void __md_stop_writes(struct mddev *mddev)
md_super_wait(mddev);
if (mddev->ro == 0 &&
- (!mddev->in_sync || mddev->flags)) {
+ (!mddev->in_sync || (mddev->flags & MD_UPDATE_SB_FLAGS))) {
/* mark array as shutdown cleanly */
mddev->in_sync = 1;
md_update_sb(mddev, 1);
@@ -5337,8 +5337,14 @@ static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
err = -EBUSY;
goto out;
}
- if (bdev)
- sync_blockdev(bdev);
+ if (bdev && !test_bit(MD_STILL_CLOSED, &mddev->flags)) {
+ /* Someone opened the device since we flushed it
+ * so page cache could be dirty and it is too late
+ * to flush. So abort
+ */
+ mutex_unlock(&mddev->open_mutex);
+ return -EBUSY;
+ }
if (mddev->pers) {
__md_stop_writes(mddev);
@@ -5373,14 +5379,14 @@ static int do_md_stop(struct mddev * mddev, int mode,
mutex_unlock(&mddev->open_mutex);
return -EBUSY;
}
- if (bdev)
- /* It is possible IO was issued on some other
- * open file which was closed before we took ->open_mutex.
- * As that was not the last close __blkdev_put will not
- * have called sync_blockdev, so we must.
+ if (bdev && !test_bit(MD_STILL_CLOSED, &mddev->flags)) {
+ /* Someone opened the device since we flushed it
+ * so page cache could be dirty and it is too late
+ * to flush. So abort
*/
- sync_blockdev(bdev);
-
+ mutex_unlock(&mddev->open_mutex);
+ return -EBUSY;
+ }
if (mddev->pers) {
if (mddev->ro)
set_disk_ro(disk, 0);
@@ -5628,10 +5634,7 @@ static int get_bitmap_file(struct mddev * mddev, void __user * arg)
char *ptr, *buf = NULL;
int err = -ENOMEM;
- if (md_allow_write(mddev))
- file = kmalloc(sizeof(*file), GFP_NOIO);
- else
- file = kmalloc(sizeof(*file), GFP_KERNEL);
+ file = kmalloc(sizeof(*file), GFP_NOIO);
if (!file)
goto out;
@@ -6420,6 +6423,20 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
!test_bit(MD_RECOVERY_NEEDED,
&mddev->flags),
msecs_to_jiffies(5000));
+ if (cmd == STOP_ARRAY || cmd == STOP_ARRAY_RO) {
+ /* Need to flush page cache, and ensure no-one else opens
+ * and writes
+ */
+ mutex_lock(&mddev->open_mutex);
+ if (atomic_read(&mddev->openers) > 1) {
+ mutex_unlock(&mddev->open_mutex);
+ err = -EBUSY;
+ goto abort;
+ }
+ set_bit(MD_STILL_CLOSED, &mddev->flags);
+ mutex_unlock(&mddev->open_mutex);
+ sync_blockdev(bdev);
+ }
err = mddev_lock(mddev);
if (err) {
printk(KERN_INFO
@@ -6673,6 +6690,7 @@ static int md_open(struct block_device *bdev, fmode_t mode)
err = 0;
atomic_inc(&mddev->openers);
+ clear_bit(MD_STILL_CLOSED, &mddev->flags);
mutex_unlock(&mddev->open_mutex);
check_disk_change(bdev);
@@ -7817,7 +7835,7 @@ void md_check_recovery(struct mddev *mddev)
sysfs_notify_dirent_safe(mddev->sysfs_state);
}
- if (mddev->flags)
+ if (mddev->flags & MD_UPDATE_SB_FLAGS)
md_update_sb(mddev, 0);
if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
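The STOP_ARRAY handling above is a two-phase protocol: flush while holding open_mutex with MD_STILL_CLOSED set, then later abort if the flag was cleared by an intervening open (md_open clears it), since the page cache may be dirty again. A sketch of the flag dance, ignoring the locking:

#include <stdio.h>

struct dev { int openers; int still_closed; };

/* md_open() clears the flag, invalidating any earlier flush. */
static void dev_open(struct dev *d)
{
	d->openers++;
	d->still_closed = 0;
}

/* Phase 1: flush while we know nobody else has the device open. */
static int prepare_stop(struct dev *d)
{
	if (d->openers > 1)
		return -1;
	d->still_closed = 1;
	/* ... sync_blockdev() happens here ... */
	return 0;
}

/* Phase 2: only proceed if nobody opened the device since the flush. */
static int do_stop(struct dev *d)
{
	if (!d->still_closed)
		return -1;	/* page cache may be dirty again: abort */
	printf("stopped\n");
	return 0;
}

int main(void)
{
	struct dev d = { 1, 0 };

	if (prepare_stop(&d) == 0 && do_stop(&d) == 0)
		printf("clean stop\n");

	/* A racing open between the phases forces the abort path. */
	prepare_stop(&d);
	dev_open(&d);
	printf("%d\n", do_stop(&d));	/* -1 */
	return 0;
}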
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 20f02c0..608050c 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -204,12 +204,16 @@ struct mddev {
struct md_personality *pers;
dev_t unit;
int md_minor;
- struct list_head disks;
+ struct list_head disks;
unsigned long flags;
#define MD_CHANGE_DEVS 0 /* Some device status has changed */
#define MD_CHANGE_CLEAN 1 /* transition to or from 'clean' */
#define MD_CHANGE_PENDING 2 /* switch from 'clean' to 'active' in progress */
+#define MD_UPDATE_SB_FLAGS (1 | 2 | 4) /* If these are set, md_update_sb needed */
#define MD_ARRAY_FIRST_USE 3 /* First use of array, needs initialization */
+#define MD_STILL_CLOSED 4 /* If set, then array has not been opened since
+ * md_ioctl checked on it.
+ */
int suspended;
atomic_t active_io;
@@ -218,7 +222,7 @@ struct mddev {
* are happening, so run/
* takeover/stop are not safe
*/
- int ready; /* See when safe to pass
+ int ready; /* See when safe to pass
* IO requests down */
struct gendisk *gendisk;
diff --git a/drivers/md/persistent-data/dm-block-manager.c b/drivers/md/persistent-data/dm-block-manager.c
index 81b5138..a7e8bf2 100644
--- a/drivers/md/persistent-data/dm-block-manager.c
+++ b/drivers/md/persistent-data/dm-block-manager.c
@@ -615,6 +615,11 @@ int dm_bm_flush_and_unlock(struct dm_block_manager *bm,
}
EXPORT_SYMBOL_GPL(dm_bm_flush_and_unlock);
+void dm_bm_prefetch(struct dm_block_manager *bm, dm_block_t b)
+{
+ dm_bufio_prefetch(bm->bufio, b, 1);
+}
+
void dm_bm_set_read_only(struct dm_block_manager *bm)
{
bm->read_only = true;
diff --git a/drivers/md/persistent-data/dm-block-manager.h b/drivers/md/persistent-data/dm-block-manager.h
index be5bff6..9a82083 100644
--- a/drivers/md/persistent-data/dm-block-manager.h
+++ b/drivers/md/persistent-data/dm-block-manager.h
@@ -108,6 +108,11 @@ int dm_bm_unlock(struct dm_block *b);
int dm_bm_flush_and_unlock(struct dm_block_manager *bm,
struct dm_block *superblock);
+ /*
+ * Request that data be prefetched into the cache.
+ */
+void dm_bm_prefetch(struct dm_block_manager *bm, dm_block_t b);
+
/*
* Switches the bm to a read only mode. Once read-only mode
* has been entered the following functions will return -EPERM.
diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
index 3586542..468e371 100644
--- a/drivers/md/persistent-data/dm-btree.c
+++ b/drivers/md/persistent-data/dm-btree.c
@@ -161,6 +161,7 @@ struct frame {
};
struct del_stack {
+ struct dm_btree_info *info;
struct dm_transaction_manager *tm;
int top;
struct frame spine[MAX_SPINE_DEPTH];
@@ -183,6 +184,20 @@ static int unprocessed_frames(struct del_stack *s)
return s->top >= 0;
}
+static void prefetch_children(struct del_stack *s, struct frame *f)
+{
+ unsigned i;
+ struct dm_block_manager *bm = dm_tm_get_bm(s->tm);
+
+ for (i = 0; i < f->nr_children; i++)
+ dm_bm_prefetch(bm, value64(f->n, i));
+}
+
+static bool is_internal_level(struct dm_btree_info *info, struct frame *f)
+{
+ return f->level < (info->levels - 1);
+}
+
static int push_frame(struct del_stack *s, dm_block_t b, unsigned level)
{
int r;
@@ -205,6 +220,7 @@ static int push_frame(struct del_stack *s, dm_block_t b, unsigned level)
dm_tm_dec(s->tm, b);
else {
+ uint32_t flags;
struct frame *f = s->spine + ++s->top;
r = dm_tm_read_lock(s->tm, b, &btree_node_validator, &f->b);
@@ -217,6 +233,10 @@ static int push_frame(struct del_stack *s, dm_block_t b, unsigned level)
f->level = level;
f->nr_children = le32_to_cpu(f->n->header.nr_entries);
f->current_child = 0;
+
+ flags = le32_to_cpu(f->n->header.flags);
+ if (flags & INTERNAL_NODE || is_internal_level(s->info, f))
+ prefetch_children(s, f);
}
return 0;
@@ -230,11 +250,6 @@ static void pop_frame(struct del_stack *s)
dm_tm_unlock(s->tm, f->b);
}
-static bool is_internal_level(struct dm_btree_info *info, struct frame *f)
-{
- return f->level < (info->levels - 1);
-}
-
int dm_btree_del(struct dm_btree_info *info, dm_block_t root)
{
int r;
@@ -243,6 +258,7 @@ int dm_btree_del(struct dm_btree_info *info, dm_block_t root)
s = kmalloc(sizeof(*s), GFP_KERNEL);
if (!s)
return -ENOMEM;
+ s->info = info;
s->tm = info->tm;
s->top = -1;
@@ -287,7 +303,7 @@ int dm_btree_del(struct dm_btree_info *info, dm_block_t root)
info->value_type.dec(info->value_type.context,
value_ptr(f->n, i));
}
- f->current_child = f->nr_children;
+ pop_frame(s);
}
}
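The deletion walk above keeps an explicit stack of frames, and the patch issues dm_bm_prefetch() for every child the moment an internal node is pushed, so the later read locks mostly hit warm buffers. A user-space sketch of the prefetch-on-push pattern over a generic tree (hypothetical names, a fixed-size stack for brevity, and a no-op standing in for dm_bm_prefetch):

#include <stdlib.h>

struct node {
	int nr_children;
	struct node **child;                 /* NULL for a leaf */
};

static void prefetch(struct node *n)
{
	(void)n;                             /* would be dm_bm_prefetch() */
}

static void delete_tree(struct node *root)
{
	struct node *stack[128];             /* the del_stack analogue */
	int top = 0;

	stack[top++] = root;
	while (top > 0) {
		struct node *n = stack[--top];
		int i;

		if (n->child) {
			/* kick off reads for all children up front */
			for (i = 0; i < n->nr_children; i++)
				prefetch(n->child[i]);
			for (i = 0; i < n->nr_children; i++)
				stack[top++] = n->child[i];
			free(n->child);
		}
		free(n);
	}
}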
diff --git a/drivers/md/persistent-data/dm-space-map-common.c b/drivers/md/persistent-data/dm-space-map-common.c
index 3e7a88d..6058569 100644
--- a/drivers/md/persistent-data/dm-space-map-common.c
+++ b/drivers/md/persistent-data/dm-space-map-common.c
@@ -292,16 +292,11 @@ int sm_ll_lookup_bitmap(struct ll_disk *ll, dm_block_t b, uint32_t *result)
return dm_tm_unlock(ll->tm, blk);
}
-int sm_ll_lookup(struct ll_disk *ll, dm_block_t b, uint32_t *result)
+static int sm_ll_lookup_big_ref_count(struct ll_disk *ll, dm_block_t b,
+ uint32_t *result)
{
__le32 le_rc;
- int r = sm_ll_lookup_bitmap(ll, b, result);
-
- if (r)
- return r;
-
- if (*result != 3)
- return r;
+ int r;
r = dm_btree_lookup(&ll->ref_count_info, ll->ref_count_root, &b, &le_rc);
if (r < 0)
@@ -312,6 +307,19 @@ int sm_ll_lookup(struct ll_disk *ll, dm_block_t b, uint32_t *result)
return r;
}
+int sm_ll_lookup(struct ll_disk *ll, dm_block_t b, uint32_t *result)
+{
+ int r = sm_ll_lookup_bitmap(ll, b, result);
+
+ if (r)
+ return r;
+
+ if (*result != 3)
+ return r;
+
+ return sm_ll_lookup_big_ref_count(ll, b, result);
+}
+
int sm_ll_find_free_block(struct ll_disk *ll, dm_block_t begin,
dm_block_t end, dm_block_t *result)
{
@@ -372,11 +380,12 @@ int sm_ll_find_free_block(struct ll_disk *ll, dm_block_t begin,
return -ENOSPC;
}
-int sm_ll_insert(struct ll_disk *ll, dm_block_t b,
- uint32_t ref_count, enum allocation_event *ev)
+static int sm_ll_mutate(struct ll_disk *ll, dm_block_t b,
+ uint32_t (*mutator)(void *context, uint32_t old),
+ void *context, enum allocation_event *ev)
{
int r;
- uint32_t bit, old;
+ uint32_t bit, old, ref_count;
struct dm_block *nb;
dm_block_t index = b;
struct disk_index_entry ie_disk;
@@ -399,6 +408,14 @@ int sm_ll_insert(struct ll_disk *ll, dm_block_t b,
bm_le = dm_bitmap_data(nb);
old = sm_lookup_bitmap(bm_le, bit);
+ if (old > 2) {
+ r = sm_ll_lookup_big_ref_count(ll, b, &old);
+ if (r < 0)
+ return r;
+ }
+
+ ref_count = mutator(context, old);
+
if (ref_count <= 2) {
sm_set_bitmap(bm_le, bit, ref_count);
@@ -448,31 +465,35 @@ int sm_ll_insert(struct ll_disk *ll, dm_block_t b,
return ll->save_ie(ll, index, &ie_disk);
}
-int sm_ll_inc(struct ll_disk *ll, dm_block_t b, enum allocation_event *ev)
+static uint32_t set_ref_count(void *context, uint32_t old)
{
- int r;
- uint32_t rc;
-
- r = sm_ll_lookup(ll, b, &rc);
- if (r)
- return r;
+ return *((uint32_t *) context);
+}
- return sm_ll_insert(ll, b, rc + 1, ev);
+int sm_ll_insert(struct ll_disk *ll, dm_block_t b,
+ uint32_t ref_count, enum allocation_event *ev)
+{
+ return sm_ll_mutate(ll, b, set_ref_count, &ref_count, ev);
}
-int sm_ll_dec(struct ll_disk *ll, dm_block_t b, enum allocation_event *ev)
+static uint32_t inc_ref_count(void *context, uint32_t old)
{
- int r;
- uint32_t rc;
+ return old + 1;
+}
- r = sm_ll_lookup(ll, b, &rc);
- if (r)
- return r;
+int sm_ll_inc(struct ll_disk *ll, dm_block_t b, enum allocation_event *ev)
+{
+ return sm_ll_mutate(ll, b, inc_ref_count, NULL, ev);
+}
- if (!rc)
- return -EINVAL;
+static uint32_t dec_ref_count(void *context, uint32_t old)
+{
+ return old - 1;
+}
- return sm_ll_insert(ll, b, rc - 1, ev);
+int sm_ll_dec(struct ll_disk *ll, dm_block_t b, enum allocation_event *ev)
+{
+ return sm_ll_mutate(ll, b, dec_ref_count, NULL, ev);
}
int sm_ll_commit(struct ll_disk *ll)
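The refactor above collapses lookup-then-insert into one read-modify-write, sm_ll_mutate(), parameterized by a callback that derives the new reference count from the old one; sm_ll_insert(), sm_ll_inc() and sm_ll_dec() become thin wrappers. The shape of the pattern, reduced to a plain in-memory slot (hypothetical names):

#include <assert.h>
#include <stdint.h>

/* single read-modify-write path, parameterized by the mutation */
static int mutate(uint32_t *slot,
		  uint32_t (*mutator)(void *context, uint32_t old),
		  void *context)
{
	uint32_t old = *slot;                /* the one shared lookup */
	*slot = mutator(context, old);       /* the one shared store */
	return 0;
}

static uint32_t set_rc(void *ctx, uint32_t old) { (void)old; return *(uint32_t *)ctx; }
static uint32_t inc_rc(void *ctx, uint32_t old) { (void)ctx; return old + 1; }
static uint32_t dec_rc(void *ctx, uint32_t old) { (void)ctx; return old - 1; }

int main(void)
{
	uint32_t rc = 0, three = 3;

	mutate(&rc, set_rc, &three);         /* sm_ll_insert(..., 3, ...) */
	mutate(&rc, inc_rc, NULL);           /* sm_ll_inc() */
	mutate(&rc, dec_rc, NULL);           /* sm_ll_dec() */
	assert(rc == 3);
	return 0;
}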
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 78ea443..7ff4f25 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -53,6 +53,7 @@
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
+#include <linux/nodemask.h>
#include <trace/events/block.h>
#include "md.h"
@@ -60,6 +61,10 @@
#include "raid0.h"
#include "bitmap.h"
+#define cpu_to_group(cpu) cpu_to_node(cpu)
+#define ANY_GROUP NUMA_NO_NODE
+
+static struct workqueue_struct *raid5_wq;
/*
* Stripe cache
*/
@@ -72,6 +77,7 @@
#define BYPASS_THRESHOLD 1
#define NR_HASH (PAGE_SIZE / sizeof(struct hlist_head))
#define HASH_MASK (NR_HASH - 1)
+#define MAX_STRIPE_BATCH 8
static inline struct hlist_head *stripe_hash(struct r5conf *conf, sector_t sect)
{
@@ -200,6 +206,49 @@ static int stripe_operations_active(struct stripe_head *sh)
test_bit(STRIPE_COMPUTE_RUN, &sh->state);
}
+static void raid5_wakeup_stripe_thread(struct stripe_head *sh)
+{
+ struct r5conf *conf = sh->raid_conf;
+ struct r5worker_group *group;
+ int thread_cnt;
+ int i, cpu = sh->cpu;
+
+ if (!cpu_online(cpu)) {
+ cpu = cpumask_any(cpu_online_mask);
+ sh->cpu = cpu;
+ }
+
+ if (list_empty(&sh->lru)) {
+ struct r5worker_group *group;
+ group = conf->worker_groups + cpu_to_group(cpu);
+ list_add_tail(&sh->lru, &group->handle_list);
+ group->stripes_cnt++;
+ sh->group = group;
+ }
+
+ if (conf->worker_cnt_per_group == 0) {
+ md_wakeup_thread(conf->mddev->thread);
+ return;
+ }
+
+ group = conf->worker_groups + cpu_to_group(sh->cpu);
+
+ group->workers[0].working = true;
+ /* at least one worker should run to avoid race */
+ queue_work_on(sh->cpu, raid5_wq, &group->workers[0].work);
+
+ thread_cnt = group->stripes_cnt / MAX_STRIPE_BATCH - 1;
+ /* wakeup more workers */
+ for (i = 1; i < conf->worker_cnt_per_group && thread_cnt > 0; i++) {
+ if (group->workers[i].working == false) {
+ group->workers[i].working = true;
+ queue_work_on(sh->cpu, raid5_wq,
+ &group->workers[i].work);
+ thread_cnt--;
+ }
+ }
+}
+
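The wakeup policy above scales with queue depth: workers[0] is always kicked so at least one worker runs, and one further worker is woken per full MAX_STRIPE_BATCH of stripes queued beyond the first batch. A sketch of just that arithmetic (hypothetical helper):

#define MAX_STRIPE_BATCH 8

/* extra workers (beyond workers[0]) to wake for a given queue depth */
static int extra_workers_to_wake(int stripes_cnt)
{
	int thread_cnt = stripes_cnt / MAX_STRIPE_BATCH - 1;

	return thread_cnt > 0 ? thread_cnt : 0;
}
/* e.g. 7 stripes -> 0 extra, 24 -> 2, 80 -> 9 (bounded by group size) */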
static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh)
{
BUG_ON(!list_empty(&sh->lru));
@@ -214,7 +263,12 @@ static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh)
else {
clear_bit(STRIPE_DELAYED, &sh->state);
clear_bit(STRIPE_BIT_DELAY, &sh->state);
- list_add_tail(&sh->lru, &conf->handle_list);
+ if (conf->worker_cnt_per_group == 0) {
+ list_add_tail(&sh->lru, &conf->handle_list);
+ } else {
+ raid5_wakeup_stripe_thread(sh);
+ return;
+ }
}
md_wakeup_thread(conf->mddev->thread);
} else {
@@ -239,12 +293,62 @@ static void __release_stripe(struct r5conf *conf, struct stripe_head *sh)
do_release_stripe(conf, sh);
}
+static struct llist_node *llist_reverse_order(struct llist_node *head)
+{
+ struct llist_node *new_head = NULL;
+
+ while (head) {
+ struct llist_node *tmp = head;
+ head = head->next;
+ tmp->next = new_head;
+ new_head = tmp;
+ }
+
+ return new_head;
+}
+
+/* should hold conf->device_lock already */
+static int release_stripe_list(struct r5conf *conf)
+{
+ struct stripe_head *sh;
+ int count = 0;
+ struct llist_node *head;
+
+ head = llist_del_all(&conf->released_stripes);
+ head = llist_reverse_order(head);
+ while (head) {
+ sh = llist_entry(head, struct stripe_head, release_list);
+ head = llist_next(head);
+ /* sh could be re-added after STRIPE_ON_RELEASE_LIST is cleared */
+ smp_mb();
+ clear_bit(STRIPE_ON_RELEASE_LIST, &sh->state);
+ /*
+ * Don't worry if the bit is set here: if it is set
+ * again, sh->count is always > 1. The same is true for
+ * the STRIPE_ON_UNPLUG_LIST bit.
+ */
+ __release_stripe(conf, sh);
+ count++;
+ }
+
+ return count;
+}
+
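llist_add() pushes at the head, so the lock-free released_stripes list drains in LIFO order; llist_reverse_order() restores the order in which stripes were actually released before release_stripe_list() processes them. A standalone demonstration with a minimal node type:

#include <assert.h>
#include <stddef.h>

struct lnode { struct lnode *next; int id; };

static struct lnode *push(struct lnode *head, struct lnode *n)
{
	n->next = head;              /* llist_add(): new node becomes head */
	return n;
}

static struct lnode *reverse(struct lnode *head)
{
	struct lnode *new_head = NULL;

	while (head) {
		struct lnode *tmp = head;
		head = head->next;
		tmp->next = new_head;
		new_head = tmp;
	}
	return new_head;
}

int main(void)
{
	struct lnode a = { .id = 1 }, b = { .id = 2 }, c = { .id = 3 };
	struct lnode *head = NULL;

	head = push(head, &a);       /* released first */
	head = push(head, &b);
	head = push(head, &c);       /* released last, but now at the head */

	assert(head->id == 3);       /* LIFO before reversal */
	head = reverse(head);
	assert(head->id == 1);       /* FIFO after reversal */
	return 0;
}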
static void release_stripe(struct stripe_head *sh)
{
struct r5conf *conf = sh->raid_conf;
unsigned long flags;
+ bool wakeup;
+ if (test_and_set_bit(STRIPE_ON_RELEASE_LIST, &sh->state))
+ goto slow_path;
+ wakeup = llist_add(&sh->release_list, &conf->released_stripes);
+ if (wakeup)
+ md_wakeup_thread(conf->mddev->thread);
+ return;
+slow_path:
local_irq_save(flags);
+ /* we are ok here if STRIPE_ON_RELEASE_LIST is set or not */
if (atomic_dec_and_lock(&sh->count, &conf->device_lock)) {
do_release_stripe(conf, sh);
spin_unlock(&conf->device_lock);
@@ -359,6 +463,7 @@ static void init_stripe(struct stripe_head *sh, sector_t sector, int previous)
raid5_build_block(sh, i, previous);
}
insert_hash(conf, sh);
+ sh->cpu = smp_processor_id();
}
static struct stripe_head *__find_stripe(struct r5conf *conf, sector_t sector,
@@ -491,7 +596,8 @@ get_active_stripe(struct r5conf *conf, sector_t sector,
if (atomic_read(&sh->count)) {
BUG_ON(!list_empty(&sh->lru)
&& !test_bit(STRIPE_EXPANDING, &sh->state)
- && !test_bit(STRIPE_ON_UNPLUG_LIST, &sh->state));
+ && !test_bit(STRIPE_ON_UNPLUG_LIST, &sh->state)
+ && !test_bit(STRIPE_ON_RELEASE_LIST, &sh->state));
} else {
if (!test_bit(STRIPE_HANDLE, &sh->state))
atomic_inc(&conf->active_stripes);
@@ -499,6 +605,10 @@ get_active_stripe(struct r5conf *conf, sector_t sector,
!test_bit(STRIPE_EXPANDING, &sh->state))
BUG();
list_del_init(&sh->lru);
+ if (sh->group) {
+ sh->group->stripes_cnt--;
+ sh->group = NULL;
+ }
}
}
} while (sh == NULL);
@@ -3779,6 +3889,7 @@ static void raid5_activate_delayed(struct r5conf *conf)
if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
atomic_inc(&conf->preread_active_stripes);
list_add_tail(&sh->lru, &conf->hold_list);
+ raid5_wakeup_stripe_thread(sh);
}
}
}
@@ -4058,18 +4169,35 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
* head of the hold_list has changed, i.e. the head was promoted to the
* handle_list.
*/
-static struct stripe_head *__get_priority_stripe(struct r5conf *conf)
+static struct stripe_head *__get_priority_stripe(struct r5conf *conf, int group)
{
- struct stripe_head *sh;
+ struct stripe_head *sh = NULL, *tmp;
+ struct list_head *handle_list = NULL;
+ struct r5worker_group *wg = NULL;
+
+ if (conf->worker_cnt_per_group == 0) {
+ handle_list = &conf->handle_list;
+ } else if (group != ANY_GROUP) {
+ handle_list = &conf->worker_groups[group].handle_list;
+ wg = &conf->worker_groups[group];
+ } else {
+ int i;
+ for (i = 0; i < conf->group_cnt; i++) {
+ handle_list = &conf->worker_groups[i].handle_list;
+ wg = &conf->worker_groups[i];
+ if (!list_empty(handle_list))
+ break;
+ }
+ }
pr_debug("%s: handle: %s hold: %s full_writes: %d bypass_count: %d\n",
__func__,
- list_empty(&conf->handle_list) ? "empty" : "busy",
+ list_empty(handle_list) ? "empty" : "busy",
list_empty(&conf->hold_list) ? "empty" : "busy",
atomic_read(&conf->pending_full_writes), conf->bypass_count);
- if (!list_empty(&conf->handle_list)) {
- sh = list_entry(conf->handle_list.next, typeof(*sh), lru);
+ if (!list_empty(handle_list)) {
+ sh = list_entry(handle_list->next, typeof(*sh), lru);
if (list_empty(&conf->hold_list))
conf->bypass_count = 0;
@@ -4087,14 +4215,32 @@ static struct stripe_head *__get_priority_stripe(struct r5conf *conf)
((conf->bypass_threshold &&
conf->bypass_count > conf->bypass_threshold) ||
atomic_read(&conf->pending_full_writes) == 0)) {
- sh = list_entry(conf->hold_list.next,
- typeof(*sh), lru);
- conf->bypass_count -= conf->bypass_threshold;
- if (conf->bypass_count < 0)
- conf->bypass_count = 0;
- } else
+
+ list_for_each_entry(tmp, &conf->hold_list, lru) {
+ if (conf->worker_cnt_per_group == 0 ||
+ group == ANY_GROUP ||
+ !cpu_online(tmp->cpu) ||
+ cpu_to_group(tmp->cpu) == group) {
+ sh = tmp;
+ break;
+ }
+ }
+
+ if (sh) {
+ conf->bypass_count -= conf->bypass_threshold;
+ if (conf->bypass_count < 0)
+ conf->bypass_count = 0;
+ }
+ wg = NULL;
+ }
+
+ if (!sh)
return NULL;
+ if (wg) {
+ wg->stripes_cnt--;
+ sh->group = NULL;
+ }
list_del_init(&sh->lru);
atomic_inc(&sh->count);
BUG_ON(atomic_read(&sh->count) != 1);
@@ -4127,6 +4273,10 @@ static void raid5_unplug(struct blk_plug_cb *blk_cb, bool from_schedule)
*/
smp_mb__before_clear_bit();
clear_bit(STRIPE_ON_UNPLUG_LIST, &sh->state);
+ /*
+ * STRIPE_ON_RELEASE_LIST could be set here. In that
+ * case, the count is always > 1 here
+ */
__release_stripe(conf, sh);
cnt++;
}
@@ -4286,8 +4436,10 @@ static void make_request(struct mddev *mddev, struct bio * bi)
for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) {
DEFINE_WAIT(w);
int previous;
+ int seq;
retry:
+ seq = read_seqcount_begin(&conf->gen_lock);
previous = 0;
prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE);
if (unlikely(conf->reshape_progress != MaxSector)) {
@@ -4320,7 +4472,7 @@ static void make_request(struct mddev *mddev, struct bio * bi)
previous,
&dd_idx, NULL);
pr_debug("raid456: make_request, sector %llu logical %llu\n",
- (unsigned long long)new_sector,
+ (unsigned long long)new_sector,
(unsigned long long)logical_sector);
sh = get_active_stripe(conf, new_sector, previous,
@@ -4349,6 +4501,13 @@ static void make_request(struct mddev *mddev, struct bio * bi)
goto retry;
}
}
+ if (read_seqcount_retry(&conf->gen_lock, seq)) {
+ /* Might have got the wrong stripe_head
+ * by accident
+ */
+ release_stripe(sh);
+ goto retry;
+ }
if (rw == WRITE &&
logical_sector >= mddev->suspend_lo &&
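The read_seqcount_begin()/read_seqcount_retry() pair added around the stripe lookup is the standard seqcount reader pattern: sample the sequence, do the work, and redo it (here: release_stripe() and retry) if the reshape writer ran in between. A minimal C11 model of the primitive, ignoring the memory-ordering details a real seqcount handles:

#include <stdatomic.h>

static atomic_uint gen_seq;            /* models conf->gen_lock */

static unsigned read_begin(void)       /* read_seqcount_begin() */
{
	unsigned s;

	while ((s = atomic_load(&gen_seq)) & 1)
		;                      /* odd: writer in progress, wait */
	return s;
}

static int read_retry(unsigned s)      /* read_seqcount_retry() */
{
	return atomic_load(&gen_seq) != s;
}

static void write_begin(void) { atomic_fetch_add(&gen_seq, 1); } /* odd */
static void write_end(void)   { atomic_fetch_add(&gen_seq, 1); } /* even */

/* reader:                               writer (raid5_start_reshape):
 *   do {                                  write_begin();
 *       seq = read_begin();               ...change generation/geometry...
 *       sh = lookup_stripe();             write_end();
 *   } while (read_retry(seq) means:
 *            release sh and retry);
 */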
@@ -4788,14 +4947,14 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)
return handled;
}
-#define MAX_STRIPE_BATCH 8
-static int handle_active_stripes(struct r5conf *conf)
+static int handle_active_stripes(struct r5conf *conf, int group,
+ struct r5worker *worker)
{
struct stripe_head *batch[MAX_STRIPE_BATCH], *sh;
int i, batch_size = 0;
while (batch_size < MAX_STRIPE_BATCH &&
- (sh = __get_priority_stripe(conf)) != NULL)
+ (sh = __get_priority_stripe(conf, group)) != NULL)
batch[batch_size++] = sh;
if (batch_size == 0)
@@ -4813,6 +4972,39 @@ static int handle_active_stripes(struct r5conf *conf)
return batch_size;
}
+static void raid5_do_work(struct work_struct *work)
+{
+ struct r5worker *worker = container_of(work, struct r5worker, work);
+ struct r5worker_group *group = worker->group;
+ struct r5conf *conf = group->conf;
+ int group_id = group - conf->worker_groups;
+ int handled;
+ struct blk_plug plug;
+
+ pr_debug("+++ raid5worker active\n");
+
+ blk_start_plug(&plug);
+ handled = 0;
+ spin_lock_irq(&conf->device_lock);
+ while (1) {
+ int batch_size, released;
+
+ released = release_stripe_list(conf);
+
+ batch_size = handle_active_stripes(conf, group_id, worker);
+ worker->working = false;
+ if (!batch_size && !released)
+ break;
+ handled += batch_size;
+ }
+ pr_debug("%d stripes handled\n", handled);
+
+ spin_unlock_irq(&conf->device_lock);
+ blk_finish_plug(&plug);
+
+ pr_debug("--- raid5worker inactive\n");
+}
+
/*
* This is our raid5 kernel thread.
*
@@ -4836,7 +5028,9 @@ static void raid5d(struct md_thread *thread)
spin_lock_irq(&conf->device_lock);
while (1) {
struct bio *bio;
- int batch_size;
+ int batch_size, released;
+
+ released = release_stripe_list(conf);
if (
!list_empty(&conf->bitmap_list)) {
@@ -4860,8 +5054,8 @@ static void raid5d(struct md_thread *thread)
handled++;
}
- batch_size = handle_active_stripes(conf);
- if (!batch_size)
+ batch_size = handle_active_stripes(conf, ANY_GROUP, NULL);
+ if (!batch_size && !released)
break;
handled += batch_size;
@@ -4989,10 +5183,70 @@ stripe_cache_active_show(struct mddev *mddev, char *page)
static struct md_sysfs_entry
raid5_stripecache_active = __ATTR_RO(stripe_cache_active);
+static ssize_t
+raid5_show_group_thread_cnt(struct mddev *mddev, char *page)
+{
+ struct r5conf *conf = mddev->private;
+ if (conf)
+ return sprintf(page, "%d\n", conf->worker_cnt_per_group);
+ else
+ return 0;
+}
+
+static int alloc_thread_groups(struct r5conf *conf, int cnt);
+static ssize_t
+raid5_store_group_thread_cnt(struct mddev *mddev, const char *page, size_t len)
+{
+ struct r5conf *conf = mddev->private;
+ unsigned long new;
+ int err;
+ struct r5worker_group *old_groups;
+ int old_group_cnt;
+
+ if (len >= PAGE_SIZE)
+ return -EINVAL;
+ if (!conf)
+ return -ENODEV;
+
+ if (kstrtoul(page, 10, &new))
+ return -EINVAL;
+
+ if (new == conf->worker_cnt_per_group)
+ return len;
+
+ mddev_suspend(mddev);
+
+ old_groups = conf->worker_groups;
+ old_group_cnt = conf->worker_cnt_per_group;
+
+ conf->worker_groups = NULL;
+ err = alloc_thread_groups(conf, new);
+ if (err) {
+ conf->worker_groups = old_groups;
+ conf->worker_cnt_per_group = old_group_cnt;
+ } else {
+ if (old_groups)
+ kfree(old_groups[0].workers);
+ kfree(old_groups);
+ }
+
+ mddev_resume(mddev);
+
+ if (err)
+ return err;
+ return len;
+}
+
+static struct md_sysfs_entry
+raid5_group_thread_cnt = __ATTR(group_thread_cnt, S_IRUGO | S_IWUSR,
+ raid5_show_group_thread_cnt,
+ raid5_store_group_thread_cnt);
+
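raid5_store_group_thread_cnt() uses a suspend/swap/rollback sequence: the array is quiesced, new groups are allocated, and on failure the old pointers are restored so the array keeps its previous configuration; the old allocation is freed only once the new one is in place. The pointer dance in isolation (hypothetical types):

#include <stdlib.h>

struct worker { int id; };
struct group  { struct worker *workers; };

static int replace_groups(struct group **cur, int *cur_cnt, int new_cnt)
{
	struct group *old = *cur, *fresh;
	int old_cnt = *cur_cnt;

	/* mddev_suspend() would run here */
	fresh = calloc(1, sizeof(*fresh));
	if (fresh)
		fresh->workers = calloc(new_cnt ? new_cnt : 1,
					sizeof(*fresh->workers));
	if (!fresh || !fresh->workers) {
		free(fresh);
		*cur = old;                  /* rollback: keep old config */
		*cur_cnt = old_cnt;
		return -1;                   /* the alloc error path above */
	}
	*cur = fresh;
	*cur_cnt = new_cnt;
	if (old) {
		free(old->workers);          /* kfree(old_groups[0].workers) */
		free(old);
	}
	/* mddev_resume() would run here */
	return 0;
}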
static struct attribute *raid5_attrs[] = {
&raid5_stripecache_size.attr,
&raid5_stripecache_active.attr,
&raid5_preread_bypass_threshold.attr,
+ &raid5_group_thread_cnt.attr,
NULL,
};
static struct attribute_group raid5_attrs_group = {
@@ -5000,6 +5254,54 @@ static struct attribute_group raid5_attrs_group = {
.attrs = raid5_attrs,
};
+static int alloc_thread_groups(struct r5conf *conf, int cnt)
+{
+ int i, j;
+ ssize_t size;
+ struct r5worker *workers;
+
+ conf->worker_cnt_per_group = cnt;
+ if (cnt == 0) {
+ conf->worker_groups = NULL;
+ return 0;
+ }
+ conf->group_cnt = num_possible_nodes();
+ size = sizeof(struct r5worker) * cnt;
+ workers = kzalloc(size * conf->group_cnt, GFP_NOIO);
+ conf->worker_groups = kzalloc(sizeof(struct r5worker_group) *
+ conf->group_cnt, GFP_NOIO);
+ if (!conf->worker_groups || !workers) {
+ kfree(workers);
+ kfree(conf->worker_groups);
+ conf->worker_groups = NULL;
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < conf->group_cnt; i++) {
+ struct r5worker_group *group;
+
+ group = &conf->worker_groups[i];
+ INIT_LIST_HEAD(&group->handle_list);
+ group->conf = conf;
+ group->workers = workers + i * cnt;
+
+ for (j = 0; j < cnt; j++) {
+ group->workers[j].group = group;
+ INIT_WORK(&group->workers[j].work, raid5_do_work);
+ }
+ }
+
+ return 0;
+}
+
+static void free_thread_groups(struct r5conf *conf)
+{
+ if (conf->worker_groups)
+ kfree(conf->worker_groups[0].workers);
+ kfree(conf->worker_groups);
+ conf->worker_groups = NULL;
+}
+
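Note the allocation layout in alloc_thread_groups(): all workers for all groups come from one kzalloc() of size * group_cnt bytes, and group i owns the slice workers + i * cnt. free_thread_groups() therefore frees worker_groups[0].workers once, not per group. A quick user-space check of the slicing (hypothetical sizes):

#include <assert.h>
#include <stdlib.h>

struct w { int cpu; };

int main(void)
{
	int group_cnt = 4, cnt = 2, i;
	struct w *workers = calloc(group_cnt * cnt, sizeof(*workers));
	struct w *slice[4];

	assert(workers);
	for (i = 0; i < group_cnt; i++)
		slice[i] = workers + i * cnt;   /* group i's private window */

	assert(slice[1] - slice[0] == cnt);     /* windows don't overlap */
	free(workers);                          /* one free covers all groups */
	return 0;
}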
static sector_t
raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks)
{
@@ -5040,6 +5342,7 @@ static void raid5_free_percpu(struct r5conf *conf)
static void free_conf(struct r5conf *conf)
{
+ free_thread_groups(conf);
shrink_stripes(conf);
raid5_free_percpu(conf);
kfree(conf->disks);
@@ -5168,7 +5471,11 @@ static struct r5conf *setup_conf(struct mddev *mddev)
conf = kzalloc(sizeof(struct r5conf), GFP_KERNEL);
if (conf == NULL)
goto abort;
+ /* Don't enable multi-threading by default */
+ if (alloc_thread_groups(conf, 0))
+ goto abort;
spin_lock_init(&conf->device_lock);
+ seqcount_init(&conf->gen_lock);
init_waitqueue_head(&conf->wait_for_stripe);
init_waitqueue_head(&conf->wait_for_overlap);
INIT_LIST_HEAD(&conf->handle_list);
@@ -5176,6 +5483,7 @@ static struct r5conf *setup_conf(struct mddev *mddev)
INIT_LIST_HEAD(&conf->delayed_list);
INIT_LIST_HEAD(&conf->bitmap_list);
INIT_LIST_HEAD(&conf->inactive_list);
+ init_llist_head(&conf->released_stripes);
atomic_set(&conf->active_stripes, 0);
atomic_set(&conf->preread_active_stripes, 0);
atomic_set(&conf->active_aligned_reads, 0);
@@ -5980,6 +6288,7 @@ static int raid5_start_reshape(struct mddev *mddev)
atomic_set(&conf->reshape_stripes, 0);
spin_lock_irq(&conf->device_lock);
+ write_seqcount_begin(&conf->gen_lock);
conf->previous_raid_disks = conf->raid_disks;
conf->raid_disks += mddev->delta_disks;
conf->prev_chunk_sectors = conf->chunk_sectors;
@@ -5996,8 +6305,16 @@ static int raid5_start_reshape(struct mddev *mddev)
else
conf->reshape_progress = 0;
conf->reshape_safe = conf->reshape_progress;
+ write_seqcount_end(&conf->gen_lock);
spin_unlock_irq(&conf->device_lock);
+ /* Now make sure any requests that proceeded on the assumption
+ * the reshape wasn't running - like Discard or Read - have
+ * completed.
+ */
+ mddev_suspend(mddev);
+ mddev_resume(mddev);
+
/* Add some new drives, as many as will fit.
* We know there are enough to make the newly sized array work.
* Don't add devices if we are reducing the number of
@@ -6472,6 +6789,10 @@ static struct md_personality raid4_personality =
static int __init raid5_init(void)
{
+ raid5_wq = alloc_workqueue("raid5wq",
+ WQ_UNBOUND|WQ_MEM_RECLAIM|WQ_CPU_INTENSIVE|WQ_SYSFS, 0);
+ if (!raid5_wq)
+ return -ENOMEM;
register_md_personality(&raid6_personality);
register_md_personality(&raid5_personality);
register_md_personality(&raid4_personality);
@@ -6483,6 +6804,7 @@ static void raid5_exit(void)
unregister_md_personality(&raid6_personality);
unregister_md_personality(&raid5_personality);
unregister_md_personality(&raid4_personality);
+ destroy_workqueue(raid5_wq);
}
module_init(raid5_init);
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index 70c4932..2113ffa 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -197,6 +197,7 @@ enum reconstruct_states {
struct stripe_head {
struct hlist_node hash;
struct list_head lru; /* inactive_list or handle_list */
+ struct llist_node release_list;
struct r5conf *raid_conf;
short generation; /* increments with every
* reshape */
@@ -211,6 +212,8 @@ struct stripe_head {
enum check_states check_state;
enum reconstruct_states reconstruct_state;
spinlock_t stripe_lock;
+ int cpu;
+ struct r5worker_group *group;
/**
* struct stripe_operations
* @target - STRIPE_OP_COMPUTE_BLK target
@@ -321,6 +324,7 @@ enum {
STRIPE_OPS_REQ_PENDING,
STRIPE_ON_UNPLUG_LIST,
STRIPE_DISCARD,
+ STRIPE_ON_RELEASE_LIST,
};
/*
@@ -363,6 +367,19 @@ struct disk_info {
struct md_rdev *rdev, *replacement;
};
+struct r5worker {
+ struct work_struct work;
+ struct r5worker_group *group;
+ bool working;
+};
+
+struct r5worker_group {
+ struct list_head handle_list;
+ struct r5conf *conf;
+ struct r5worker *workers;
+ int stripes_cnt;
+};
+
struct r5conf {
struct hlist_head *stripe_hashtbl;
struct mddev *mddev;
@@ -386,6 +403,7 @@ struct r5conf {
int prev_chunk_sectors;
int prev_algo;
short generation; /* increments with every reshape */
+ seqcount_t gen_lock; /* lock against generation changes */
unsigned long reshape_checkpoint; /* Time we last updated
* metadata */
long long min_offset_diff; /* minimum difference between
@@ -445,6 +463,7 @@ struct r5conf {
*/
atomic_t active_stripes;
struct list_head inactive_list;
+ struct llist_head released_stripes;
wait_queue_head_t wait_for_stripe;
wait_queue_head_t wait_for_overlap;
int inactive_blocked; /* release of inactive stripes blocked,
@@ -458,6 +477,9 @@ struct r5conf {
* the new thread here until we fully activate the array.
*/
struct md_thread *thread;
+ struct r5worker_group *worker_groups;
+ int group_cnt;
+ int worker_cnt_per_group;
};
/*
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index 8068d7b..c7caf94 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -203,7 +203,7 @@ config VIDEO_SAMSUNG_EXYNOS_GSC
config VIDEO_SH_VEU
tristate "SuperH VEU mem2mem video processing driver"
- depends on VIDEO_DEV && VIDEO_V4L2 && GENERIC_HARDIRQS && HAS_DMA
+ depends on VIDEO_DEV && VIDEO_V4L2 && HAS_DMA
select VIDEOBUF2_DMA_CONTIG
select V4L2_MEM2MEM_DEV
help
diff --git a/drivers/media/radio/Kconfig b/drivers/media/radio/Kconfig
index 39882dd..6ecdc39 100644
--- a/drivers/media/radio/Kconfig
+++ b/drivers/media/radio/Kconfig
@@ -214,7 +214,7 @@ config RADIO_TIMBERDALE
config RADIO_WL1273
tristate "Texas Instruments WL1273 I2C FM Radio"
- depends on I2C && VIDEO_V4L2 && GENERIC_HARDIRQS
+ depends on I2C && VIDEO_V4L2
select MFD_CORE
select MFD_WL1273_CORE
select FW_LOADER
diff --git a/drivers/memstick/core/Kconfig b/drivers/memstick/core/Kconfig
index 95f1814..1d38949 100644
--- a/drivers/memstick/core/Kconfig
+++ b/drivers/memstick/core/Kconfig
@@ -24,3 +24,15 @@ config MSPRO_BLOCK
support. This provides a block device driver, which you can use
to mount the filesystem. Almost everyone wishing MemoryStick
support should say Y or M here.
+
+config MS_BLOCK
+ tristate "MemoryStick Standard device driver"
+ depends on BLOCK
+ help
+ Say Y here to enable the MemoryStick Standard device driver
+ support. This provides a block device driver, which you can use
+ to mount the filesystem.
+ This driver works with old (bulky) MemoryStick and MemoryStick Duo
+ cards, but not with PRO cards. Say Y if you have such a card.
+ The driver is new and not yet well tested, so it can damage your
+ card (even permanently).
diff --git a/drivers/memstick/core/Makefile b/drivers/memstick/core/Makefile
index ecd0299..0d7f90c 100644
--- a/drivers/memstick/core/Makefile
+++ b/drivers/memstick/core/Makefile
@@ -3,5 +3,5 @@
#
obj-$(CONFIG_MEMSTICK) += memstick.o
-
+obj-$(CONFIG_MS_BLOCK) += ms_block.o
obj-$(CONFIG_MSPRO_BLOCK) += mspro_block.o
diff --git a/drivers/memstick/core/ms_block.c b/drivers/memstick/core/ms_block.c
new file mode 100644
index 0000000..08e7023
--- /dev/null
+++ b/drivers/memstick/core/ms_block.c
@@ -0,0 +1,2385 @@
+/*
+ * ms_block.c - Sony MemoryStick (legacy) storage support
+ *
+ * Copyright (C) 2013 Maxim Levitsky <maximlevitsky@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Minor portions of the driver were copied from mspro_block.c which is
+ * Copyright (C) 2007 Alex Dubov <oakad@yahoo.com>
+ *
+ */
+#define DRIVER_NAME "ms_block"
+#define pr_fmt(fmt) DRIVER_NAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/blkdev.h>
+#include <linux/memstick.h>
+#include <linux/idr.h>
+#include <linux/hdreg.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/random.h>
+#include <linux/bitmap.h>
+#include <linux/scatterlist.h>
+#include <linux/jiffies.h>
+#include <linux/workqueue.h>
+#include <linux/mutex.h>
+#include "ms_block.h"
+
+static int debug;
+static int cache_flush_timeout = 1000;
+static bool verify_writes;
+
+/*
+ * Copies a section of 'sg_from', starting at offset 'offset' and of length
+ * 'len', to another scatterlist of up to 'to_nents' entries
+ */
+static size_t msb_sg_copy(struct scatterlist *sg_from,
+ struct scatterlist *sg_to, int to_nents, size_t offset, size_t len)
+{
+ size_t copied = 0;
+
+ while (offset > 0) {
+ if (offset >= sg_from->length) {
+ if (sg_is_last(sg_from))
+ return 0;
+
+ offset -= sg_from->length;
+ sg_from = sg_next(sg_from);
+ continue;
+ }
+
+ copied = min(len, sg_from->length - offset);
+ sg_set_page(sg_to, sg_page(sg_from),
+ copied, sg_from->offset + offset);
+
+ len -= copied;
+ offset = 0;
+
+ if (sg_is_last(sg_from) || !len)
+ goto out;
+
+ sg_to = sg_next(sg_to);
+ to_nents--;
+ sg_from = sg_next(sg_from);
+ }
+
+ while (len > sg_from->length && to_nents--) {
+ len -= sg_from->length;
+ copied += sg_from->length;
+
+ sg_set_page(sg_to, sg_page(sg_from),
+ sg_from->length, sg_from->offset);
+
+ if (sg_is_last(sg_from) || !len)
+ goto out;
+
+ sg_from = sg_next(sg_from);
+ sg_to = sg_next(sg_to);
+ }
+
+ if (len && to_nents) {
+ sg_set_page(sg_to, sg_page(sg_from), len, sg_from->offset);
+ copied += len;
+ }
+out:
+ sg_mark_end(sg_to);
+ return copied;
+}
+
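msb_sg_copy() extracts the byte window [offset, offset + len) from one segment chain into another: whole segments covered by the offset are skipped, the first emitted segment may be partial (and only the first, since offset is zeroed afterwards), and the tail is truncated to fit. The same walk over plain (pointer, length) segments:

#include <assert.h>
#include <stddef.h>
#include <string.h>

struct seg { const char *buf; size_t len; };

/* copy window [offset, offset+len) of 'in' (n segments) into 'out' */
static size_t window_copy(const struct seg *in, int n, char *out,
			  size_t offset, size_t len)
{
	size_t copied = 0;
	int i;

	for (i = 0; i < n && len; i++) {
		size_t chunk;

		if (offset >= in[i].len) {     /* segment fully before window */
			offset -= in[i].len;
			continue;
		}
		chunk = in[i].len - offset;
		if (chunk > len)
			chunk = len;
		memcpy(out + copied, in[i].buf + offset, chunk);
		copied += chunk;
		len -= chunk;
		offset = 0;                    /* only first segment is partial */
	}
	return copied;
}

int main(void)
{
	struct seg in[] = { { "abcd", 4 }, { "efgh", 4 } };
	char out[8];

	assert(window_copy(in, 2, out, 2, 4) == 4);
	assert(!memcmp(out, "cdef", 4));
	return 0;
}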
+/*
+ * Compares a section of 'sg', starting at offset 'offset' and of length 'len',
+ * to a linear buffer of length 'len' at address 'buffer'.
+ * Returns 0 if equal and -1 otherwise
+ */
+static int msb_sg_compare_to_buffer(struct scatterlist *sg,
+ size_t offset, u8 *buffer, size_t len)
+{
+ int retval = 0, cmplen;
+ struct sg_mapping_iter miter;
+
+ sg_miter_start(&miter, sg, sg_nents(sg),
+ SG_MITER_ATOMIC | SG_MITER_FROM_SG);
+
+ while (sg_miter_next(&miter) && len > 0) {
+ if (offset >= miter.length) {
+ offset -= miter.length;
+ continue;
+ }
+
+ cmplen = min(miter.length - offset, len);
+ retval = memcmp(miter.addr + offset, buffer, cmplen) ? -1 : 0;
+ if (retval)
+ break;
+
+ buffer += cmplen;
+ len -= cmplen;
+ offset = 0;
+ }
+
+ if (!retval && len)
+ retval = -1;
+
+ sg_miter_stop(&miter);
+ return retval;
+}
+
+
+/* Get the zone in which the block with logical address 'lba' lives.
+ * Flash is broken into zones.
+ * Each zone consists of 512 eraseblocks; of those, 494 are usable in the
+ * first zone and 496 in every following zone.
+ * Therefore zone #0 hosts blocks 0-493, zone #1 blocks 494-989, etc...
+*/
+static int msb_get_zone_from_lba(int lba)
+{
+ if (lba < 494)
+ return 0;
+ return ((lba - 494) / 496) + 1;
+}
+
+/* Get zone of physical block. Trivial */
+static int msb_get_zone_from_pba(int pba)
+{
+ return pba / MS_BLOCKS_IN_ZONE;
+}
+
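Working the zone arithmetic through: lba 0-493 fall in zone 0, every later zone holds 496 logical blocks, so lba 989 is the last block of zone 1 and lba 990 opens zone 2; physical zones are simply pba / 512. As runnable assertions:

#include <assert.h>

#define MS_BLOCKS_IN_ZONE 512

static int zone_from_lba(int lba)
{
	if (lba < 494)
		return 0;
	return ((lba - 494) / 496) + 1;
}

int main(void)
{
	assert(zone_from_lba(0)   == 0);
	assert(zone_from_lba(493) == 0);        /* last block of zone 0 */
	assert(zone_from_lba(494) == 1);
	assert(zone_from_lba(989) == 1);        /* last block of zone 1 */
	assert(zone_from_lba(990) == 2);
	assert(1023 / MS_BLOCKS_IN_ZONE == 1);  /* pba zones: just pba/512 */
	return 0;
}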
+/* Debug test to validate free block counts */
+static int msb_validate_used_block_bitmap(struct msb_data *msb)
+{
+ int total_free_blocks = 0;
+ int i;
+
+ if (!debug)
+ return 0;
+
+ for (i = 0; i < msb->zone_count; i++)
+ total_free_blocks += msb->free_block_count[i];
+
+ if (msb->block_count - bitmap_weight(msb->used_blocks_bitmap,
+ msb->block_count) == total_free_blocks)
+ return 0;
+
+ pr_err("BUG: free block counts don't match the bitmap");
+ msb->read_only = true;
+ return -EINVAL;
+}
+
+/* Mark physical block as used */
+static void msb_mark_block_used(struct msb_data *msb, int pba)
+{
+ int zone = msb_get_zone_from_pba(pba);
+
+ if (test_bit(pba, msb->used_blocks_bitmap)) {
+ pr_err(
+ "BUG: attempt to mark already used pba %d as used", pba);
+ msb->read_only = true;
+ return;
+ }
+
+ if (msb_validate_used_block_bitmap(msb))
+ return;
+
+ /* No races because all IO is single threaded */
+ __set_bit(pba, msb->used_blocks_bitmap);
+ msb->free_block_count[zone]--;
+}
+
+/* Mark physical block as free */
+static void msb_mark_block_unused(struct msb_data *msb, int pba)
+{
+ int zone = msb_get_zone_from_pba(pba);
+
+ if (!test_bit(pba, msb->used_blocks_bitmap)) {
+ pr_err("BUG: attempt to mark already unused pba %d as unused" , pba);
+ msb->read_only = true;
+ return;
+ }
+
+ if (msb_validate_used_block_bitmap(msb))
+ return;
+
+ /* No races because all IO is single threaded */
+ __clear_bit(pba, msb->used_blocks_bitmap);
+ msb->free_block_count[zone]++;
+}
+
+/* Invalidate current register window */
+static void msb_invalidate_reg_window(struct msb_data *msb)
+{
+ msb->reg_addr.w_offset = offsetof(struct ms_register, id);
+ msb->reg_addr.w_length = sizeof(struct ms_id_register);
+ msb->reg_addr.r_offset = offsetof(struct ms_register, id);
+ msb->reg_addr.r_length = sizeof(struct ms_id_register);
+ msb->addr_valid = false;
+}
+
+/* Start a state machine */
+static int msb_run_state_machine(struct msb_data *msb, int (*state_func)
+ (struct memstick_dev *card, struct memstick_request **req))
+{
+ struct memstick_dev *card = msb->card;
+
+ WARN_ON(msb->state != -1);
+ msb->int_polling = false;
+ msb->state = 0;
+ msb->exit_error = 0;
+
+ memset(&card->current_mrq, 0, sizeof(card->current_mrq));
+
+ card->next_request = state_func;
+ memstick_new_req(card->host);
+ wait_for_completion(&card->mrq_complete);
+
+ WARN_ON(msb->state != -1);
+ return msb->exit_error;
+}
+
+/* State machine handlers call this to exit */
+static int msb_exit_state_machine(struct msb_data *msb, int error)
+{
+ WARN_ON(msb->state == -1);
+
+ msb->state = -1;
+ msb->exit_error = error;
+ msb->card->next_request = h_msb_default_bad;
+
+ /* Invalidate reg window on errors */
+ if (error)
+ msb_invalidate_reg_window(msb);
+
+ complete(&msb->card->mrq_complete);
+ return -ENXIO;
+}
+
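Every card operation follows the same contract: msb_run_state_machine() installs a handler and queues the first request; the handler is re-entered once per completed request and returns 0 to queue the next one, until it calls msb_exit_state_machine() to record the final error and wake the waiter. Modelled synchronously in user space (hypothetical names, no real memstick types):

/* Synchronous model of the handler contract: a handler advances ->state
 * on each call and signals completion through exit_error. */
struct sm {
	int state;        /* -1 means idle/finished */
	int exit_error;
};

static int exit_machine(struct sm *m, int error)
{
	m->state = -1;
	m->exit_error = error;
	return -1;                       /* no more requests */
}

static int sample_handler(struct sm *m)
{
	switch (m->state) {
	case 0:                          /* e.g. send the command */
		m->state = 1;
		return 0;                /* 0 == "queue another request" */
	case 1:                          /* e.g. poll the INT register */
		return exit_machine(m, 0);
	}
	return exit_machine(m, -1);
}

static int run_state_machine(struct sm *m, int (*handler)(struct sm *))
{
	m->state = 0;
	m->exit_error = 0;
	while (handler(m) == 0)
		;                        /* real code waits for IO here */
	return m->exit_error;
}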
+/* read INT register */
+static int msb_read_int_reg(struct msb_data *msb, long timeout)
+{
+ struct memstick_request *mrq = &msb->card->current_mrq;
+
+ WARN_ON(msb->state == -1);
+
+ if (!msb->int_polling) {
+ msb->int_timeout = jiffies +
+ msecs_to_jiffies(timeout == -1 ? 500 : timeout);
+ msb->int_polling = true;
+ } else if (time_after(jiffies, msb->int_timeout)) {
+ mrq->data[0] = MEMSTICK_INT_CMDNAK;
+ return 0;
+ }
+
+ if ((msb->caps & MEMSTICK_CAP_AUTO_GET_INT) &&
+ mrq->need_card_int && !mrq->error) {
+ mrq->data[0] = mrq->int_reg;
+ mrq->need_card_int = false;
+ return 0;
+ } else {
+ memstick_init_req(mrq, MS_TPC_GET_INT, NULL, 1);
+ return 1;
+ }
+}
+
+/* Read a register */
+static int msb_read_regs(struct msb_data *msb, int offset, int len)
+{
+ struct memstick_request *req = &msb->card->current_mrq;
+
+ if (msb->reg_addr.r_offset != offset ||
+ msb->reg_addr.r_length != len || !msb->addr_valid) {
+
+ msb->reg_addr.r_offset = offset;
+ msb->reg_addr.r_length = len;
+ msb->addr_valid = true;
+
+ memstick_init_req(req, MS_TPC_SET_RW_REG_ADRS,
+ &msb->reg_addr, sizeof(msb->reg_addr));
+ return 0;
+ }
+
+ memstick_init_req(req, MS_TPC_READ_REG, NULL, len);
+ return 1;
+}
+
+/* Write a card register */
+static int msb_write_regs(struct msb_data *msb, int offset, int len, void *buf)
+{
+ struct memstick_request *req = &msb->card->current_mrq;
+
+ if (msb->reg_addr.w_offset != offset ||
+ msb->reg_addr.w_length != len || !msb->addr_valid) {
+
+ msb->reg_addr.w_offset = offset;
+ msb->reg_addr.w_length = len;
+ msb->addr_valid = true;
+
+ memstick_init_req(req, MS_TPC_SET_RW_REG_ADRS,
+ &msb->reg_addr, sizeof(msb->reg_addr));
+ return 0;
+ }
+
+ memstick_init_req(req, MS_TPC_WRITE_REG, buf, len);
+ return 1;
+}
+
+/* Handler for absence of IO */
+static int h_msb_default_bad(struct memstick_dev *card,
+ struct memstick_request **mrq)
+{
+ return -ENXIO;
+}
+
+/*
+ * This function is a handler for reads of one page from the device.
+ * It writes output to msb->current_sg and takes the sector address from
+ * msb->regs.param. It can also be used to read extra data only; set the
+ * params accordingly.
+ */
+static int h_msb_read_page(struct memstick_dev *card,
+ struct memstick_request **out_mrq)
+{
+ struct msb_data *msb = memstick_get_drvdata(card);
+ struct memstick_request *mrq = *out_mrq = &card->current_mrq;
+ struct scatterlist sg[2];
+ u8 command, intreg;
+
+ if (mrq->error) {
+ dbg("read_page, unknown error");
+ return msb_exit_state_machine(msb, mrq->error);
+ }
+again:
+ switch (msb->state) {
+ case MSB_RP_SEND_BLOCK_ADDRESS:
+ /* msb_write_regs sometimes "fails" because it needs to update
+ the reg window, and thus returns a request for that.
+ We then stay in this state and retry */
+ if (!msb_write_regs(msb,
+ offsetof(struct ms_register, param),
+ sizeof(struct ms_param_register),
+ (unsigned char *)&msb->regs.param))
+ return 0;
+
+ msb->state = MSB_RP_SEND_READ_COMMAND;
+ return 0;
+
+ case MSB_RP_SEND_READ_COMMAND:
+ command = MS_CMD_BLOCK_READ;
+ memstick_init_req(mrq, MS_TPC_SET_CMD, &command, 1);
+ msb->state = MSB_RP_SEND_INT_REQ;
+ return 0;
+
+ case MSB_RP_SEND_INT_REQ:
+ msb->state = MSB_RP_RECEIVE_INT_REQ_RESULT;
+ /* If we don't actually need to send the INT read request (needed
+ only in serial mode), just fall through */
+ if (msb_read_int_reg(msb, -1))
+ return 0;
+ /* fallthrough */
+
+ case MSB_RP_RECEIVE_INT_REQ_RESULT:
+ intreg = mrq->data[0];
+ msb->regs.status.interrupt = intreg;
+
+ if (intreg & MEMSTICK_INT_CMDNAK)
+ return msb_exit_state_machine(msb, -EIO);
+
+ if (!(intreg & MEMSTICK_INT_CED)) {
+ msb->state = MSB_RP_SEND_INT_REQ;
+ goto again;
+ }
+
+ msb->int_polling = false;
+ msb->state = (intreg & MEMSTICK_INT_ERR) ?
+ MSB_RP_SEND_READ_STATUS_REG : MSB_RP_SEND_OOB_READ;
+ goto again;
+
+ case MSB_RP_SEND_READ_STATUS_REG:
+ /* read the status register to find the source of the INT_ERR */
+ if (!msb_read_regs(msb,
+ offsetof(struct ms_register, status),
+ sizeof(struct ms_status_register)))
+ return 0;
+
+ msb->state = MSB_RP_RECIVE_STATUS_REG;
+ return 0;
+
+ case MSB_RP_RECIVE_STATUS_REG:
+ msb->regs.status = *(struct ms_status_register *)mrq->data;
+ msb->state = MSB_RP_SEND_OOB_READ;
+ /* fallthrough */
+
+ case MSB_RP_SEND_OOB_READ:
+ if (!msb_read_regs(msb,
+ offsetof(struct ms_register, extra_data),
+ sizeof(struct ms_extra_data_register)))
+ return 0;
+
+ msb->state = MSB_RP_RECEIVE_OOB_READ;
+ return 0;
+
+ case MSB_RP_RECEIVE_OOB_READ:
+ msb->regs.extra_data =
+ *(struct ms_extra_data_register *) mrq->data;
+ msb->state = MSB_RP_SEND_READ_DATA;
+ /* fallthrough */
+
+ case MSB_RP_SEND_READ_DATA:
+ /* Skip this state if we only read the OOB */
+ if (msb->regs.param.cp == MEMSTICK_CP_EXTRA) {
+ msb->state = MSB_RP_RECEIVE_READ_DATA;
+ goto again;
+ }
+
+ sg_init_table(sg, ARRAY_SIZE(sg));
+ msb_sg_copy(msb->current_sg, sg, ARRAY_SIZE(sg),
+ msb->current_sg_offset,
+ msb->page_size);
+
+ memstick_init_req_sg(mrq, MS_TPC_READ_LONG_DATA, sg);
+ msb->state = MSB_RP_RECEIVE_READ_DATA;
+ return 0;
+
+ case MSB_RP_RECEIVE_READ_DATA:
+ if (!(msb->regs.status.interrupt & MEMSTICK_INT_ERR)) {
+ msb->current_sg_offset += msb->page_size;
+ return msb_exit_state_machine(msb, 0);
+ }
+
+ if (msb->regs.status.status1 & MEMSTICK_UNCORR_ERROR) {
+ dbg("read_page: uncorrectable error");
+ return msb_exit_state_machine(msb, -EBADMSG);
+ }
+
+ if (msb->regs.status.status1 & MEMSTICK_CORR_ERROR) {
+ dbg("read_page: correctable error");
+ msb->current_sg_offset += msb->page_size;
+ return msb_exit_state_machine(msb, -EUCLEAN);
+ } else {
+ dbg("read_page: INT error, but no status error bits");
+ return msb_exit_state_machine(msb, -EIO);
+ }
+ }
+
+ BUG();
+}
+
+/*
+ * Handler of writes of exactly one block.
+ * Takes address from msb->regs.param.
+ * Writes same extra data to blocks, also taken
+ * from msb->regs.extra
+ * Returns -EBADMSG if write fails due to uncorrectable error, or -EIO if
+ * device refuses to take the command or something else
+ */
+static int h_msb_write_block(struct memstick_dev *card,
+ struct memstick_request **out_mrq)
+{
+ struct msb_data *msb = memstick_get_drvdata(card);
+ struct memstick_request *mrq = *out_mrq = &card->current_mrq;
+ struct scatterlist sg[2];
+ u8 intreg, command;
+
+ if (mrq->error)
+ return msb_exit_state_machine(msb, mrq->error);
+
+again:
+ switch (msb->state) {
+
+ /* HACK: JMicron handling of TPCs between 8 and
+ * sizeof(memstick_request.data) bytes is broken due to a hardware
+ * bug in the PIO mode that is used for these TPCs.
+ * Therefore split the write
+ */
+
+ case MSB_WB_SEND_WRITE_PARAMS:
+ if (!msb_write_regs(msb,
+ offsetof(struct ms_register, param),
+ sizeof(struct ms_param_register),
+ &msb->regs.param))
+ return 0;
+
+ msb->state = MSB_WB_SEND_WRITE_OOB;
+ return 0;
+
+ case MSB_WB_SEND_WRITE_OOB:
+ if (!msb_write_regs(msb,
+ offsetof(struct ms_register, extra_data),
+ sizeof(struct ms_extra_data_register),
+ &msb->regs.extra_data))
+ return 0;
+ msb->state = MSB_WB_SEND_WRITE_COMMAND;
+ return 0;
+
+
+ case MSB_WB_SEND_WRITE_COMMAND:
+ command = MS_CMD_BLOCK_WRITE;
+ memstick_init_req(mrq, MS_TPC_SET_CMD, &command, 1);
+ msb->state = MSB_WB_SEND_INT_REQ;
+ return 0;
+
+ case MSB_WB_SEND_INT_REQ:
+ msb->state = MSB_WB_RECEIVE_INT_REQ;
+ if (msb_read_int_reg(msb, -1))
+ return 0;
+ /* fallthrough */
+
+ case MSB_WB_RECEIVE_INT_REQ:
+ intreg = mrq->data[0];
+ msb->regs.status.interrupt = intreg;
+
+ /* errors mean out of here, and fast... */
+ if (intreg & (MEMSTICK_INT_CMDNAK))
+ return msb_exit_state_machine(msb, -EIO);
+
+ if (intreg & MEMSTICK_INT_ERR)
+ return msb_exit_state_machine(msb, -EBADMSG);
+
+
+ /* for last page we need to poll CED */
+ if (msb->current_page == msb->pages_in_block) {
+ if (intreg & MEMSTICK_INT_CED)
+ return msb_exit_state_machine(msb, 0);
+ msb->state = MSB_WB_SEND_INT_REQ;
+ goto again;
+
+ }
+
+ /* for non-last page we need BREQ before writing next chunk */
+ if (!(intreg & MEMSTICK_INT_BREQ)) {
+ msb->state = MSB_WB_SEND_INT_REQ;
+ goto again;
+ }
+
+ msb->int_polling = false;
+ msb->state = MSB_WB_SEND_WRITE_DATA;
+ /* fallthrough */
+
+ case MSB_WB_SEND_WRITE_DATA:
+ sg_init_table(sg, ARRAY_SIZE(sg));
+
+ if (msb_sg_copy(msb->current_sg, sg, ARRAY_SIZE(sg),
+ msb->current_sg_offset,
+ msb->page_size) < msb->page_size)
+ return msb_exit_state_machine(msb, -EIO);
+
+ memstick_init_req_sg(mrq, MS_TPC_WRITE_LONG_DATA, sg);
+ mrq->need_card_int = 1;
+ msb->state = MSB_WB_RECEIVE_WRITE_CONFIRMATION;
+ return 0;
+
+ case MSB_WB_RECEIVE_WRITE_CONFIRMATION:
+ msb->current_page++;
+ msb->current_sg_offset += msb->page_size;
+ msb->state = MSB_WB_SEND_INT_REQ;
+ goto again;
+ default:
+ BUG();
+ }
+
+ return 0;
+}
+
+/*
+ * This function is used to send simple IO requests to the device, consisting
+ * of a register write plus a command
+ */
+static int h_msb_send_command(struct memstick_dev *card,
+ struct memstick_request **out_mrq)
+{
+ struct msb_data *msb = memstick_get_drvdata(card);
+ struct memstick_request *mrq = *out_mrq = &card->current_mrq;
+ u8 intreg;
+
+ if (mrq->error) {
+ dbg("send_command: unknown error");
+ return msb_exit_state_machine(msb, mrq->error);
+ }
+again:
+ switch (msb->state) {
+
+ /* HACK: see h_msb_write_block */
+ case MSB_SC_SEND_WRITE_PARAMS: /* write param register*/
+ if (!msb_write_regs(msb,
+ offsetof(struct ms_register, param),
+ sizeof(struct ms_param_register),
+ &msb->regs.param))
+ return 0;
+ msb->state = MSB_SC_SEND_WRITE_OOB;
+ return 0;
+
+ case MSB_SC_SEND_WRITE_OOB:
+ if (!msb->command_need_oob) {
+ msb->state = MSB_SC_SEND_COMMAND;
+ goto again;
+ }
+
+ if (!msb_write_regs(msb,
+ offsetof(struct ms_register, extra_data),
+ sizeof(struct ms_extra_data_register),
+ &msb->regs.extra_data))
+ return 0;
+
+ msb->state = MSB_SC_SEND_COMMAND;
+ return 0;
+
+ case MSB_SC_SEND_COMMAND:
+ memstick_init_req(mrq, MS_TPC_SET_CMD, &msb->command_value, 1);
+ msb->state = MSB_SC_SEND_INT_REQ;
+ return 0;
+
+ case MSB_SC_SEND_INT_REQ:
+ msb->state = MSB_SC_RECEIVE_INT_REQ;
+ if (msb_read_int_reg(msb, -1))
+ return 0;
+ /* fallthrough */
+
+ case MSB_SC_RECEIVE_INT_REQ:
+ intreg = mrq->data[0];
+
+ if (intreg & MEMSTICK_INT_CMDNAK)
+ return msb_exit_state_machine(msb, -EIO);
+ if (intreg & MEMSTICK_INT_ERR)
+ return msb_exit_state_machine(msb, -EBADMSG);
+
+ if (!(intreg & MEMSTICK_INT_CED)) {
+ msb->state = MSB_SC_SEND_INT_REQ;
+ goto again;
+ }
+
+ return msb_exit_state_machine(msb, 0);
+ }
+
+ BUG();
+}
+
+/* Small handler for card reset */
+static int h_msb_reset(struct memstick_dev *card,
+ struct memstick_request **out_mrq)
+{
+ u8 command = MS_CMD_RESET;
+ struct msb_data *msb = memstick_get_drvdata(card);
+ struct memstick_request *mrq = *out_mrq = &card->current_mrq;
+
+ if (mrq->error)
+ return msb_exit_state_machine(msb, mrq->error);
+
+ switch (msb->state) {
+ case MSB_RS_SEND:
+ memstick_init_req(mrq, MS_TPC_SET_CMD, &command, 1);
+ mrq->need_card_int = 0;
+ msb->state = MSB_RS_CONFIRM;
+ return 0;
+ case MSB_RS_CONFIRM:
+ return msb_exit_state_machine(msb, 0);
+ }
+ BUG();
+}
+
+/* This handler performs the serial->parallel switch */
+static int h_msb_parallel_switch(struct memstick_dev *card,
+ struct memstick_request **out_mrq)
+{
+ struct msb_data *msb = memstick_get_drvdata(card);
+ struct memstick_request *mrq = *out_mrq = &card->current_mrq;
+ struct memstick_host *host = card->host;
+
+ if (mrq->error) {
+ dbg("parallel_switch: error");
+ msb->regs.param.system &= ~MEMSTICK_SYS_PAM;
+ return msb_exit_state_machine(msb, mrq->error);
+ }
+
+ switch (msb->state) {
+ case MSB_PS_SEND_SWITCH_COMMAND:
+ /* Set the parallel interface on memstick side */
+ msb->regs.param.system |= MEMSTICK_SYS_PAM;
+
+ if (!msb_write_regs(msb,
+ offsetof(struct ms_register, param),
+ 1,
+ (unsigned char *)&msb->regs.param))
+ return 0;
+
+ msb->state = MSB_PS_SWICH_HOST;
+ return 0;
+
+ case MSB_PS_SWICH_HOST:
+ /* Set the parallel interface on our side and send a dummy request
+ to see if the card responds */
+ host->set_param(host, MEMSTICK_INTERFACE, MEMSTICK_PAR4);
+ memstick_init_req(mrq, MS_TPC_GET_INT, NULL, 1);
+ msb->state = MSB_PS_CONFIRM;
+ return 0;
+
+ case MSB_PS_CONFIRM:
+ return msb_exit_state_machine(msb, 0);
+ }
+
+ BUG();
+}
+
+static int msb_switch_to_parallel(struct msb_data *msb);
+
+/* Reset the card, to guard against hw errors being treated as bad blocks */
+static int msb_reset(struct msb_data *msb, bool full)
+{
+
+ bool was_parallel = msb->regs.param.system & MEMSTICK_SYS_PAM;
+ struct memstick_dev *card = msb->card;
+ struct memstick_host *host = card->host;
+ int error;
+
+ /* Reset the card */
+ msb->regs.param.system = MEMSTICK_SYS_BAMD;
+
+ if (full) {
+ error = host->set_param(host,
+ MEMSTICK_POWER, MEMSTICK_POWER_OFF);
+ if (error)
+ goto out_error;
+
+ msb_invalidate_reg_window(msb);
+
+ error = host->set_param(host,
+ MEMSTICK_POWER, MEMSTICK_POWER_ON);
+ if (error)
+ goto out_error;
+
+ error = host->set_param(host,
+ MEMSTICK_INTERFACE, MEMSTICK_SERIAL);
+ if (error) {
+out_error:
+ dbg("Failed to reset the host controller");
+ msb->read_only = true;
+ return -EFAULT;
+ }
+ }
+
+ error = msb_run_state_machine(msb, h_msb_reset);
+ if (error) {
+ dbg("Failed to reset the card");
+ msb->read_only = true;
+ return -ENODEV;
+ }
+
+ /* Set parallel mode */
+ if (was_parallel)
+ msb_switch_to_parallel(msb);
+ return 0;
+}
+
+/* Attempts to switch interface to parallel mode */
+static int msb_switch_to_parallel(struct msb_data *msb)
+{
+ int error;
+
+ error = msb_run_state_machine(msb, h_msb_parallel_switch);
+ if (error) {
+ pr_err("Switch to parallel failed");
+ msb->regs.param.system &= ~MEMSTICK_SYS_PAM;
+ msb_reset(msb, true);
+ return -EFAULT;
+ }
+
+ msb->caps |= MEMSTICK_CAP_AUTO_GET_INT;
+ return 0;
+}
+
+/* Changes overwrite flag on a page */
+static int msb_set_overwrite_flag(struct msb_data *msb,
+ u16 pba, u8 page, u8 flag)
+{
+ if (msb->read_only)
+ return -EROFS;
+
+ msb->regs.param.block_address = cpu_to_be16(pba);
+ msb->regs.param.page_address = page;
+ msb->regs.param.cp = MEMSTICK_CP_OVERWRITE;
+ msb->regs.extra_data.overwrite_flag = flag;
+ msb->command_value = MS_CMD_BLOCK_WRITE;
+ msb->command_need_oob = true;
+
+ dbg_verbose("changing overwrite flag to %02x for sector %d, page %d",
+ flag, pba, page);
+ return msb_run_state_machine(msb, h_msb_send_command);
+}
+
+static int msb_mark_bad(struct msb_data *msb, int pba)
+{
+ pr_notice("marking pba %d as bad", pba);
+ msb_reset(msb, true);
+ return msb_set_overwrite_flag(
+ msb, pba, 0, 0xFF & ~MEMSTICK_OVERWRITE_BKST);
+}
+
+static int msb_mark_page_bad(struct msb_data *msb, int pba, int page)
+{
+ dbg("marking page %d of pba %d as bad", page, pba);
+ msb_reset(msb, true);
+ return msb_set_overwrite_flag(msb,
+ pba, page, ~MEMSTICK_OVERWRITE_PGST0);
+}
+
+/* Erases one physical block */
+static int msb_erase_block(struct msb_data *msb, u16 pba)
+{
+ int error, try;
+ if (msb->read_only)
+ return -EROFS;
+
+ dbg_verbose("erasing pba %d", pba);
+
+ for (try = 1; try < 3; try++) {
+ msb->regs.param.block_address = cpu_to_be16(pba);
+ msb->regs.param.page_address = 0;
+ msb->regs.param.cp = MEMSTICK_CP_BLOCK;
+ msb->command_value = MS_CMD_BLOCK_ERASE;
+ msb->command_need_oob = false;
+
+
+ error = msb_run_state_machine(msb, h_msb_send_command);
+ if (!error || msb_reset(msb, true))
+ break;
+ }
+
+ if (error) {
+ pr_err("erase failed, marking pba %d as bad", pba);
+ msb_mark_bad(msb, pba);
+ return -EIO;
+ }
+
+ dbg_verbose("erase success, marking pba %d as unused", pba);
+ msb_mark_block_unused(msb, pba);
+ __set_bit(pba, msb->erased_blocks_bitmap);
+ return error;
+}
+
+/* Reads one page from device */
+static int msb_read_page(struct msb_data *msb,
+ u16 pba, u8 page, struct ms_extra_data_register *extra,
+ struct scatterlist *sg, int offset)
+{
+ int try, error;
+
+ if (pba == MS_BLOCK_INVALID) {
+ unsigned long flags;
+ struct sg_mapping_iter miter;
+ size_t len = msb->page_size;
+
+ dbg_verbose("read unmapped sector. returning 0xFF");
+
+ local_irq_save(flags);
+ sg_miter_start(&miter, sg, sg_nents(sg),
+ SG_MITER_ATOMIC | SG_MITER_TO_SG);
+
+ while (sg_miter_next(&miter) && len > 0) {
+
+ int chunklen;
+
+ if (offset && offset >= miter.length) {
+ offset -= miter.length;
+ continue;
+ }
+
+ chunklen = min(miter.length - offset, len);
+ memset(miter.addr + offset, 0xFF, chunklen);
+ len -= chunklen;
+ offset = 0;
+ }
+
+ sg_miter_stop(&miter);
+ local_irq_restore(flags);
+
+ if (offset)
+ return -EFAULT;
+
+ if (extra)
+ memset(extra, 0xFF, sizeof(*extra));
+ return 0;
+ }
+
+ if (pba >= msb->block_count) {
+ pr_err("BUG: attempt to read beyond the end of the card at pba %d", pba);
+ return -EINVAL;
+ }
+
+ for (try = 1; try < 3; try++) {
+ msb->regs.param.block_address = cpu_to_be16(pba);
+ msb->regs.param.page_address = page;
+ msb->regs.param.cp = MEMSTICK_CP_PAGE;
+
+ msb->current_sg = sg;
+ msb->current_sg_offset = offset;
+ error = msb_run_state_machine(msb, h_msb_read_page);
+
+
+ if (error == -EUCLEAN) {
+ pr_notice("correctable error on pba %d, page %d",
+ pba, page);
+ error = 0;
+ }
+
+ if (!error && extra)
+ *extra = msb->regs.extra_data;
+
+ if (!error || msb_reset(msb, true))
+ break;
+
+ }
+
+ /* Mark bad pages */
+ if (error == -EBADMSG) {
+ pr_err("uncorrectable error on read of pba %d, page %d",
+ pba, page);
+
+ if (msb->regs.extra_data.overwrite_flag &
+ MEMSTICK_OVERWRITE_PGST0)
+ msb_mark_page_bad(msb, pba, page);
+ return -EBADMSG;
+ }
+
+ if (error)
+ pr_err("read of pba %d, page %d failed with error %d",
+ pba, page, error);
+ return error;
+}
+
+/* Reads oob of page only */
+static int msb_read_oob(struct msb_data *msb, u16 pba, u16 page,
+ struct ms_extra_data_register *extra)
+{
+ int error;
+
+ BUG_ON(!extra);
+ msb->regs.param.block_address = cpu_to_be16(pba);
+ msb->regs.param.page_address = page;
+ msb->regs.param.cp = MEMSTICK_CP_EXTRA;
+
+ if (pba > msb->block_count) {
+ pr_err("BUG: attempt to read beyond the end of card at pba %d", pba);
+ return -EINVAL;
+ }
+
+ error = msb_run_state_machine(msb, h_msb_read_page);
+ *extra = msb->regs.extra_data;
+
+ if (error == -EUCLEAN) {
+ pr_notice("correctable error on pba %d, page %d",
+ pba, page);
+ return 0;
+ }
+
+ return error;
+}
+
+/* Reads a block and compares it with data contained in scatterlist orig_sg */
+static int msb_verify_block(struct msb_data *msb, u16 pba,
+ struct scatterlist *orig_sg, int offset)
+{
+ struct scatterlist sg;
+ int page = 0, error;
+
+ sg_init_one(&sg, msb->block_buffer, msb->block_size);
+
+ while (page < msb->pages_in_block) {
+
+ error = msb_read_page(msb, pba, page,
+ NULL, &sg, page * msb->page_size);
+ if (error)
+ return error;
+ page++;
+ }
+
+ if (msb_sg_compare_to_buffer(orig_sg, offset,
+ msb->block_buffer, msb->block_size))
+ return -EIO;
+ return 0;
+}
+
+/* Writes exactly one block + OOB */
+static int msb_write_block(struct msb_data *msb,
+ u16 pba, u32 lba, struct scatterlist *sg, int offset)
+{
+ int error, current_try = 1;
+ BUG_ON(sg->length < msb->page_size);
+
+ if (msb->read_only)
+ return -EROFS;
+
+ if (pba == MS_BLOCK_INVALID) {
+ pr_err(
+ "BUG: write: attempt to write MS_BLOCK_INVALID block");
+ return -EINVAL;
+ }
+
+ if (pba >= msb->block_count || lba >= msb->logical_block_count) {
+ pr_err(
+ "BUG: write: attempt to write beyond the end of device");
+ return -EINVAL;
+ }
+
+ if (msb_get_zone_from_lba(lba) != msb_get_zone_from_pba(pba)) {
+ pr_err("BUG: write: lba zone mismatch");
+ return -EINVAL;
+ }
+
+ if (pba == msb->boot_block_locations[0] ||
+ pba == msb->boot_block_locations[1]) {
+ pr_err("BUG: write: attempt to write to boot blocks!");
+ return -EINVAL;
+ }
+
+ while (1) {
+
+ if (msb->read_only)
+ return -EROFS;
+
+ msb->regs.param.cp = MEMSTICK_CP_BLOCK;
+ msb->regs.param.page_address = 0;
+ msb->regs.param.block_address = cpu_to_be16(pba);
+
+ msb->regs.extra_data.management_flag = 0xFF;
+ msb->regs.extra_data.overwrite_flag = 0xF8;
+ msb->regs.extra_data.logical_address = cpu_to_be16(lba);
+
+ msb->current_sg = sg;
+ msb->current_sg_offset = offset;
+ msb->current_page = 0;
+
+ error = msb_run_state_machine(msb, h_msb_write_block);
+
+ /* The sector we just wrote is assumed erased, since its pba
+ was erased. If it wasn't actually erased, the write will
+ still succeed but will only clear bits that were set in the
+ block; therefore verify that what we have written matches
+ what we expect.
+ We do trust the blocks that we erased ourselves */
+ if (!error && (verify_writes ||
+ !test_bit(pba, msb->erased_blocks_bitmap)))
+ error = msb_verify_block(msb, pba, sg, offset);
+
+ if (!error)
+ break;
+
+ if (current_try > 1 || msb_reset(msb, true))
+ break;
+
+ pr_err("write failed, trying to erase the pba %d", pba);
+ error = msb_erase_block(msb, pba);
+ if (error)
+ break;
+
+ current_try++;
+ }
+ return error;
+}
+
+/* Finds a free block for write replacement */
+static u16 msb_get_free_block(struct msb_data *msb, int zone)
+{
+ u16 pos;
+ int pba = zone * MS_BLOCKS_IN_ZONE;
+ int i;
+
+ get_random_bytes(&pos, sizeof(pos));
+
+ if (!msb->free_block_count[zone]) {
+ pr_err("NO free blocks in the zone %d, to use for a write, (media is WORN out) switching to RO mode", zone);
+ msb->read_only = true;
+ return MS_BLOCK_INVALID;
+ }
+
+ pos %= msb->free_block_count[zone];
+
+ dbg_verbose("have %d choices for a free block, selected randomally: %d",
+ msb->free_block_count[zone], pos);
+
+ pba = find_next_zero_bit(msb->used_blocks_bitmap,
+ msb->block_count, pba);
+ for (i = 0; i < pos; ++i)
+ pba = find_next_zero_bit(msb->used_blocks_bitmap,
+ msb->block_count, pba + 1);
+
+ dbg_verbose("result of the free blocks scan: pba %d", pba);
+
+ if (pba == msb->block_count || (msb_get_zone_from_pba(pba)) != zone) {
+ pr_err("BUG: cant get a free block");
+ msb->read_only = true;
+ return MS_BLOCK_INVALID;
+ }
+
+ msb_mark_block_used(msb, pba);
+ return pba;
+}
+
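msb_get_free_block() draws pos uniformly from [0, free_block_count[zone]) and then takes the pos-th clear bit inside the zone, spreading writes across the zone for wear leveling. The selection step over a plain bitmap (find_next_zero_bit replaced by a byte-wise scan):

#include <assert.h>

/* find index of the (pos+1)-th zero bit in bits[0..nbits), or -1 */
static int nth_zero_bit(const unsigned char *bits, int nbits, int pos)
{
	int i;

	for (i = 0; i < nbits; i++) {
		if (bits[i / 8] & (1u << (i % 8)))
			continue;       /* bit set: block in use */
		if (pos-- == 0)
			return i;       /* the randomly chosen free block */
	}
	return -1;
}

int main(void)
{
	unsigned char used[2] = { 0x0F, 0x00 };  /* blocks 0-3 in use */

	assert(nth_zero_bit(used, 16, 0) == 4);
	assert(nth_zero_bit(used, 16, 2) == 6);
	return 0;
}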
+static int msb_update_block(struct msb_data *msb, u16 lba,
+ struct scatterlist *sg, int offset)
+{
+ u16 pba, new_pba;
+ int error, try;
+
+ pba = msb->lba_to_pba_table[lba];
+ dbg_verbose("start of a block update at lba %d, pba %d", lba, pba);
+
+ if (pba != MS_BLOCK_INVALID) {
+ dbg_verbose("setting the update flag on the block");
+ msb_set_overwrite_flag(msb, pba, 0,
+ 0xFF & ~MEMSTICK_OVERWRITE_UDST);
+ }
+
+ for (try = 0; try < 3; try++) {
+ new_pba = msb_get_free_block(msb,
+ msb_get_zone_from_lba(lba));
+
+ if (new_pba == MS_BLOCK_INVALID) {
+ error = -EIO;
+ goto out;
+ }
+
+ dbg_verbose("block update: writing updated block to the pba %d",
+ new_pba);
+ error = msb_write_block(msb, new_pba, lba, sg, offset);
+ if (error == -EBADMSG) {
+ msb_mark_bad(msb, new_pba);
+ continue;
+ }
+
+ if (error)
+ goto out;
+
+ dbg_verbose("block update: erasing the old block");
+ msb_erase_block(msb, pba);
+ msb->lba_to_pba_table[lba] = new_pba;
+ return 0;
+ }
+out:
+ if (error) {
+ pr_err("block update error after %d tries, switching to r/o mode", try);
+ msb->read_only = true;
+ }
+ return error;
+}
+
+/* Converts endianness in the boot block for easy use */
+static void msb_fix_boot_page_endianness(struct ms_boot_page *p)
+{
+ p->header.block_id = be16_to_cpu(p->header.block_id);
+ p->header.format_reserved = be16_to_cpu(p->header.format_reserved);
+ p->entry.disabled_block.start_addr
+ = be32_to_cpu(p->entry.disabled_block.start_addr);
+ p->entry.disabled_block.data_size
+ = be32_to_cpu(p->entry.disabled_block.data_size);
+ p->entry.cis_idi.start_addr
+ = be32_to_cpu(p->entry.cis_idi.start_addr);
+ p->entry.cis_idi.data_size
+ = be32_to_cpu(p->entry.cis_idi.data_size);
+ p->attr.block_size = be16_to_cpu(p->attr.block_size);
+ p->attr.number_of_blocks = be16_to_cpu(p->attr.number_of_blocks);
+ p->attr.number_of_effective_blocks
+ = be16_to_cpu(p->attr.number_of_effective_blocks);
+ p->attr.page_size = be16_to_cpu(p->attr.page_size);
+ p->attr.memory_manufacturer_code
+ = be16_to_cpu(p->attr.memory_manufacturer_code);
+ p->attr.memory_device_code = be16_to_cpu(p->attr.memory_device_code);
+ p->attr.implemented_capacity
+ = be16_to_cpu(p->attr.implemented_capacity);
+ p->attr.controller_number = be16_to_cpu(p->attr.controller_number);
+ p->attr.controller_function = be16_to_cpu(p->attr.controller_function);
+}
+
+static int msb_read_boot_blocks(struct msb_data *msb)
+{
+ int pba = 0;
+ struct scatterlist sg;
+ struct ms_extra_data_register extra;
+ struct ms_boot_page *page;
+
+ msb->boot_block_locations[0] = MS_BLOCK_INVALID;
+ msb->boot_block_locations[1] = MS_BLOCK_INVALID;
+ msb->boot_block_count = 0;
+
+ dbg_verbose("Start of a scan for the boot blocks");
+
+ if (!msb->boot_page) {
+ page = kmalloc(sizeof(struct ms_boot_page)*2, GFP_KERNEL);
+ if (!page)
+ return -ENOMEM;
+
+ msb->boot_page = page;
+ } else
+ page = msb->boot_page;
+
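+	/* The real block count isn't known until a boot page is parsed,
+	   so use the size of the boot area as a safe bound for this scan */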
+ msb->block_count = MS_BLOCK_MAX_BOOT_ADDR;
+
+ for (pba = 0; pba < MS_BLOCK_MAX_BOOT_ADDR; pba++) {
+
+ sg_init_one(&sg, page, sizeof(*page));
+ if (msb_read_page(msb, pba, 0, &extra, &sg, 0)) {
+ dbg("boot scan: can't read pba %d", pba);
+ continue;
+ }
+
+ if (extra.management_flag & MEMSTICK_MANAGEMENT_SYSFLG) {
+			dbg("management flag doesn't indicate boot block %d",
+ pba);
+ continue;
+ }
+
+ if (be16_to_cpu(page->header.block_id) != MS_BLOCK_BOOT_ID) {
+			dbg("pba %d doesn't contain the boot block ID", pba);
+ continue;
+ }
+
+ msb_fix_boot_page_endianness(page);
+ msb->boot_block_locations[msb->boot_block_count] = pba;
+
+ page++;
+ msb->boot_block_count++;
+
+ if (msb->boot_block_count == 2)
+ break;
+ }
+
+ if (!msb->boot_block_count) {
+		pr_err("media doesn't contain a master page, aborting");
+ return -EIO;
+ }
+
+ dbg_verbose("End of scan for boot blocks");
+ return 0;
+}
+
+static int msb_read_bad_block_table(struct msb_data *msb, int block_nr)
+{
+ struct ms_boot_page *boot_block;
+ struct scatterlist sg;
+ u16 *buffer = NULL;
+ int offset = 0;
+ int i, error = 0;
+ int data_size, data_offset, page, page_offset, size_to_read;
+ u16 pba;
+
+ BUG_ON(block_nr > 1);
+ boot_block = &msb->boot_page[block_nr];
+ pba = msb->boot_block_locations[block_nr];
+
+ if (msb->boot_block_locations[block_nr] == MS_BLOCK_INVALID)
+ return -EINVAL;
+
+ data_size = boot_block->entry.disabled_block.data_size;
+ data_offset = sizeof(struct ms_boot_page) +
+ boot_block->entry.disabled_block.start_addr;
+ if (!data_size)
+ return 0;
+
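+	/* The table doesn't have to start on a page boundary, so compute
+	   the first page and read whole pages that cover the table */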
+ page = data_offset / msb->page_size;
+ page_offset = data_offset % msb->page_size;
+ size_to_read =
+ DIV_ROUND_UP(data_size + page_offset, msb->page_size) *
+ msb->page_size;
+
+ dbg("reading bad block of boot block at pba %d, offset %d len %d",
+ pba, data_offset, data_size);
+
+ buffer = kzalloc(size_to_read, GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
+
+ /* Read the buffer */
+ sg_init_one(&sg, buffer, size_to_read);
+
+ while (offset < size_to_read) {
+ error = msb_read_page(msb, pba, page, NULL, &sg, offset);
+ if (error)
+ goto out;
+
+ page++;
+ offset += msb->page_size;
+
+ if (page == msb->pages_in_block) {
+			pr_err("bad block table extends beyond the boot block");
+ break;
+ }
+ }
+
+ /* Process the bad block table */
+ for (i = page_offset; i < data_size / sizeof(u16); i++) {
+
+ u16 bad_block = be16_to_cpu(buffer[i]);
+
+ if (bad_block >= msb->block_count) {
+ dbg("bad block table contains invalid block %d",
+ bad_block);
+ continue;
+ }
+
+ if (test_bit(bad_block, msb->used_blocks_bitmap)) {
+ dbg("duplicate bad block %d in the table",
+ bad_block);
+ continue;
+ }
+
+ dbg("block %d is marked as factory bad", bad_block);
+ msb_mark_block_used(msb, bad_block);
+ }
+out:
+ kfree(buffer);
+ return error;
+}
+
+static int msb_ftl_initialize(struct msb_data *msb)
+{
+ int i;
+
+ if (msb->ftl_initialized)
+ return 0;
+
+ msb->zone_count = msb->block_count / MS_BLOCKS_IN_ZONE;
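+	/* Each 512 block zone exposes 496 logical blocks (the rest are
+	   presumably kept as spares for block replacement), and two more
+	   blocks are reserved for the boot blocks, hence the "- 2" */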
+ msb->logical_block_count = msb->zone_count * 496 - 2;
+
+ msb->used_blocks_bitmap = kzalloc(msb->block_count / 8, GFP_KERNEL);
+ msb->erased_blocks_bitmap = kzalloc(msb->block_count / 8, GFP_KERNEL);
+ msb->lba_to_pba_table =
+ kmalloc(msb->logical_block_count * sizeof(u16), GFP_KERNEL);
+
+ if (!msb->used_blocks_bitmap || !msb->lba_to_pba_table ||
+ !msb->erased_blocks_bitmap) {
+ kfree(msb->used_blocks_bitmap);
+ kfree(msb->lba_to_pba_table);
+ kfree(msb->erased_blocks_bitmap);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < msb->zone_count; i++)
+ msb->free_block_count[i] = MS_BLOCKS_IN_ZONE;
+
+ memset(msb->lba_to_pba_table, MS_BLOCK_INVALID,
+ msb->logical_block_count * sizeof(u16));
+
+ dbg("initial FTL tables created. Zone count = %d, Logical block count = %d",
+ msb->zone_count, msb->logical_block_count);
+
+ msb->ftl_initialized = true;
+ return 0;
+}
+
+static int msb_ftl_scan(struct msb_data *msb)
+{
+ u16 pba, lba, other_block;
+	u8 overwrite_flag, management_flag, other_overwrite_flag;
+ int error;
+ struct ms_extra_data_register extra;
+ u8 *overwrite_flags = kzalloc(msb->block_count, GFP_KERNEL);
+
+ if (!overwrite_flags)
+ return -ENOMEM;
+
+ dbg("Start of media scanning");
+ for (pba = 0; pba < msb->block_count; pba++) {
+
+ if (pba == msb->boot_block_locations[0] ||
+ pba == msb->boot_block_locations[1]) {
+ dbg_verbose("pba %05d -> [boot block]", pba);
+ msb_mark_block_used(msb, pba);
+ continue;
+ }
+
+ if (test_bit(pba, msb->used_blocks_bitmap)) {
+ dbg_verbose("pba %05d -> [factory bad]", pba);
+ continue;
+ }
+
+ memset(&extra, 0, sizeof(extra));
+ error = msb_read_oob(msb, pba, 0, &extra);
+
+ /* can't trust the page if we can't read the oob */
+ if (error == -EBADMSG) {
+			pr_notice("oob of pba %d damaged, will try to erase it",
+				pba);
+ msb_mark_block_used(msb, pba);
+ msb_erase_block(msb, pba);
+ continue;
+ } else if (error) {
+ pr_err("unknown error %d on read of oob of pba %d - aborting",
+ error, pba);
+
+ kfree(overwrite_flags);
+ return error;
+ }
+
+ lba = be16_to_cpu(extra.logical_address);
+		management_flag = extra.management_flag;
+ overwrite_flag = extra.overwrite_flag;
+ overwrite_flags[pba] = overwrite_flag;
+
+ /* Skip bad blocks */
+ if (!(overwrite_flag & MEMSTICK_OVERWRITE_BKST)) {
+ dbg("pba %05d -> [BAD]", pba);
+ msb_mark_block_used(msb, pba);
+ continue;
+ }
+
+ /* Skip system/drm blocks */
+		if ((management_flag & MEMSTICK_MANAGEMENT_FLAG_NORMAL) !=
+				MEMSTICK_MANAGEMENT_FLAG_NORMAL) {
+			dbg("pba %05d -> [reserved management flag %02x]",
+				pba, management_flag);
+ msb_mark_block_used(msb, pba);
+ continue;
+ }
+
+ /* Erase temporary tables */
+		if (!(management_flag & MEMSTICK_MANAGEMENT_ATFLG)) {
+ dbg("pba %05d -> [temp table] - will erase", pba);
+
+ msb_mark_block_used(msb, pba);
+ msb_erase_block(msb, pba);
+ continue;
+ }
+
+ if (lba == MS_BLOCK_INVALID) {
+ dbg_verbose("pba %05d -> [free]", pba);
+ continue;
+ }
+
+ msb_mark_block_used(msb, pba);
+
+		/* Block's LBA doesn't match its zone */
+ if (msb_get_zone_from_lba(lba) != msb_get_zone_from_pba(pba)) {
+ pr_notice("pba %05d -> [bad lba %05d] - will erase",
+ pba, lba);
+ msb_erase_block(msb, pba);
+ continue;
+ }
+
+ /* No collisions - great */
+ if (msb->lba_to_pba_table[lba] == MS_BLOCK_INVALID) {
+ dbg_verbose("pba %05d -> [lba %05d]", pba, lba);
+ msb->lba_to_pba_table[lba] = pba;
+ continue;
+ }
+
+ other_block = msb->lba_to_pba_table[lba];
+ other_overwrite_flag = overwrite_flags[other_block];
+
+ pr_notice("Collision between pba %d and pba %d",
+ pba, other_block);
+
+ if (!(overwrite_flag & MEMSTICK_OVERWRITE_UDST)) {
+			pr_notice("pba %d is marked as stable, using it", pba);
+ msb_erase_block(msb, other_block);
+ msb->lba_to_pba_table[lba] = pba;
+ continue;
+ }
+
+ if (!(other_overwrite_flag & MEMSTICK_OVERWRITE_UDST)) {
+			pr_notice("pba %d is marked as stable, using it",
+				other_block);
+ msb_erase_block(msb, pba);
+ continue;
+ }
+
+		pr_notice("collision between pba %d and pba %d, neither has the stable flag set, erasing pba %d",
+			pba, other_block, other_block);
+
+ msb_erase_block(msb, other_block);
+ msb->lba_to_pba_table[lba] = pba;
+ }
+
+ dbg("End of media scanning");
+ kfree(overwrite_flags);
+ return 0;
+}
+
+static void msb_cache_flush_timer(unsigned long data)
+{
+ struct msb_data *msb = (struct msb_data *)data;
+ msb->need_flush_cache = true;
+ queue_work(msb->io_queue, &msb->io_work);
+}
+
+static void msb_cache_discard(struct msb_data *msb)
+{
+ if (msb->cache_block_lba == MS_BLOCK_INVALID)
+ return;
+
+ del_timer_sync(&msb->cache_flush_timer);
+
+ dbg_verbose("Discarding the write cache");
+ msb->cache_block_lba = MS_BLOCK_INVALID;
+ bitmap_zero(&msb->valid_cache_bitmap, msb->pages_in_block);
+}
+
+static int msb_cache_init(struct msb_data *msb)
+{
+ setup_timer(&msb->cache_flush_timer, msb_cache_flush_timer,
+ (unsigned long)msb);
+
+ if (!msb->cache)
+ msb->cache = kzalloc(msb->block_size, GFP_KERNEL);
+ if (!msb->cache)
+ return -ENOMEM;
+
+ msb_cache_discard(msb);
+ return 0;
+}
+
+static int msb_cache_flush(struct msb_data *msb)
+{
+ struct scatterlist sg;
+ struct ms_extra_data_register extra;
+ int page, offset, error;
+ u16 pba, lba;
+
+ if (msb->read_only)
+ return -EROFS;
+
+ if (msb->cache_block_lba == MS_BLOCK_INVALID)
+ return 0;
+
+ lba = msb->cache_block_lba;
+ pba = msb->lba_to_pba_table[lba];
+
+ dbg_verbose("Flushing the write cache of pba %d (LBA %d)",
+ pba, msb->cache_block_lba);
+
+	sg_init_one(&sg, msb->cache, msb->block_size);
+
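+	/* The media is written a whole block at a time, so pages that were
+	   never put in the cache have to be filled in from the old copy
+	   first (a classic read-modify-write) */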
+ /* Read all missing pages in cache */
+ for (page = 0; page < msb->pages_in_block; page++) {
+
+ if (test_bit(page, &msb->valid_cache_bitmap))
+ continue;
+
+ offset = page * msb->page_size;
+
+ dbg_verbose("reading non-present sector %d of cache block %d",
+ page, lba);
+ error = msb_read_page(msb, pba, page, &extra, &sg, offset);
+
+ /* Bad pages are copied with 00 page status */
+ if (error == -EBADMSG) {
+ pr_err("read error on sector %d, contents probably damaged", page);
+ continue;
+ }
+
+ if (error)
+ return error;
+
+ if ((extra.overwrite_flag & MEMSTICK_OV_PG_NORMAL) !=
+ MEMSTICK_OV_PG_NORMAL) {
+ dbg("page %d is marked as bad", page);
+ continue;
+ }
+
+ set_bit(page, &msb->valid_cache_bitmap);
+ }
+
+ /* Write the cache now */
+ error = msb_update_block(msb, msb->cache_block_lba, &sg, 0);
+ pba = msb->lba_to_pba_table[msb->cache_block_lba];
+
+ /* Mark invalid pages */
+ if (!error) {
+ for (page = 0; page < msb->pages_in_block; page++) {
+
+ if (test_bit(page, &msb->valid_cache_bitmap))
+ continue;
+
+ dbg("marking page %d as containing damaged data",
+ page);
+ msb_set_overwrite_flag(msb,
+				pba, page, 0xFF & ~MEMSTICK_OV_PG_NORMAL);
+ }
+ }
+
+ msb_cache_discard(msb);
+ return error;
+}
+
+static int msb_cache_write(struct msb_data *msb, int lba,
+ int page, bool add_to_cache_only, struct scatterlist *sg, int offset)
+{
+ int error;
+ struct scatterlist sg_tmp[10];
+
+ if (msb->read_only)
+ return -EROFS;
+
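+	/* add_to_cache_only is used by the read path to opportunistically
+	   populate the cache: if the page doesn't belong to the currently
+	   cached block, just skip it instead of forcing a flush */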
+	if ((msb->cache_block_lba == MS_BLOCK_INVALID ||
+			lba != msb->cache_block_lba) && add_to_cache_only)
+		return 0;
+
+ /* If we need to write different block */
+	/* If we need to write a different block */
+ lba != msb->cache_block_lba) {
+ dbg_verbose("first flush the cache");
+ error = msb_cache_flush(msb);
+ if (error)
+ return error;
+ }
+
+ if (msb->cache_block_lba == MS_BLOCK_INVALID) {
+ msb->cache_block_lba = lba;
+ mod_timer(&msb->cache_flush_timer,
+ jiffies + msecs_to_jiffies(cache_flush_timeout));
+ }
+
+	dbg_verbose("Write of LBA %d page %d to cache", lba, page);
+
+ sg_init_table(sg_tmp, ARRAY_SIZE(sg_tmp));
+ msb_sg_copy(sg, sg_tmp, ARRAY_SIZE(sg_tmp), offset, msb->page_size);
+
+ sg_copy_to_buffer(sg_tmp, sg_nents(sg_tmp),
+ msb->cache + page * msb->page_size, msb->page_size);
+
+ set_bit(page, &msb->valid_cache_bitmap);
+ return 0;
+}
+
+static int msb_cache_read(struct msb_data *msb, int lba,
+ int page, struct scatterlist *sg, int offset)
+{
+ int pba = msb->lba_to_pba_table[lba];
+ struct scatterlist sg_tmp[10];
+ int error = 0;
+
+ if (lba == msb->cache_block_lba &&
+ test_bit(page, &msb->valid_cache_bitmap)) {
+
+ dbg_verbose("Read of LBA %d (pba %d) sector %d from cache",
+ lba, pba, page);
+
+ sg_init_table(sg_tmp, ARRAY_SIZE(sg_tmp));
+ msb_sg_copy(sg, sg_tmp, ARRAY_SIZE(sg_tmp),
+ offset, msb->page_size);
+ sg_copy_from_buffer(sg_tmp, sg_nents(sg_tmp),
+ msb->cache + msb->page_size * page,
+ msb->page_size);
+ } else {
+ dbg_verbose("Read of LBA %d (pba %d) sector %d from device",
+ lba, pba, page);
+
+ error = msb_read_page(msb, pba, page, NULL, sg, offset);
+ if (error)
+ return error;
+
+ msb_cache_write(msb, lba, page, true, sg, offset);
+ }
+ return error;
+}
+
+/* Emulated geometry table
+ * The exact contents of this table aren't that important;
+ * one could put different values here, provided they still
+ * cover the whole disk.
+ * The 64 MB entry is what Windows reports for my 64M memstick */
+
+static const struct chs_entry chs_table[] = {
+/* size  sectors  cylinders  heads */
+ { 4, 16, 247, 2 },
+ { 8, 16, 495, 2 },
+ { 16, 16, 495, 4 },
+ { 32, 16, 991, 4 },
+ { 64, 16, 991, 8 },
+ {128, 16, 991, 16 },
+ { 0 }
+};
+
+/* Load information about the card */
+static int msb_init_card(struct memstick_dev *card)
+{
+ struct msb_data *msb = memstick_get_drvdata(card);
+ struct memstick_host *host = card->host;
+ struct ms_boot_page *boot_block;
+ int error = 0, i, raw_size_in_megs;
+
+ msb->caps = 0;
+
+ if (card->id.class >= MEMSTICK_CLASS_ROM &&
+ card->id.class <= MEMSTICK_CLASS_ROM)
+ msb->read_only = true;
+
+ msb->state = -1;
+ error = msb_reset(msb, false);
+ if (error)
+ return error;
+
+	/* Due to a bug in the JMicron driver written by Alex Dubov,
+	   serial mode barely works, so we switch to parallel mode
+	   right away */
+ if (host->caps & MEMSTICK_CAP_PAR4)
+ msb_switch_to_parallel(msb);
+
+ msb->page_size = sizeof(struct ms_boot_page);
+
+ /* Read the boot page */
+ error = msb_read_boot_blocks(msb);
+ if (error)
+ return -EIO;
+
+ boot_block = &msb->boot_page[0];
+
+	/* Save interesting attributes from the boot page */
+ msb->block_count = boot_block->attr.number_of_blocks;
+ msb->page_size = boot_block->attr.page_size;
+
+ msb->pages_in_block = boot_block->attr.block_size * 2;
+ msb->block_size = msb->page_size * msb->pages_in_block;
+
+ if (msb->page_size > PAGE_SIZE) {
+		/* this isn't supported by Linux at all, anyway */
+		dbg("device page size %d isn't supported", msb->page_size);
+ return -EINVAL;
+ }
+
+ msb->block_buffer = kzalloc(msb->block_size, GFP_KERNEL);
+ if (!msb->block_buffer)
+ return -ENOMEM;
+
+ raw_size_in_megs = (msb->block_size * msb->block_count) >> 20;
+
+ for (i = 0; chs_table[i].size; i++) {
+
+ if (chs_table[i].size != raw_size_in_megs)
+ continue;
+
+ msb->geometry.cylinders = chs_table[i].cyl;
+ msb->geometry.heads = chs_table[i].head;
+ msb->geometry.sectors = chs_table[i].sec;
+ break;
+ }
+
+ if (boot_block->attr.transfer_supporting == 1)
+ msb->caps |= MEMSTICK_CAP_PAR4;
+
+ if (boot_block->attr.device_type & 0x03)
+ msb->read_only = true;
+
+ dbg("Total block count = %d", msb->block_count);
+ dbg("Each block consists of %d pages", msb->pages_in_block);
+ dbg("Page size = %d bytes", msb->page_size);
+ dbg("Parallel mode supported: %d", !!(msb->caps & MEMSTICK_CAP_PAR4));
+ dbg("Read only: %d", msb->read_only);
+
+#if 0
+ /* Now we can switch the interface */
+ if (host->caps & msb->caps & MEMSTICK_CAP_PAR4)
+ msb_switch_to_parallel(msb);
+#endif
+
+ error = msb_cache_init(msb);
+ if (error)
+ return error;
+
+ error = msb_ftl_initialize(msb);
+ if (error)
+ return error;
+
+ /* Read the bad block table */
+ error = msb_read_bad_block_table(msb, 0);
+
+ if (error && error != -ENOMEM) {
+ dbg("failed to read bad block table from primary boot block, trying from backup");
+ error = msb_read_bad_block_table(msb, 1);
+ }
+
+ if (error)
+ return error;
+
+ /* *drum roll* Scan the media */
+ error = msb_ftl_scan(msb);
+ if (error) {
+ pr_err("Scan of media failed");
+ return error;
+ }
+
+ return 0;
+}
+
+static int msb_do_write_request(struct msb_data *msb, int lba,
+	int page, struct scatterlist *sg, size_t len, int *successfully_written)
+{
+ int error = 0;
+ off_t offset = 0;
+	*successfully_written = 0;
+
+ while (offset < len) {
+ if (page == 0 && len - offset >= msb->block_size) {
+
+ if (msb->cache_block_lba == lba)
+ msb_cache_discard(msb);
+
+ dbg_verbose("Writing whole lba %d", lba);
+ error = msb_update_block(msb, lba, sg, offset);
+ if (error)
+ return error;
+
+ offset += msb->block_size;
+			*successfully_written += msb->block_size;
+ lba++;
+ continue;
+ }
+
+ error = msb_cache_write(msb, lba, page, false, sg, offset);
+ if (error)
+ return error;
+
+ offset += msb->page_size;
+		*successfully_written += msb->page_size;
+
+ page++;
+ if (page == msb->pages_in_block) {
+ page = 0;
+ lba++;
+ }
+ }
+ return 0;
+}
+
+static int msb_do_read_request(struct msb_data *msb, int lba,
+	int page, struct scatterlist *sg, int len, int *successfully_read)
+{
+ int error = 0;
+ int offset = 0;
+	*successfully_read = 0;
+
+ while (offset < len) {
+
+ error = msb_cache_read(msb, lba, page, sg, offset);
+ if (error)
+ return error;
+
+ offset += msb->page_size;
+		*successfully_read += msb->page_size;
+
+ page++;
+ if (page == msb->pages_in_block) {
+ page = 0;
+ lba++;
+ }
+ }
+ return 0;
+}
+
+static void msb_io_work(struct work_struct *work)
+{
+ struct msb_data *msb = container_of(work, struct msb_data, io_work);
+ int page, error, len;
+ sector_t lba;
+ unsigned long flags;
+ struct scatterlist *sg = msb->prealloc_sg;
+
+ dbg_verbose("IO: work started");
+
+ while (1) {
+ spin_lock_irqsave(&msb->q_lock, flags);
+
+ if (msb->need_flush_cache) {
+ msb->need_flush_cache = false;
+ spin_unlock_irqrestore(&msb->q_lock, flags);
+ msb_cache_flush(msb);
+ continue;
+ }
+
+ if (!msb->req) {
+ msb->req = blk_fetch_request(msb->queue);
+ if (!msb->req) {
+				dbg_verbose("IO: no more requests, exiting");
+ spin_unlock_irqrestore(&msb->q_lock, flags);
+ return;
+ }
+ }
+
+ spin_unlock_irqrestore(&msb->q_lock, flags);
+
+ /* If card was removed meanwhile */
+ if (!msb->req)
+ return;
+
+ /* process the request */
+ dbg_verbose("IO: processing new request");
+ blk_rq_map_sg(msb->queue, msb->req, sg);
+
+ lba = blk_rq_pos(msb->req);
+
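+		/* Convert the 512 byte sector address of the request into a
+		   (logical block, page) pair: sector_div turns sectors into
+		   pages, then do_div leaves the logical block number in lba
+		   and returns the page index within that block */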
+ sector_div(lba, msb->page_size / 512);
+ page = do_div(lba, msb->pages_in_block);
+
+ if (rq_data_dir(msb->req) == READ)
+ error = msb_do_read_request(msb, lba, page, sg,
+ blk_rq_bytes(msb->req), &len);
+ else
+ error = msb_do_write_request(msb, lba, page, sg,
+ blk_rq_bytes(msb->req), &len);
+
+ spin_lock_irqsave(&msb->q_lock, flags);
+
+ if (len)
+ if (!__blk_end_request(msb->req, 0, len))
+ msb->req = NULL;
+
+ if (error && msb->req) {
+ dbg_verbose("IO: ending one sector of the request with error");
+ if (!__blk_end_request(msb->req, error, msb->page_size))
+ msb->req = NULL;
+ }
+
+ if (msb->req)
+ dbg_verbose("IO: request still pending");
+
+ spin_unlock_irqrestore(&msb->q_lock, flags);
+ }
+}
+
+static DEFINE_IDR(msb_disk_idr); /* set of used disk numbers */
+static DEFINE_MUTEX(msb_disk_lock); /* protects against races in open/release */
+
+static int msb_bd_open(struct block_device *bdev, fmode_t mode)
+{
+ struct gendisk *disk = bdev->bd_disk;
+ struct msb_data *msb = disk->private_data;
+
+ dbg_verbose("block device open");
+
+ mutex_lock(&msb_disk_lock);
+
+ if (msb && msb->card)
+ msb->usage_count++;
+
+ mutex_unlock(&msb_disk_lock);
+ return 0;
+}
+
+static void msb_data_clear(struct msb_data *msb)
+{
+ kfree(msb->boot_page);
+ kfree(msb->used_blocks_bitmap);
+ kfree(msb->lba_to_pba_table);
+ kfree(msb->cache);
+ msb->card = NULL;
+}
+
+static int msb_disk_release(struct gendisk *disk)
+{
+ struct msb_data *msb = disk->private_data;
+
+ dbg_verbose("block device release");
+ mutex_lock(&msb_disk_lock);
+
+ if (msb) {
+ if (msb->usage_count)
+ msb->usage_count--;
+
+ if (!msb->usage_count) {
+ disk->private_data = NULL;
+ idr_remove(&msb_disk_idr, msb->disk_id);
+ put_disk(disk);
+ kfree(msb);
+ }
+ }
+ mutex_unlock(&msb_disk_lock);
+ return 0;
+}
+
+static void msb_bd_release(struct gendisk *disk, fmode_t mode)
+{
+ msb_disk_release(disk);
+}
+
+static int msb_bd_getgeo(struct block_device *bdev,
+ struct hd_geometry *geo)
+{
+ struct msb_data *msb = bdev->bd_disk->private_data;
+ *geo = msb->geometry;
+ return 0;
+}
+
+static int msb_prepare_req(struct request_queue *q, struct request *req)
+{
+ if (req->cmd_type != REQ_TYPE_FS &&
+ req->cmd_type != REQ_TYPE_BLOCK_PC) {
+ blk_dump_rq_flags(req, "MS unsupported request");
+ return BLKPREP_KILL;
+ }
+ req->cmd_flags |= REQ_DONTPREP;
+ return BLKPREP_OK;
+}
+
+static void msb_submit_req(struct request_queue *q)
+{
+ struct memstick_dev *card = q->queuedata;
+ struct msb_data *msb = memstick_get_drvdata(card);
+ struct request *req = NULL;
+
+ dbg_verbose("Submit request");
+
+ if (msb->card_dead) {
+ dbg("Refusing requests on removed card");
+
+ WARN_ON(!msb->io_queue_stopped);
+
+ while ((req = blk_fetch_request(q)) != NULL)
+ __blk_end_request_all(req, -ENODEV);
+ return;
+ }
+
+ if (msb->req)
+ return;
+
+ if (!msb->io_queue_stopped)
+ queue_work(msb->io_queue, &msb->io_work);
+}
+
+static int msb_check_card(struct memstick_dev *card)
+{
+ struct msb_data *msb = memstick_get_drvdata(card);
+ return (msb->card_dead == 0);
+}
+
+static void msb_stop(struct memstick_dev *card)
+{
+ struct msb_data *msb = memstick_get_drvdata(card);
+ unsigned long flags;
+
+ dbg("Stopping all msblock IO");
+
+ spin_lock_irqsave(&msb->q_lock, flags);
+ blk_stop_queue(msb->queue);
+ msb->io_queue_stopped = true;
+ spin_unlock_irqrestore(&msb->q_lock, flags);
+
+ del_timer_sync(&msb->cache_flush_timer);
+ flush_workqueue(msb->io_queue);
+
+ if (msb->req) {
+ spin_lock_irqsave(&msb->q_lock, flags);
+ blk_requeue_request(msb->queue, msb->req);
+ msb->req = NULL;
+ spin_unlock_irqrestore(&msb->q_lock, flags);
+ }
+}
+
+static void msb_start(struct memstick_dev *card)
+{
+ struct msb_data *msb = memstick_get_drvdata(card);
+ unsigned long flags;
+
+ dbg("Resuming IO from msblock");
+
+ msb_invalidate_reg_window(msb);
+
+ spin_lock_irqsave(&msb->q_lock, flags);
+ if (!msb->io_queue_stopped || msb->card_dead) {
+ spin_unlock_irqrestore(&msb->q_lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(&msb->q_lock, flags);
+
+	/* Kick a cache flush anyway, it's harmless */
+ msb->need_flush_cache = true;
+ msb->io_queue_stopped = false;
+
+ spin_lock_irqsave(&msb->q_lock, flags);
+ blk_start_queue(msb->queue);
+ spin_unlock_irqrestore(&msb->q_lock, flags);
+
+ queue_work(msb->io_queue, &msb->io_work);
+}
+
+static const struct block_device_operations msb_bdops = {
+ .open = msb_bd_open,
+ .release = msb_bd_release,
+ .getgeo = msb_bd_getgeo,
+ .owner = THIS_MODULE
+};
+
+/* Registers the block device */
+static int msb_init_disk(struct memstick_dev *card)
+{
+ struct msb_data *msb = memstick_get_drvdata(card);
+ struct memstick_host *host = card->host;
+ int rc;
+ u64 limit = BLK_BOUNCE_HIGH;
+ unsigned long capacity;
+
+ if (host->dev.dma_mask && *(host->dev.dma_mask))
+ limit = *(host->dev.dma_mask);
+
+ mutex_lock(&msb_disk_lock);
+ msb->disk_id = idr_alloc(&msb_disk_idr, card, 0, 256, GFP_KERNEL);
+ mutex_unlock(&msb_disk_lock);
+
+ if (msb->disk_id < 0)
+ return msb->disk_id;
+
+ msb->disk = alloc_disk(0);
+ if (!msb->disk) {
+ rc = -ENOMEM;
+ goto out_release_id;
+ }
+
+ msb->queue = blk_init_queue(msb_submit_req, &msb->q_lock);
+ if (!msb->queue) {
+ rc = -ENOMEM;
+ goto out_put_disk;
+ }
+
+ msb->queue->queuedata = card;
+ blk_queue_prep_rq(msb->queue, msb_prepare_req);
+
+ blk_queue_bounce_limit(msb->queue, limit);
+ blk_queue_max_hw_sectors(msb->queue, MS_BLOCK_MAX_PAGES);
+ blk_queue_max_segments(msb->queue, MS_BLOCK_MAX_SEGS);
+ blk_queue_max_segment_size(msb->queue,
+ MS_BLOCK_MAX_PAGES * msb->page_size);
+ blk_queue_logical_block_size(msb->queue, msb->page_size);
+
+ sprintf(msb->disk->disk_name, "msblk%d", msb->disk_id);
+ msb->disk->fops = &msb_bdops;
+ msb->disk->private_data = msb;
+ msb->disk->queue = msb->queue;
+ msb->disk->driverfs_dev = &card->dev;
+ msb->disk->flags |= GENHD_FL_EXT_DEVT;
+
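+	/* Disk capacity in 512 byte sectors: logical blocks times pages
+	   per block times sectors per page */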
+ capacity = msb->pages_in_block * msb->logical_block_count;
+ capacity *= (msb->page_size / 512);
+ set_capacity(msb->disk, capacity);
+ dbg("Set total disk size to %lu sectors", capacity);
+
+ msb->usage_count = 1;
+ msb->io_queue = alloc_ordered_workqueue("ms_block", WQ_MEM_RECLAIM);
+ INIT_WORK(&msb->io_work, msb_io_work);
+ sg_init_table(msb->prealloc_sg, MS_BLOCK_MAX_SEGS+1);
+
+ if (msb->read_only)
+ set_disk_ro(msb->disk, 1);
+
+ msb_start(card);
+ add_disk(msb->disk);
+ dbg("Disk added");
+ return 0;
+
+out_put_disk:
+ put_disk(msb->disk);
+out_release_id:
+ mutex_lock(&msb_disk_lock);
+ idr_remove(&msb_disk_idr, msb->disk_id);
+ mutex_unlock(&msb_disk_lock);
+ return rc;
+}
+
+static int msb_probe(struct memstick_dev *card)
+{
+ struct msb_data *msb;
+ int rc = 0;
+
+ msb = kzalloc(sizeof(struct msb_data), GFP_KERNEL);
+ if (!msb)
+ return -ENOMEM;
+ memstick_set_drvdata(card, msb);
+ msb->card = card;
+ spin_lock_init(&msb->q_lock);
+
+ rc = msb_init_card(card);
+ if (rc)
+ goto out_free;
+
+ rc = msb_init_disk(card);
+ if (!rc) {
+ card->check = msb_check_card;
+ card->stop = msb_stop;
+ card->start = msb_start;
+ return 0;
+ }
+out_free:
+ memstick_set_drvdata(card, NULL);
+ msb_data_clear(msb);
+ kfree(msb);
+ return rc;
+}
+
+static void msb_remove(struct memstick_dev *card)
+{
+ struct msb_data *msb = memstick_get_drvdata(card);
+ unsigned long flags;
+
+ if (!msb->io_queue_stopped)
+ msb_stop(card);
+
+ dbg("Removing the disk device");
+
+	/* Fail all pending and new requests from now on */
+ spin_lock_irqsave(&msb->q_lock, flags);
+ msb->card_dead = true;
+ blk_start_queue(msb->queue);
+ spin_unlock_irqrestore(&msb->q_lock, flags);
+
+ /* Remove the disk */
+ del_gendisk(msb->disk);
+ blk_cleanup_queue(msb->queue);
+ msb->queue = NULL;
+
+ mutex_lock(&msb_disk_lock);
+ msb_data_clear(msb);
+ mutex_unlock(&msb_disk_lock);
+
+ msb_disk_release(msb->disk);
+ memstick_set_drvdata(card, NULL);
+}
+
+#ifdef CONFIG_PM
+
+static int msb_suspend(struct memstick_dev *card, pm_message_t state)
+{
+ msb_stop(card);
+ return 0;
+}
+
+static int msb_resume(struct memstick_dev *card)
+{
+ struct msb_data *msb = memstick_get_drvdata(card);
+ struct msb_data *new_msb = NULL;
+ bool card_dead = true;
+
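+	/* Reinitialize the card from scratch and compare its boot page and
+	   FTL tables against the pre-suspend state. Any mismatch means the
+	   card was removed or replaced while we slept, and it has to be
+	   treated as dead rather than risk corrupting it */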
+#ifndef CONFIG_MEMSTICK_UNSAFE_RESUME
+ msb->card_dead = true;
+ return 0;
+#endif
+ mutex_lock(&card->host->lock);
+
+ new_msb = kzalloc(sizeof(struct msb_data), GFP_KERNEL);
+ if (!new_msb)
+ goto out;
+
+ new_msb->card = card;
+ memstick_set_drvdata(card, new_msb);
+ spin_lock_init(&new_msb->q_lock);
+ sg_init_table(msb->prealloc_sg, MS_BLOCK_MAX_SEGS+1);
+
+ if (msb_init_card(card))
+ goto out;
+
+ if (msb->block_size != new_msb->block_size)
+ goto out;
+
+ if (memcmp(msb->boot_page, new_msb->boot_page,
+ sizeof(struct ms_boot_page)))
+ goto out;
+
+ if (msb->logical_block_count != new_msb->logical_block_count ||
+ memcmp(msb->lba_to_pba_table, new_msb->lba_to_pba_table,
+ msb->logical_block_count))
+ goto out;
+
+ if (msb->block_count != new_msb->block_count ||
+ memcmp(msb->used_blocks_bitmap, new_msb->used_blocks_bitmap,
+ msb->block_count / 8))
+ goto out;
+
+ card_dead = false;
+out:
+ if (card_dead)
+ dbg("Card was removed/replaced during suspend");
+
+ msb->card_dead = card_dead;
+ memstick_set_drvdata(card, msb);
+
+ if (new_msb) {
+ msb_data_clear(new_msb);
+ kfree(new_msb);
+ }
+
+ msb_start(card);
+ mutex_unlock(&card->host->lock);
+ return 0;
+}
+#else
+
+#define msb_suspend NULL
+#define msb_resume NULL
+
+#endif /* CONFIG_PM */
+
+static struct memstick_device_id msb_id_tbl[] = {
+ {MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_LEGACY, MEMSTICK_CATEGORY_STORAGE,
+ MEMSTICK_CLASS_FLASH},
+
+ {MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_LEGACY, MEMSTICK_CATEGORY_STORAGE,
+ MEMSTICK_CLASS_ROM},
+
+ {MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_LEGACY, MEMSTICK_CATEGORY_STORAGE,
+ MEMSTICK_CLASS_RO},
+
+ {MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_LEGACY, MEMSTICK_CATEGORY_STORAGE,
+ MEMSTICK_CLASS_WP},
+
+ {MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_DUO, MEMSTICK_CATEGORY_STORAGE_DUO,
+ MEMSTICK_CLASS_DUO},
+ {}
+};
+MODULE_DEVICE_TABLE(memstick, msb_id_tbl);
+
+static struct memstick_driver msb_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE
+ },
+ .id_table = msb_id_tbl,
+ .probe = msb_probe,
+ .remove = msb_remove,
+ .suspend = msb_suspend,
+ .resume = msb_resume
+};
+
+static int major;
+
+static int __init msb_init(void)
+{
+ int rc = register_blkdev(0, DRIVER_NAME);
+
+ if (rc < 0) {
+ pr_err("failed to register major (error %d)\n", rc);
+ return rc;
+ }
+
+ major = rc;
+ rc = memstick_register_driver(&msb_driver);
+ if (rc) {
+ unregister_blkdev(major, DRIVER_NAME);
+ pr_err("failed to register memstick driver (error %d)\n", rc);
+ }
+
+ return rc;
+}
+
+static void __exit msb_exit(void)
+{
+ memstick_unregister_driver(&msb_driver);
+ unregister_blkdev(major, DRIVER_NAME);
+ idr_destroy(&msb_disk_idr);
+}
+
+module_init(msb_init);
+module_exit(msb_exit);
+
+module_param(cache_flush_timeout, int, S_IRUGO);
+MODULE_PARM_DESC(cache_flush_timeout,
+ "Cache flush timeout in msec (1000 default)");
+module_param(debug, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug, "Debug level (0-2)");
+
+module_param(verify_writes, bool, S_IRUGO);
+MODULE_PARM_DESC(verify_writes, "Read back and check all data that is written");
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Maxim Levitsky");
+MODULE_DESCRIPTION("Sony MemoryStick block device driver");
diff --git a/drivers/memstick/core/ms_block.h b/drivers/memstick/core/ms_block.h
new file mode 100644
index 0000000..96e6375
--- /dev/null
+++ b/drivers/memstick/core/ms_block.h
@@ -0,0 +1,290 @@
+/*
+ * ms_block.h - Sony MemoryStick (legacy) storage support
+ *
+ * Copyright (C) 2013 Maxim Levitsky <maximlevitsky@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Minor portions of the driver are copied from mspro_block.c which is
+ * Copyright (C) 2007 Alex Dubov <oakad@yahoo.com>
+ *
+ * The MS structures were also copied from an old broken driver by the
+ * same author; these probably come from the MS spec
+ *
+ */
+
+#ifndef MS_BLOCK_NEW_H
+#define MS_BLOCK_NEW_H
+
+#define MS_BLOCK_MAX_SEGS 32
+#define MS_BLOCK_MAX_PAGES ((2 << 16) - 1)
+
+#define MS_BLOCK_MAX_BOOT_ADDR 0x000c
+#define MS_BLOCK_BOOT_ID 0x0001
+#define MS_BLOCK_INVALID 0xffff
+#define MS_MAX_ZONES 16
+#define MS_BLOCKS_IN_ZONE 512
+
+#define MS_BLOCK_MAP_LINE_SZ 16
+#define MS_BLOCK_PART_SHIFT 3
+
+
+#define MEMSTICK_UNCORR_ERROR (MEMSTICK_STATUS1_UCFG | \
+ MEMSTICK_STATUS1_UCEX | MEMSTICK_STATUS1_UCDT)
+
+#define MEMSTICK_CORR_ERROR (MEMSTICK_STATUS1_FGER | MEMSTICK_STATUS1_EXER | \
+ MEMSTICK_STATUS1_DTER)
+
+#define MEMSTICK_INT_ERROR (MEMSTICK_INT_CMDNAK | MEMSTICK_INT_ERR)
+
+#define MEMSTICK_OVERWRITE_FLAG_NORMAL \
+ (MEMSTICK_OVERWRITE_PGST1 | \
+ MEMSTICK_OVERWRITE_PGST0 | \
+ MEMSTICK_OVERWRITE_BKST)
+
+#define MEMSTICK_OV_PG_NORMAL \
+ (MEMSTICK_OVERWRITE_PGST1 | MEMSTICK_OVERWRITE_PGST0)
+
+#define MEMSTICK_MANAGEMENT_FLAG_NORMAL \
+	(MEMSTICK_MANAGEMENT_SYSFLG | \
+	 MEMSTICK_MANAGEMENT_SCMS1 | \
+	 MEMSTICK_MANAGEMENT_SCMS0)
+
+struct ms_boot_header {
+ unsigned short block_id;
+ unsigned short format_reserved;
+ unsigned char reserved0[184];
+ unsigned char data_entry;
+ unsigned char reserved1[179];
+} __packed;
+
+
+struct ms_system_item {
+ unsigned int start_addr;
+ unsigned int data_size;
+ unsigned char data_type_id;
+ unsigned char reserved[3];
+} __packed;
+
+struct ms_system_entry {
+ struct ms_system_item disabled_block;
+ struct ms_system_item cis_idi;
+ unsigned char reserved[24];
+} __packed;
+
+struct ms_boot_attr_info {
+ unsigned char memorystick_class;
+ unsigned char format_unique_value1;
+ unsigned short block_size;
+ unsigned short number_of_blocks;
+ unsigned short number_of_effective_blocks;
+ unsigned short page_size;
+ unsigned char extra_data_size;
+ unsigned char format_unique_value2;
+ unsigned char assembly_time[8];
+ unsigned char format_unique_value3;
+ unsigned char serial_number[3];
+ unsigned char assembly_manufacturer_code;
+ unsigned char assembly_model_code[3];
+ unsigned short memory_manufacturer_code;
+ unsigned short memory_device_code;
+ unsigned short implemented_capacity;
+ unsigned char format_unique_value4[2];
+ unsigned char vcc;
+ unsigned char vpp;
+ unsigned short controller_number;
+ unsigned short controller_function;
+ unsigned char reserved0[9];
+ unsigned char transfer_supporting;
+ unsigned short format_unique_value5;
+ unsigned char format_type;
+ unsigned char memorystick_application;
+ unsigned char device_type;
+ unsigned char reserved1[22];
+	unsigned char format_unique_value6[2];
+ unsigned char reserved2[15];
+} __packed;
+
+struct ms_cis_idi {
+ unsigned short general_config;
+ unsigned short logical_cylinders;
+ unsigned short reserved0;
+ unsigned short logical_heads;
+ unsigned short track_size;
+ unsigned short page_size;
+ unsigned short pages_per_track;
+ unsigned short msw;
+ unsigned short lsw;
+ unsigned short reserved1;
+ unsigned char serial_number[20];
+ unsigned short buffer_type;
+ unsigned short buffer_size_increments;
+ unsigned short long_command_ecc;
+ unsigned char firmware_version[28];
+ unsigned char model_name[18];
+ unsigned short reserved2[5];
+ unsigned short pio_mode_number;
+ unsigned short dma_mode_number;
+ unsigned short field_validity;
+ unsigned short current_logical_cylinders;
+ unsigned short current_logical_heads;
+ unsigned short current_pages_per_track;
+ unsigned int current_page_capacity;
+	unsigned short multiple_page_setting;
+ unsigned int addressable_pages;
+ unsigned short single_word_dma;
+ unsigned short multi_word_dma;
+ unsigned char reserved3[128];
+} __packed;
+
+
+struct ms_boot_page {
+ struct ms_boot_header header;
+ struct ms_system_entry entry;
+ struct ms_boot_attr_info attr;
+} __packed;
+
+struct msb_data {
+ unsigned int usage_count;
+ struct memstick_dev *card;
+ struct gendisk *disk;
+ struct request_queue *queue;
+ spinlock_t q_lock;
+ struct hd_geometry geometry;
+ struct attribute_group attr_group;
+ struct request *req;
+ int caps;
+ int disk_id;
+
+ /* IO */
+ struct workqueue_struct *io_queue;
+ bool io_queue_stopped;
+ struct work_struct io_work;
+ bool card_dead;
+
+ /* Media properties */
+ struct ms_boot_page *boot_page;
+ u16 boot_block_locations[2];
+ int boot_block_count;
+
+ bool read_only;
+ unsigned short page_size;
+ int block_size;
+ int pages_in_block;
+ int zone_count;
+ int block_count;
+ int logical_block_count;
+
+ /* FTL tables */
+ unsigned long *used_blocks_bitmap;
+ unsigned long *erased_blocks_bitmap;
+ u16 *lba_to_pba_table;
+ int free_block_count[MS_MAX_ZONES];
+ bool ftl_initialized;
+
+ /* Cache */
+ unsigned char *cache;
+ unsigned long valid_cache_bitmap;
+ int cache_block_lba;
+ bool need_flush_cache;
+ struct timer_list cache_flush_timer;
+
+ /* Preallocated buffers */
+ unsigned char *block_buffer;
+ struct scatterlist prealloc_sg[MS_BLOCK_MAX_SEGS+1];
+
+ /* handler's local data */
+ struct ms_register_addr reg_addr;
+ bool addr_valid;
+
+ u8 command_value;
+ bool command_need_oob;
+ struct scatterlist *current_sg;
+ int current_sg_offset;
+
+ struct ms_register regs;
+ int current_page;
+
+ int state;
+ int exit_error;
+ bool int_polling;
+ unsigned long int_timeout;
+};
+
+enum msb_readpage_states {
+ MSB_RP_SEND_BLOCK_ADDRESS = 0,
+ MSB_RP_SEND_READ_COMMAND,
+
+ MSB_RP_SEND_INT_REQ,
+ MSB_RP_RECEIVE_INT_REQ_RESULT,
+
+ MSB_RP_SEND_READ_STATUS_REG,
+ MSB_RP_RECIVE_STATUS_REG,
+
+ MSB_RP_SEND_OOB_READ,
+ MSB_RP_RECEIVE_OOB_READ,
+
+ MSB_RP_SEND_READ_DATA,
+ MSB_RP_RECEIVE_READ_DATA,
+};
+
+enum msb_write_block_states {
+ MSB_WB_SEND_WRITE_PARAMS = 0,
+ MSB_WB_SEND_WRITE_OOB,
+ MSB_WB_SEND_WRITE_COMMAND,
+
+ MSB_WB_SEND_INT_REQ,
+ MSB_WB_RECEIVE_INT_REQ,
+
+ MSB_WB_SEND_WRITE_DATA,
+ MSB_WB_RECEIVE_WRITE_CONFIRMATION,
+};
+
+enum msb_send_command_states {
+ MSB_SC_SEND_WRITE_PARAMS,
+ MSB_SC_SEND_WRITE_OOB,
+ MSB_SC_SEND_COMMAND,
+
+ MSB_SC_SEND_INT_REQ,
+ MSB_SC_RECEIVE_INT_REQ,
+};
+
+enum msb_reset_states {
+ MSB_RS_SEND,
+ MSB_RS_CONFIRM,
+};
+
+enum msb_par_switch_states {
+ MSB_PS_SEND_SWITCH_COMMAND,
+ MSB_PS_SWICH_HOST,
+ MSB_PS_CONFIRM,
+};
+
+struct chs_entry {
+ unsigned long size;
+ unsigned char sec;
+ unsigned short cyl;
+ unsigned char head;
+};
+
+static int msb_reset(struct msb_data *msb, bool full);
+
+static int h_msb_default_bad(struct memstick_dev *card,
+ struct memstick_request **mrq);
+
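+/* All debug output goes through pr_err, so it is visible regardless of the
+ * console loglevel; the 'debug' module parameter (0-2) selects verbosity */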
+#define __dbg(level, format, ...) \
+ do { \
+ if (debug >= level) \
+ pr_err(format "\n", ## __VA_ARGS__); \
+ } while (0)
+
+#define dbg(format, ...) __dbg(1, format, ## __VA_ARGS__)
+#define dbg_verbose(format, ...) __dbg(2, format, ## __VA_ARGS__)
+
+#endif
diff --git a/drivers/memstick/host/rtsx_pci_ms.c b/drivers/memstick/host/rtsx_pci_ms.c
index 64a779c..25f8f93 100644
--- a/drivers/memstick/host/rtsx_pci_ms.c
+++ b/drivers/memstick/host/rtsx_pci_ms.c
@@ -1,6 +1,6 @@
/* Realtek PCI-Express Memstick Card Interface driver
*
- * Copyright(c) 2009 Realtek Semiconductor Corp. All rights reserved.
+ * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -17,7 +17,6 @@
*
* Author:
* Wei WANG <wei_wang@realsil.com.cn>
- * No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
*/
#include <linux/module.h>
@@ -613,8 +612,6 @@ static int rtsx_pci_ms_drv_remove(struct platform_device *pdev)
memstick_remove_host(msh);
memstick_free_host(msh);
- platform_set_drvdata(pdev, NULL);
-
dev_dbg(&(pdev->dev),
": Realtek PCI-E Memstick controller has been removed\n");
diff --git a/drivers/mfd/88pm800.c b/drivers/mfd/88pm800.c
index 6c95483..a65447d 100644
--- a/drivers/mfd/88pm800.c
+++ b/drivers/mfd/88pm800.c
@@ -333,9 +333,11 @@ static int device_rtc_init(struct pm80x_chip *chip,
{
int ret;
- rtc_devs[0].platform_data = pdata->rtc;
- rtc_devs[0].pdata_size =
- pdata->rtc ? sizeof(struct pm80x_rtc_pdata) : 0;
+ if (pdata) {
+ rtc_devs[0].platform_data = pdata->rtc;
+ rtc_devs[0].pdata_size =
+ pdata->rtc ? sizeof(struct pm80x_rtc_pdata) : 0;
+ }
ret = mfd_add_devices(chip->dev, 0, &rtc_devs[0],
ARRAY_SIZE(rtc_devs), NULL, 0, NULL);
if (ret) {
@@ -541,7 +543,7 @@ static int pm800_probe(struct i2c_client *client,
{
int ret = 0;
struct pm80x_chip *chip;
- struct pm80x_platform_data *pdata = client->dev.platform_data;
+ struct pm80x_platform_data *pdata = dev_get_platdata(&client->dev);
struct pm80x_subchip *subchip;
ret = pm80x_init(client);
@@ -578,7 +580,7 @@ static int pm800_probe(struct i2c_client *client,
goto err_device_init;
}
- if (pdata->plat_config)
+ if (pdata && pdata->plat_config)
pdata->plat_config(chip, pdata);
return 0;
diff --git a/drivers/mfd/88pm805.c b/drivers/mfd/88pm805.c
index 5216022..8a5b6ff 100644
--- a/drivers/mfd/88pm805.c
+++ b/drivers/mfd/88pm805.c
@@ -227,7 +227,7 @@ static int pm805_probe(struct i2c_client *client,
{
int ret = 0;
struct pm80x_chip *chip;
- struct pm80x_platform_data *pdata = client->dev.platform_data;
+ struct pm80x_platform_data *pdata = dev_get_platdata(&client->dev);
ret = pm80x_init(client);
if (ret) {
@@ -243,7 +243,7 @@ static int pm805_probe(struct i2c_client *client,
goto err_805_init;
}
- if (pdata->plat_config)
+ if (pdata && pdata->plat_config)
pdata->plat_config(chip, pdata);
err_805_init:
diff --git a/drivers/mfd/88pm860x-core.c b/drivers/mfd/88pm860x-core.c
index eeb481d..7ebe9ef 100644
--- a/drivers/mfd/88pm860x-core.c
+++ b/drivers/mfd/88pm860x-core.c
@@ -1130,7 +1130,7 @@ static int pm860x_dt_init(struct device_node *np,
static int pm860x_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- struct pm860x_platform_data *pdata = client->dev.platform_data;
+ struct pm860x_platform_data *pdata = dev_get_platdata(&client->dev);
struct device_node *node = client->dev.of_node;
struct pm860x_chip *chip;
int ret;
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index aecd6dd..914c3d1 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -23,7 +23,7 @@ config MFD_AS3711
select MFD_CORE
select REGMAP_I2C
select REGMAP_IRQ
- depends on I2C=y && GENERIC_HARDIRQS
+ depends on I2C=y
help
Support for the AS3711 PMIC from AMS
@@ -40,7 +40,7 @@ config PMIC_ADP5520
config MFD_AAT2870_CORE
bool "AnalogicTech AAT2870"
select MFD_CORE
- depends on I2C=y && GPIOLIB && GENERIC_HARDIRQS
+ depends on I2C=y && GPIOLIB
help
If you say yes here you get support for the AAT2870.
This driver provides common support for accessing the device,
@@ -78,7 +78,7 @@ config MFD_CROS_EC_SPI
config MFD_ASIC3
bool "Compaq ASIC3"
- depends on GENERIC_HARDIRQS && GPIOLIB && ARM
+ depends on GPIOLIB && ARM
select MFD_CORE
---help---
This driver supports the ASIC3 multifunction chip found on many
@@ -104,7 +104,7 @@ config MFD_DA9052_SPI
select REGMAP_SPI
select REGMAP_IRQ
select PMIC_DA9052
- depends on SPI_MASTER=y && GENERIC_HARDIRQS
+ depends on SPI_MASTER=y
help
Support for the Dialog Semiconductor DA9052 PMIC
when controlled using SPI. This driver provides common support
@@ -116,7 +116,7 @@ config MFD_DA9052_I2C
select REGMAP_I2C
select REGMAP_IRQ
select PMIC_DA9052
- depends on I2C=y && GENERIC_HARDIRQS
+ depends on I2C=y
help
Support for the Dialog Semiconductor DA9052 PMIC
when controlled using I2C. This driver provides common support
@@ -128,7 +128,7 @@ config MFD_DA9055
select REGMAP_I2C
select REGMAP_IRQ
select MFD_CORE
- depends on I2C=y && GENERIC_HARDIRQS
+ depends on I2C=y
help
Say yes here for support of Dialog Semiconductor DA9055. This is
a Power Management IC. This driver provides common support for
@@ -139,12 +139,24 @@ config MFD_DA9055
This driver can be built as a module. If built as a module it will be
called "da9055"
+config MFD_DA9063
+ bool "Dialog Semiconductor DA9063 PMIC Support"
+ select MFD_CORE
+ select REGMAP_I2C
+ select REGMAP_IRQ
+ depends on I2C=y
+ help
+ Say yes here for support for the Dialog Semiconductor DA9063 PMIC.
+ This includes the I2C driver and core APIs.
+ Additional drivers must be enabled in order to use the functionality
+ of the device.
+
config MFD_MC13783
tristate
config MFD_MC13XXX
tristate
- depends on (SPI_MASTER || I2C) && GENERIC_HARDIRQS
+ depends on (SPI_MASTER || I2C)
select MFD_CORE
select MFD_MC13783
help
@@ -155,7 +167,7 @@ config MFD_MC13XXX
config MFD_MC13XXX_SPI
tristate "Freescale MC13783 and MC13892 SPI interface"
- depends on SPI_MASTER && GENERIC_HARDIRQS
+ depends on SPI_MASTER
select REGMAP_SPI
select MFD_MC13XXX
help
@@ -163,7 +175,7 @@ config MFD_MC13XXX_SPI
config MFD_MC13XXX_I2C
tristate "Freescale MC13892 I2C interface"
- depends on I2C && GENERIC_HARDIRQS
+ depends on I2C
select REGMAP_I2C
select MFD_MC13XXX
help
@@ -171,7 +183,7 @@ config MFD_MC13XXX_I2C
config HTC_EGPIO
bool "HTC EGPIO support"
- depends on GENERIC_HARDIRQS && GPIOLIB && ARM
+ depends on GPIOLIB && ARM
help
This driver supports the CPLD egpio chip present on
several HTC phones. It provides basic support for input
@@ -180,7 +192,6 @@ config HTC_EGPIO
config HTC_PASIC3
tristate "HTC PASIC3 LED/DS1WM chip support"
select MFD_CORE
- depends on GENERIC_HARDIRQS
help
This core driver provides register access for the LED/DS1WM
chips labeled "AIC2" and "AIC3", found on HTC Blueangel and
@@ -198,7 +209,7 @@ config HTC_I2CPLD
config LPC_ICH
tristate "Intel ICH LPC"
- depends on PCI && GENERIC_HARDIRQS
+ depends on PCI
select MFD_CORE
help
The LPC bridge function of the Intel ICH provides support for
@@ -208,7 +219,7 @@ config LPC_ICH
config LPC_SCH
tristate "Intel SCH LPC"
- depends on PCI && GENERIC_HARDIRQS
+ depends on PCI
select MFD_CORE
help
LPC bridge function of the Intel SCH provides support for
@@ -226,7 +237,7 @@ config MFD_INTEL_MSIC
config MFD_JANZ_CMODIO
tristate "Janz CMOD-IO PCI MODULbus Carrier Board"
select MFD_CORE
- depends on PCI && GENERIC_HARDIRQS
+ depends on PCI
help
This is the core driver for the Janz CMOD-IO PCI MODULbus
carrier board. This device is a PCI to MODULbus bridge which may
@@ -265,7 +276,7 @@ config MFD_KEMPLD
config MFD_88PM800
tristate "Marvell 88PM800"
- depends on I2C=y && GENERIC_HARDIRQS
+ depends on I2C=y
select REGMAP_I2C
select REGMAP_IRQ
select MFD_CORE
@@ -277,7 +288,7 @@ config MFD_88PM800
config MFD_88PM805
tristate "Marvell 88PM805"
- depends on I2C=y && GENERIC_HARDIRQS
+ depends on I2C=y
select REGMAP_I2C
select REGMAP_IRQ
select MFD_CORE
@@ -289,7 +300,7 @@ config MFD_88PM805
config MFD_88PM860X
bool "Marvell 88PM8606/88PM8607"
- depends on I2C=y && GENERIC_HARDIRQS
+ depends on I2C=y
select REGMAP_I2C
select MFD_CORE
help
@@ -300,7 +311,7 @@ config MFD_88PM860X
config MFD_MAX77686
bool "Maxim Semiconductor MAX77686 PMIC Support"
- depends on I2C=y && GENERIC_HARDIRQS
+ depends on I2C=y
select MFD_CORE
select REGMAP_I2C
select IRQ_DOMAIN
@@ -313,7 +324,7 @@ config MFD_MAX77686
config MFD_MAX77693
bool "Maxim Semiconductor MAX77693 PMIC Support"
- depends on I2C=y && GENERIC_HARDIRQS
+ depends on I2C=y
select MFD_CORE
select REGMAP_I2C
help
@@ -327,7 +338,7 @@ config MFD_MAX77693
config MFD_MAX8907
tristate "Maxim Semiconductor MAX8907 PMIC Support"
select MFD_CORE
- depends on I2C=y && GENERIC_HARDIRQS
+ depends on I2C=y
select REGMAP_I2C
select REGMAP_IRQ
help
@@ -338,7 +349,7 @@ config MFD_MAX8907
config MFD_MAX8925
bool "Maxim Semiconductor MAX8925 PMIC Support"
- depends on I2C=y && GENERIC_HARDIRQS
+ depends on I2C=y
select MFD_CORE
help
Say yes here to support for Maxim Semiconductor MAX8925. This is
@@ -348,7 +359,7 @@ config MFD_MAX8925
config MFD_MAX8997
bool "Maxim Semiconductor MAX8997/8966 PMIC Support"
- depends on I2C=y && GENERIC_HARDIRQS
+ depends on I2C=y
select MFD_CORE
select IRQ_DOMAIN
help
@@ -361,7 +372,7 @@ config MFD_MAX8997
config MFD_MAX8998
bool "Maxim Semiconductor MAX8998/National LP3974 PMIC Support"
- depends on I2C=y && GENERIC_HARDIRQS
+ depends on I2C=y
select MFD_CORE
select IRQ_DOMAIN
help
@@ -373,7 +384,7 @@ config MFD_MAX8998
config EZX_PCAP
bool "Motorola EZXPCAP Support"
- depends on GENERIC_HARDIRQS && SPI_MASTER
+ depends on SPI_MASTER
help
This enables the PCAP ASIC present on EZX Phones. This is
needed for MMC, TouchScreen, Sound, USB, etc..
@@ -381,7 +392,7 @@ config EZX_PCAP
config MFD_VIPERBOARD
tristate "Nano River Technologies Viperboard"
select MFD_CORE
- depends on USB && GENERIC_HARDIRQS
+ depends on USB
default n
help
Say yes here if you want support for Nano River Technologies
@@ -395,7 +406,7 @@ config MFD_VIPERBOARD
config MFD_RETU
tristate "Nokia Retu and Tahvo multi-function device"
select MFD_CORE
- depends on I2C && GENERIC_HARDIRQS
+ depends on I2C
select REGMAP_IRQ
help
Retu and Tahvo are a multi-function devices found on Nokia
@@ -468,7 +479,7 @@ config MFD_PM8XXX_IRQ
config MFD_RDC321X
tristate "RDC R-321x southbridge"
select MFD_CORE
- depends on PCI && GENERIC_HARDIRQS
+ depends on PCI
help
Say yes here if you want to have support for the RDC R-321x SoC
southbridge which provides access to GPIOs and Watchdog using the
@@ -476,7 +487,7 @@ config MFD_RDC321X
config MFD_RTSX_PCI
tristate "Realtek PCI-E card reader"
- depends on PCI && GENERIC_HARDIRQS
+ depends on PCI
select MFD_CORE
help
This supports for Realtek PCI-Express card reader including rts5209,
@@ -486,7 +497,7 @@ config MFD_RTSX_PCI
config MFD_RC5T583
bool "Ricoh RC5T583 Power Management system device"
- depends on I2C=y && GENERIC_HARDIRQS
+ depends on I2C=y
select MFD_CORE
select REGMAP_I2C
help
@@ -500,7 +511,7 @@ config MFD_RC5T583
config MFD_SEC_CORE
bool "SAMSUNG Electronics PMIC Series Support"
- depends on I2C=y && GENERIC_HARDIRQS
+ depends on I2C=y
select MFD_CORE
select REGMAP_I2C
select REGMAP_IRQ
@@ -543,7 +554,7 @@ config MFD_SM501_GPIO
config MFD_SMSC
bool "SMSC ECE1099 series chips"
- depends on I2C=y && GENERIC_HARDIRQS
+ depends on I2C=y
select MFD_CORE
select REGMAP_I2C
help
@@ -565,7 +576,7 @@ config ABX500_CORE
config AB3100_CORE
bool "ST-Ericsson AB3100 Mixed Signal Circuit core functions"
- depends on I2C=y && ABX500_CORE && GENERIC_HARDIRQS
+ depends on I2C=y && ABX500_CORE
select MFD_CORE
default y if ARCH_U300
help
@@ -589,7 +600,7 @@ config AB3100_OTP
config AB8500_CORE
bool "ST-Ericsson AB8500 Mixed Signal Power Management chip"
- depends on GENERIC_HARDIRQS && ABX500_CORE && MFD_DB8500_PRCMU
+ depends on ABX500_CORE && MFD_DB8500_PRCMU
select POWER_SUPPLY
select MFD_CORE
select IRQ_DOMAIN
@@ -627,7 +638,7 @@ config MFD_DB8500_PRCMU
config MFD_STMPE
bool "STMicroelectronics STMPE"
- depends on (I2C=y || SPI_MASTER=y) && GENERIC_HARDIRQS
+ depends on (I2C=y || SPI_MASTER=y)
select MFD_CORE
help
Support for the STMPE family of I/O Expanders from
@@ -668,7 +679,7 @@ endmenu
config MFD_STA2X11
bool "STMicroelectronics STA2X11"
- depends on STA2X11 && GENERIC_HARDIRQS
+ depends on STA2X11
select MFD_CORE
select REGMAP_MMIO
@@ -688,7 +699,6 @@ config MFD_TI_AM335X_TSCADC
select MFD_CORE
select REGMAP
select REGMAP_MMIO
- depends on GENERIC_HARDIRQS
help
If you say yes here you get support for Texas Instruments series
of Touch Screen /ADC chips.
@@ -705,7 +715,7 @@ config MFD_DM355EVM_MSP
config MFD_LP8788
bool "TI LP8788 Power Management Unit Driver"
- depends on I2C=y && GENERIC_HARDIRQS
+ depends on I2C=y
select MFD_CORE
select REGMAP_I2C
select IRQ_DOMAIN
@@ -727,14 +737,14 @@ config MFD_PALMAS
select MFD_CORE
select REGMAP_I2C
select REGMAP_IRQ
- depends on I2C=y && GENERIC_HARDIRQS
+ depends on I2C=y
help
If you say yes here you get support for the Palmas
series of PMIC chips from Texas Instruments.
config MFD_TI_SSP
tristate "TI Sequencer Serial Port support"
- depends on ARCH_DAVINCI_TNETV107X && GENERIC_HARDIRQS
+ depends on ARCH_DAVINCI_TNETV107X
select MFD_CORE
---help---
Say Y here if you want support for the Sequencer Serial Port
@@ -749,7 +759,6 @@ config TPS6105X
select REGULATOR
select MFD_CORE
select REGULATOR_FIXED_VOLTAGE
- depends on GENERIC_HARDIRQS
help
This option enables a driver for the TP61050/TPS61052
high-power "white LED driver". This boost converter is
@@ -772,7 +781,7 @@ config TPS65010
config TPS6507X
tristate "TI TPS6507x Power Management / Touch Screen chips"
select MFD_CORE
- depends on I2C && GENERIC_HARDIRQS
+ depends on I2C
help
If you say yes here you get support for the TPS6507x series of
Power Management / Touch Screen chips. These include voltage
@@ -786,7 +795,7 @@ config TPS65911_COMPARATOR
config MFD_TPS65090
bool "TI TPS65090 Power Management chips"
- depends on I2C=y && GENERIC_HARDIRQS
+ depends on I2C=y
select MFD_CORE
select REGMAP_I2C
select REGMAP_IRQ
@@ -799,7 +808,7 @@ config MFD_TPS65090
config MFD_TPS65217
tristate "TI TPS65217 Power Management / White LED chips"
- depends on I2C && GENERIC_HARDIRQS
+ depends on I2C
select MFD_CORE
select REGMAP_I2C
help
@@ -814,7 +823,7 @@ config MFD_TPS65217
config MFD_TPS6586X
bool "TI TPS6586x Power Management chips"
- depends on I2C=y && GENERIC_HARDIRQS
+ depends on I2C=y
select MFD_CORE
select REGMAP_I2C
help
@@ -829,7 +838,7 @@ config MFD_TPS6586X
config MFD_TPS65910
bool "TI TPS65910 Power Management chip"
- depends on I2C=y && GPIOLIB && GENERIC_HARDIRQS
+ depends on I2C=y && GPIOLIB
select MFD_CORE
select REGMAP_I2C
select REGMAP_IRQ
@@ -850,7 +859,7 @@ config MFD_TPS65912_I2C
bool "TI TPS65912 Power Management chip with I2C"
select MFD_CORE
select MFD_TPS65912
- depends on I2C=y && GPIOLIB && GENERIC_HARDIRQS
+ depends on I2C=y && GPIOLIB
help
If you say yes here you get support for the TPS65912 series of
PM chips with I2C interface.
@@ -859,14 +868,14 @@ config MFD_TPS65912_SPI
bool "TI TPS65912 Power Management chip with SPI"
select MFD_CORE
select MFD_TPS65912
- depends on SPI_MASTER && GPIOLIB && GENERIC_HARDIRQS
+ depends on SPI_MASTER && GPIOLIB
help
If you say yes here you get support for the TPS65912 series of
PM chips with SPI interface.
config MFD_TPS80031
bool "TI TPS80031/TPS80032 Power Management chips"
- depends on I2C=y && GENERIC_HARDIRQS
+ depends on I2C=y
select MFD_CORE
select REGMAP_I2C
select REGMAP_IRQ
@@ -880,7 +889,7 @@ config MFD_TPS80031
config TWL4030_CORE
bool "TI TWL4030/TWL5030/TWL6030/TPS659x0 Support"
- depends on I2C=y && GENERIC_HARDIRQS
+ depends on I2C=y
select IRQ_DOMAIN
select REGMAP_I2C
help
@@ -919,13 +928,13 @@ config TWL4030_POWER
config MFD_TWL4030_AUDIO
bool "TI TWL4030 Audio"
- depends on TWL4030_CORE && GENERIC_HARDIRQS
+ depends on TWL4030_CORE
select MFD_CORE
default n
config TWL6040_CORE
bool "TI TWL6040 audio codec"
- depends on I2C=y && GENERIC_HARDIRQS
+ depends on I2C=y
select MFD_CORE
select REGMAP_I2C
select REGMAP_IRQ
@@ -949,7 +958,7 @@ config MENELAUS
config MFD_WL1273_CORE
tristate "TI WL1273 FM radio"
- depends on I2C && GENERIC_HARDIRQS
+ depends on I2C
select MFD_CORE
default n
help
@@ -962,7 +971,6 @@ config MFD_LM3533
depends on I2C
select MFD_CORE
select REGMAP_I2C
- depends on GENERIC_HARDIRQS
help
Say yes here to enable support for National Semiconductor / TI
LM3533 Lighting Power chips.
@@ -984,7 +992,7 @@ config MFD_TIMBERDALE
config MFD_TC3589X
bool "Toshiba TC35892 and variants"
- depends on I2C=y && GENERIC_HARDIRQS
+ depends on I2C=y
select MFD_CORE
help
Support for the Toshiba TC35892 and variants I/O Expander.
@@ -999,7 +1007,7 @@ config MFD_TMIO
config MFD_T7L66XB
bool "Toshiba T7L66XB"
- depends on ARM && HAVE_CLK && GENERIC_HARDIRQS
+ depends on ARM && HAVE_CLK
select MFD_CORE
select MFD_TMIO
help
@@ -1024,7 +1032,7 @@ config MFD_TC6393XB
config MFD_VX855
tristate "VIA VX855/VX875 integrated south bridge"
- depends on PCI && GENERIC_HARDIRQS
+ depends on PCI
select MFD_CORE
help
Say yes here to enable support for various functions of the
@@ -1042,7 +1050,7 @@ config MFD_ARIZONA_I2C
select MFD_ARIZONA
select MFD_CORE
select REGMAP_I2C
- depends on I2C && GENERIC_HARDIRQS
+ depends on I2C
help
Support for the Wolfson Microelectronics Arizona platform audio SoC
core functionality controlled via I2C.
@@ -1052,7 +1060,7 @@ config MFD_ARIZONA_SPI
select MFD_ARIZONA
select MFD_CORE
select REGMAP_SPI
- depends on SPI_MASTER && GENERIC_HARDIRQS
+ depends on SPI_MASTER
help
Support for the Wolfson Microelectronics Arizona platform audio SoC
core functionality controlled via I2C.
@@ -1070,7 +1078,7 @@ config MFD_WM5110
Support for Wolfson Microelectronics WM5110 low power audio SoC
config MFD_WM8997
- bool "Support Wolfson Microelectronics WM8997"
+ bool "Wolfson Microelectronics WM8997"
depends on MFD_ARIZONA
help
Support for Wolfson Microelectronics WM8997 low power audio SoC
@@ -1078,7 +1086,7 @@ config MFD_WM8997
config MFD_WM8400
bool "Wolfson Microelectronics WM8400"
select MFD_CORE
- depends on I2C=y && GENERIC_HARDIRQS
+ depends on I2C=y
select REGMAP_I2C
help
Support for the Wolfson Microelecronics WM8400 PMIC and audio
@@ -1088,7 +1096,6 @@ config MFD_WM8400
config MFD_WM831X
bool
- depends on GENERIC_HARDIRQS
config MFD_WM831X_I2C
bool "Wolfson Microelectronics WM831x/2x PMICs with I2C"
@@ -1096,7 +1103,7 @@ config MFD_WM831X_I2C
select MFD_WM831X
select REGMAP_I2C
select IRQ_DOMAIN
- depends on I2C=y && GENERIC_HARDIRQS
+ depends on I2C=y
help
Support for the Wolfson Microelecronics WM831x and WM832x PMICs
when controlled using I2C. This driver provides common support
@@ -1109,7 +1116,7 @@ config MFD_WM831X_SPI
select MFD_WM831X
select REGMAP_SPI
select IRQ_DOMAIN
- depends on SPI_MASTER && GENERIC_HARDIRQS
+ depends on SPI_MASTER
help
Support for the Wolfson Microelecronics WM831x and WM832x PMICs
when controlled using SPI. This driver provides common support
@@ -1118,12 +1125,11 @@ config MFD_WM831X_SPI
config MFD_WM8350
bool
- depends on GENERIC_HARDIRQS
config MFD_WM8350_I2C
bool "Wolfson Microelectronics WM8350 with I2C"
select MFD_WM8350
- depends on I2C=y && GENERIC_HARDIRQS
+ depends on I2C=y
help
The WM8350 is an integrated audio and power management
subsystem with watchdog and RTC functionality for embedded
@@ -1136,7 +1142,7 @@ config MFD_WM8994
select MFD_CORE
select REGMAP_I2C
select REGMAP_IRQ
- depends on I2C=y && GENERIC_HARDIRQS
+ depends on I2C=y
help
The WM8994 is a highly integrated hi-fi CODEC designed for
smartphone applicatiosn. As well as audio functionality it
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index 3c90051..15b905c 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -107,6 +107,9 @@ obj-$(CONFIG_MFD_LP8788) += lp8788.o lp8788-irq.o
da9055-objs := da9055-core.o da9055-i2c.o
obj-$(CONFIG_MFD_DA9055) += da9055.o
+da9063-objs := da9063-core.o da9063-irq.o da9063-i2c.o
+obj-$(CONFIG_MFD_DA9063) += da9063.o
+
obj-$(CONFIG_MFD_MAX77686) += max77686.o max77686-irq.o
obj-$(CONFIG_MFD_MAX77693) += max77693.o max77693-irq.o
obj-$(CONFIG_MFD_MAX8907) += max8907.o
diff --git a/drivers/mfd/aat2870-core.c b/drivers/mfd/aat2870-core.c
index d4f5945..6f68472 100644
--- a/drivers/mfd/aat2870-core.c
+++ b/drivers/mfd/aat2870-core.c
@@ -363,7 +363,7 @@ static inline void aat2870_uninit_debugfs(struct aat2870_data *aat2870)
static int aat2870_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- struct aat2870_platform_data *pdata = client->dev.platform_data;
+ struct aat2870_platform_data *pdata = dev_get_platdata(&client->dev);
struct aat2870_data *aat2870;
int i, j;
int ret = 0;
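Every platform-data access converted in this series goes through the same accessor. dev_get_platdata() is a trivial inline in <linux/device.h>, so the change has no behavioural effect; it simply stops drivers from poking at struct device internals directly:

    static inline void *dev_get_platdata(const struct device *dev)
    {
    	return dev->platform_data;
    }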
diff --git a/drivers/mfd/ab3100-core.c b/drivers/mfd/ab3100-core.c
index ddc669d..b348ae5 100644
--- a/drivers/mfd/ab3100-core.c
+++ b/drivers/mfd/ab3100-core.c
@@ -854,7 +854,7 @@ static int ab3100_probe(struct i2c_client *client,
{
struct ab3100 *ab3100;
struct ab3100_platform_data *ab3100_plf_data =
- client->dev.platform_data;
+ dev_get_platdata(&client->dev);
int err;
int i;
diff --git a/drivers/mfd/ab8500-debugfs.c b/drivers/mfd/ab8500-debugfs.c
index 7d1f1b0..e33e385 100644
--- a/drivers/mfd/ab8500-debugfs.c
+++ b/drivers/mfd/ab8500-debugfs.c
@@ -159,7 +159,7 @@ static struct hwreg_cfg hwreg_cfg = {
static struct ab8500_prcmu_ranges *debug_ranges;
-struct ab8500_prcmu_ranges ab8500_debug_ranges[AB8500_NUM_BANKS] = {
+static struct ab8500_prcmu_ranges ab8500_debug_ranges[AB8500_NUM_BANKS] = {
[0x0] = {
.num_ranges = 0,
.range = NULL,
@@ -488,7 +488,7 @@ struct ab8500_prcmu_ranges ab8500_debug_ranges[AB8500_NUM_BANKS] = {
},
};
-struct ab8500_prcmu_ranges ab8505_debug_ranges[AB8500_NUM_BANKS] = {
+static struct ab8500_prcmu_ranges ab8505_debug_ranges[AB8500_NUM_BANKS] = {
[0x0] = {
.num_ranges = 0,
.range = NULL,
@@ -847,7 +847,7 @@ struct ab8500_prcmu_ranges ab8505_debug_ranges[AB8500_NUM_BANKS] = {
},
};
-struct ab8500_prcmu_ranges ab8540_debug_ranges[AB8500_NUM_BANKS] = {
+static struct ab8500_prcmu_ranges ab8540_debug_ranges[AB8500_NUM_BANKS] = {
[AB8500_M_FSM_RANK] = {
.num_ranges = 1,
.range = (struct ab8500_reg_range[]) {
@@ -1377,7 +1377,7 @@ void ab8500_dump_all_banks(struct device *dev)
/* Space for 700 registers. */
#define DUMP_MAX_REGS 700
-struct ab8500_register_dump
+static struct ab8500_register_dump
{
u8 bank;
u8 reg;
@@ -2800,7 +2800,13 @@ static ssize_t ab8500_subscribe_write(struct file *file,
*/
dev_attr[irq_index] = kmalloc(sizeof(struct device_attribute),
GFP_KERNEL);
+ if (!dev_attr[irq_index])
+ return -ENOMEM;
+
event_name[irq_index] = kmalloc(count, GFP_KERNEL);
+	if (!event_name[irq_index]) {
+		kfree(dev_attr[irq_index]);
+		return -ENOMEM;
+	}
+
sprintf(event_name[irq_index], "%lu", user_val);
dev_attr[irq_index]->show = show_irq;
dev_attr[irq_index]->store = NULL;
diff --git a/drivers/mfd/ab8500-gpadc.c b/drivers/mfd/ab8500-gpadc.c
index 7623e91..36000f9 100644
--- a/drivers/mfd/ab8500-gpadc.c
+++ b/drivers/mfd/ab8500-gpadc.c
@@ -867,6 +867,7 @@ static void ab8500_gpadc_read_calibration_data(struct ab8500_gpadc *gpadc)
gpadc->cal_data[ADC_INPUT_VBAT].offset);
}
+#ifdef CONFIG_PM_RUNTIME
static int ab8500_gpadc_runtime_suspend(struct device *dev)
{
struct ab8500_gpadc *gpadc = dev_get_drvdata(dev);
@@ -885,7 +886,9 @@ static int ab8500_gpadc_runtime_resume(struct device *dev)
dev_err(dev, "Failed to enable vtvout LDO: %d\n", ret);
return ret;
}
+#endif
+#ifdef CONFIG_PM_SLEEP
static int ab8500_gpadc_suspend(struct device *dev)
{
struct ab8500_gpadc *gpadc = dev_get_drvdata(dev);
@@ -913,6 +916,7 @@ static int ab8500_gpadc_resume(struct device *dev)
mutex_unlock(&gpadc->ab8500_gpadc_lock);
return ret;
}
+#endif
static int ab8500_gpadc_probe(struct platform_device *pdev)
{
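The new #ifdef guards match how the callbacks are consumed: SET_RUNTIME_PM_OPS() and SET_SYSTEM_SLEEP_PM_OPS() expand to nothing when CONFIG_PM_RUNTIME or CONFIG_PM_SLEEP is disabled, which would otherwise leave these functions defined but unused. A sketch of the usual wiring (the driver's actual dev_pm_ops may order the entries differently):

    static const struct dev_pm_ops ab8500_gpadc_pm_ops = {
    	SET_RUNTIME_PM_OPS(ab8500_gpadc_runtime_suspend,
    			   ab8500_gpadc_runtime_resume, NULL)
    	SET_SYSTEM_SLEEP_PM_OPS(ab8500_gpadc_suspend, ab8500_gpadc_resume)
    };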
diff --git a/drivers/mfd/adp5520.c b/drivers/mfd/adp5520.c
index 28346ad..6250155 100644
--- a/drivers/mfd/adp5520.c
+++ b/drivers/mfd/adp5520.c
@@ -207,7 +207,7 @@ static int adp5520_remove_subdevs(struct adp5520_chip *chip)
static int adp5520_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- struct adp5520_platform_data *pdata = client->dev.platform_data;
+ struct adp5520_platform_data *pdata = dev_get_platdata(&client->dev);
struct platform_device *pdev;
struct adp5520_chip *chip;
int ret;
diff --git a/drivers/mfd/arizona-core.c b/drivers/mfd/arizona-core.c
index 89a1153..5ac3aa4 100644
--- a/drivers/mfd/arizona-core.c
+++ b/drivers/mfd/arizona-core.c
@@ -438,9 +438,9 @@ static int arizona_runtime_suspend(struct device *dev)
}
}
- regulator_disable(arizona->dcvdd);
regcache_cache_only(arizona->regmap, true);
regcache_mark_dirty(arizona->regmap);
+ regulator_disable(arizona->dcvdd);
return 0;
}
diff --git a/drivers/mfd/as3711.c b/drivers/mfd/as3711.c
index 01e4141..abd3ab7 100644
--- a/drivers/mfd/as3711.c
+++ b/drivers/mfd/as3711.c
@@ -129,7 +129,7 @@ static int as3711_i2c_probe(struct i2c_client *client,
int ret;
if (!client->dev.of_node) {
- pdata = client->dev.platform_data;
+ pdata = dev_get_platdata(&client->dev);
if (!pdata)
dev_dbg(&client->dev, "Platform data not found\n");
} else {
diff --git a/drivers/mfd/asic3.c b/drivers/mfd/asic3.c
index 9532f74..fa22154 100644
--- a/drivers/mfd/asic3.c
+++ b/drivers/mfd/asic3.c
@@ -952,7 +952,7 @@ static void asic3_mfd_remove(struct platform_device *pdev)
/* Core */
static int __init asic3_probe(struct platform_device *pdev)
{
- struct asic3_platform_data *pdata = pdev->dev.platform_data;
+ struct asic3_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct asic3 *asic;
struct resource *mem;
unsigned long clksel;
diff --git a/drivers/mfd/da903x.c b/drivers/mfd/da903x.c
index f1a316e..e0a2e0e 100644
--- a/drivers/mfd/da903x.c
+++ b/drivers/mfd/da903x.c
@@ -494,7 +494,7 @@ failed:
static int da903x_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- struct da903x_platform_data *pdata = client->dev.platform_data;
+ struct da903x_platform_data *pdata = dev_get_platdata(&client->dev);
struct da903x_chip *chip;
unsigned int tmp;
int ret;
diff --git a/drivers/mfd/da9052-core.c b/drivers/mfd/da9052-core.c
index a3c9613..ea28a33 100644
--- a/drivers/mfd/da9052-core.c
+++ b/drivers/mfd/da9052-core.c
@@ -534,7 +534,7 @@ EXPORT_SYMBOL_GPL(da9052_regmap_config);
int da9052_device_init(struct da9052 *da9052, u8 chip_id)
{
- struct da9052_pdata *pdata = da9052->dev->platform_data;
+ struct da9052_pdata *pdata = dev_get_platdata(da9052->dev);
int ret;
mutex_init(&da9052->auxadc_lock);
diff --git a/drivers/mfd/da9055-core.c b/drivers/mfd/da9055-core.c
index 49cb23d..d3670cd 100644
--- a/drivers/mfd/da9055-core.c
+++ b/drivers/mfd/da9055-core.c
@@ -379,8 +379,9 @@ static struct regmap_irq_chip da9055_regmap_irq_chip = {
int da9055_device_init(struct da9055 *da9055)
{
- struct da9055_pdata *pdata = da9055->dev->platform_data;
+ struct da9055_pdata *pdata = dev_get_platdata(da9055->dev);
int ret;
+ uint8_t clear_events[3] = {0xFF, 0xFF, 0xFF};
if (pdata && pdata->init != NULL)
pdata->init(da9055);
@@ -390,6 +391,10 @@ int da9055_device_init(struct da9055 *da9055)
else
da9055->irq_base = pdata->irq_base;
+ ret = da9055_group_write(da9055, DA9055_REG_EVENT_A, 3, clear_events);
+ if (ret < 0)
+ return ret;
+
ret = regmap_add_irq_chip(da9055->regmap, da9055->chip_irq,
IRQF_TRIGGER_LOW | IRQF_ONESHOT,
da9055->irq_base, &da9055_regmap_irq_chip,
diff --git a/drivers/mfd/da9055-i2c.c b/drivers/mfd/da9055-i2c.c
index 607387f..13af7e5 100644
--- a/drivers/mfd/da9055-i2c.c
+++ b/drivers/mfd/da9055-i2c.c
@@ -54,7 +54,7 @@ static int da9055_i2c_remove(struct i2c_client *i2c)
}
static struct i2c_device_id da9055_i2c_id[] = {
- {"da9055-pmic", 0},
+ {"da9055", 0},
{ }
};
diff --git a/drivers/mfd/da9063-core.c b/drivers/mfd/da9063-core.c
new file mode 100644
index 0000000..c9cf8d9
--- /dev/null
+++ b/drivers/mfd/da9063-core.c
@@ -0,0 +1,185 @@
+/*
+ * da9063-core.c: Device access for Dialog DA9063 modules
+ *
+ * Copyright 2012 Dialog Semiconductor Ltd.
+ * Copyright 2013 Philipp Zabel, Pengutronix
+ *
+ * Author: Krystian Garbaciak <krystian.garbaciak@diasemi.com>,
+ * Michal Hajduk <michal.hajduk@diasemi.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/mutex.h>
+#include <linux/mfd/core.h>
+#include <linux/regmap.h>
+
+#include <linux/mfd/da9063/core.h>
+#include <linux/mfd/da9063/pdata.h>
+#include <linux/mfd/da9063/registers.h>
+
+#include <linux/proc_fs.h>
+#include <linux/kthread.h>
+#include <linux/uaccess.h>
+
+
+static struct resource da9063_regulators_resources[] = {
+ {
+ .name = "LDO_LIM",
+ .start = DA9063_IRQ_LDO_LIM,
+ .end = DA9063_IRQ_LDO_LIM,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct resource da9063_rtc_resources[] = {
+ {
+ .name = "ALARM",
+ .start = DA9063_IRQ_ALARM,
+ .end = DA9063_IRQ_ALARM,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "TICK",
+ .start = DA9063_IRQ_TICK,
+ .end = DA9063_IRQ_TICK,
+ .flags = IORESOURCE_IRQ,
+ }
+};
+
+static struct resource da9063_onkey_resources[] = {
+ {
+ .start = DA9063_IRQ_ONKEY,
+ .end = DA9063_IRQ_ONKEY,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct resource da9063_hwmon_resources[] = {
+ {
+ .start = DA9063_IRQ_ADC_RDY,
+ .end = DA9063_IRQ_ADC_RDY,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+
+static struct mfd_cell da9063_devs[] = {
+ {
+ .name = DA9063_DRVNAME_REGULATORS,
+ .num_resources = ARRAY_SIZE(da9063_regulators_resources),
+ .resources = da9063_regulators_resources,
+ },
+ {
+ .name = DA9063_DRVNAME_LEDS,
+ },
+ {
+ .name = DA9063_DRVNAME_WATCHDOG,
+ },
+ {
+ .name = DA9063_DRVNAME_HWMON,
+ .num_resources = ARRAY_SIZE(da9063_hwmon_resources),
+ .resources = da9063_hwmon_resources,
+ },
+ {
+ .name = DA9063_DRVNAME_ONKEY,
+ .num_resources = ARRAY_SIZE(da9063_onkey_resources),
+ .resources = da9063_onkey_resources,
+ },
+ {
+ .name = DA9063_DRVNAME_RTC,
+ .num_resources = ARRAY_SIZE(da9063_rtc_resources),
+ .resources = da9063_rtc_resources,
+ },
+ {
+ .name = DA9063_DRVNAME_VIBRATION,
+ },
+};
+
+int da9063_device_init(struct da9063 *da9063, unsigned int irq)
+{
+	struct da9063_pdata *pdata = dev_get_platdata(da9063->dev);
+ int model, revision;
+ int ret;
+
+ if (pdata) {
+ da9063->flags = pdata->flags;
+ da9063->irq_base = pdata->irq_base;
+ } else {
+ da9063->flags = 0;
+ da9063->irq_base = 0;
+ }
+ da9063->chip_irq = irq;
+
+ if (pdata && pdata->init != NULL) {
+ ret = pdata->init(da9063);
+ if (ret != 0) {
+ dev_err(da9063->dev,
+ "Platform initialization failed.\n");
+ return ret;
+ }
+ }
+
+ ret = regmap_read(da9063->regmap, DA9063_REG_CHIP_ID, &model);
+ if (ret < 0) {
+ dev_err(da9063->dev, "Cannot read chip model id.\n");
+ return -EIO;
+ }
+ if (model != PMIC_DA9063) {
+ dev_err(da9063->dev, "Invalid chip model id: 0x%02x\n", model);
+ return -ENODEV;
+ }
+
+ ret = regmap_read(da9063->regmap, DA9063_REG_CHIP_VARIANT, &revision);
+ if (ret < 0) {
+ dev_err(da9063->dev, "Cannot read chip revision id.\n");
+ return -EIO;
+ }
+ revision >>= DA9063_CHIP_VARIANT_SHIFT;
+ if (revision != 3) {
+ dev_err(da9063->dev, "Unknown chip revision: %d\n", revision);
+ return -ENODEV;
+ }
+
+ da9063->model = model;
+ da9063->revision = revision;
+
+ dev_info(da9063->dev,
+ "Device detected (model-ID: 0x%02X rev-ID: 0x%02X)\n",
+ model, revision);
+
+ ret = da9063_irq_init(da9063);
+ if (ret) {
+ dev_err(da9063->dev, "Cannot initialize interrupts.\n");
+ return ret;
+ }
+
+ ret = mfd_add_devices(da9063->dev, -1, da9063_devs,
+ ARRAY_SIZE(da9063_devs), NULL, da9063->irq_base,
+ NULL);
+ if (ret)
+ dev_err(da9063->dev, "Cannot add MFD cells\n");
+
+ return ret;
+}
+
+void da9063_device_exit(struct da9063 *da9063)
+{
+ mfd_remove_devices(da9063->dev);
+ da9063_irq_exit(da9063);
+}
+
+MODULE_DESCRIPTION("PMIC driver for Dialog DA9063");
+MODULE_AUTHOR("Krystian Garbaciak <krystian.garbaciak@diasemi.com>, Michal Hajduk <michal.hajduk@diasemi.com>");
+MODULE_LICENSE("GPL");
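mfd_add_devices() turns each entry of da9063_devs[] into a platform device and remaps the IORESOURCE_IRQ entries against the irq_base passed at registration, so the cell resources above are all a sub-driver needs. A sub-driver would then pick up its interrupt the ordinary way; illustrative only, not part of this patch:

    /* e.g. in the onkey cell driver's probe() */
    int irq = platform_get_irq(pdev, 0);
    if (irq < 0)
    	return irq;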
diff --git a/drivers/mfd/da9063-i2c.c b/drivers/mfd/da9063-i2c.c
new file mode 100644
index 0000000..8db5c80
--- /dev/null
+++ b/drivers/mfd/da9063-i2c.c
@@ -0,0 +1,182 @@
+/* da9063-i2c.c: I2C support for Dialog DA9063
+ *
+ * Copyright 2012 Dialog Semiconductor Ltd.
+ * Copyright 2013 Philipp Zabel, Pengutronix
+ *
+ * Author: Krystian Garbaciak <krystian.garbaciak@diasemi.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/regmap.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+
+#include <linux/mfd/core.h>
+#include <linux/mfd/da9063/core.h>
+#include <linux/mfd/da9063/pdata.h>
+#include <linux/mfd/da9063/registers.h>
+
+static const struct regmap_range da9063_readable_ranges[] = {
+ {
+ .range_min = DA9063_REG_PAGE_CON,
+ .range_max = DA9063_REG_SECOND_D,
+ }, {
+ .range_min = DA9063_REG_SEQ,
+ .range_max = DA9063_REG_ID_32_31,
+ }, {
+ .range_min = DA9063_REG_SEQ_A,
+ .range_max = DA9063_REG_AUTO3_LOW,
+ }, {
+ .range_min = DA9063_REG_T_OFFSET,
+ .range_max = DA9063_REG_GP_ID_19,
+ }, {
+ .range_min = DA9063_REG_CHIP_ID,
+ .range_max = DA9063_REG_CHIP_VARIANT,
+ },
+};
+
+static const struct regmap_range da9063_writeable_ranges[] = {
+ {
+ .range_min = DA9063_REG_PAGE_CON,
+ .range_max = DA9063_REG_PAGE_CON,
+ }, {
+ .range_min = DA9063_REG_FAULT_LOG,
+ .range_max = DA9063_REG_VSYS_MON,
+ }, {
+ .range_min = DA9063_REG_COUNT_S,
+ .range_max = DA9063_REG_ALARM_Y,
+ }, {
+ .range_min = DA9063_REG_SEQ,
+ .range_max = DA9063_REG_ID_32_31,
+ }, {
+ .range_min = DA9063_REG_SEQ_A,
+ .range_max = DA9063_REG_AUTO3_LOW,
+ }, {
+ .range_min = DA9063_REG_CONFIG_I,
+ .range_max = DA9063_REG_MON_REG_4,
+ }, {
+ .range_min = DA9063_REG_GP_ID_0,
+ .range_max = DA9063_REG_GP_ID_19,
+ },
+};
+
+static const struct regmap_range da9063_volatile_ranges[] = {
+ {
+ .range_min = DA9063_REG_STATUS_A,
+ .range_max = DA9063_REG_EVENT_D,
+ }, {
+ .range_min = DA9063_REG_CONTROL_F,
+ .range_max = DA9063_REG_CONTROL_F,
+ }, {
+ .range_min = DA9063_REG_ADC_MAN,
+ .range_max = DA9063_REG_ADC_MAN,
+ }, {
+ .range_min = DA9063_REG_ADC_RES_L,
+ .range_max = DA9063_REG_SECOND_D,
+ }, {
+ .range_min = DA9063_REG_MON_REG_5,
+ .range_max = DA9063_REG_MON_REG_6,
+ },
+};
+
+static const struct regmap_access_table da9063_readable_table = {
+ .yes_ranges = da9063_readable_ranges,
+ .n_yes_ranges = ARRAY_SIZE(da9063_readable_ranges),
+};
+
+static const struct regmap_access_table da9063_writeable_table = {
+ .yes_ranges = da9063_writeable_ranges,
+ .n_yes_ranges = ARRAY_SIZE(da9063_writeable_ranges),
+};
+
+static const struct regmap_access_table da9063_volatile_table = {
+ .yes_ranges = da9063_volatile_ranges,
+ .n_yes_ranges = ARRAY_SIZE(da9063_volatile_ranges),
+};
+
+static const struct regmap_range_cfg da9063_range_cfg[] = {
+ {
+ .range_min = DA9063_REG_PAGE_CON,
+ .range_max = DA9063_REG_CHIP_VARIANT,
+ .selector_reg = DA9063_REG_PAGE_CON,
+ .selector_mask = 1 << DA9063_I2C_PAGE_SEL_SHIFT,
+ .selector_shift = DA9063_I2C_PAGE_SEL_SHIFT,
+ .window_start = 0,
+ .window_len = 256,
+ }
+};
+
+static struct regmap_config da9063_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .ranges = da9063_range_cfg,
+ .num_ranges = ARRAY_SIZE(da9063_range_cfg),
+ .max_register = DA9063_REG_CHIP_VARIANT,
+
+ .cache_type = REGCACHE_RBTREE,
+
+ .rd_table = &da9063_readable_table,
+ .wr_table = &da9063_writeable_table,
+ .volatile_table = &da9063_volatile_table,
+};
+
+static int da9063_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct da9063 *da9063;
+ int ret;
+
+ da9063 = devm_kzalloc(&i2c->dev, sizeof(struct da9063), GFP_KERNEL);
+ if (da9063 == NULL)
+ return -ENOMEM;
+
+ i2c_set_clientdata(i2c, da9063);
+ da9063->dev = &i2c->dev;
+ da9063->chip_irq = i2c->irq;
+
+ da9063->regmap = devm_regmap_init_i2c(i2c, &da9063_regmap_config);
+ if (IS_ERR(da9063->regmap)) {
+ ret = PTR_ERR(da9063->regmap);
+ dev_err(da9063->dev, "Failed to allocate register map: %d\n",
+ ret);
+ return ret;
+ }
+
+ return da9063_device_init(da9063, i2c->irq);
+}
+
+static int da9063_i2c_remove(struct i2c_client *i2c)
+{
+ struct da9063 *da9063 = i2c_get_clientdata(i2c);
+
+ da9063_device_exit(da9063);
+
+ return 0;
+}
+
+static const struct i2c_device_id da9063_i2c_id[] = {
+ {"da9063", PMIC_DA9063},
+ {},
+};
+MODULE_DEVICE_TABLE(i2c, da9063_i2c_id);
+
+static struct i2c_driver da9063_i2c_driver = {
+ .driver = {
+ .name = "da9063",
+ .owner = THIS_MODULE,
+ },
+ .probe = da9063_i2c_probe,
+ .remove = da9063_i2c_remove,
+ .id_table = da9063_i2c_id,
+};
+
+module_i2c_driver(da9063_i2c_driver);
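The regmap_range_cfg above describes the DA9063's paged register file: for any register between DA9063_REG_PAGE_CON and DA9063_REG_CHIP_VARIANT, regmap writes the page-select bit in PAGE_CON as needed and then performs the access through the 256-byte window, so callers never page by hand. A minimal sketch:

    unsigned int val;
    int ret;

    /* regmap selects the right page through PAGE_CON automatically */
    ret = regmap_read(da9063->regmap, DA9063_REG_CHIP_ID, &val);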
diff --git a/drivers/mfd/da9063-irq.c b/drivers/mfd/da9063-irq.c
new file mode 100644
index 0000000..8229226
--- /dev/null
+++ b/drivers/mfd/da9063-irq.c
@@ -0,0 +1,193 @@
+/* da9063-irq.c: Interrupt support for Dialog DA9063
+ *
+ * Copyright 2012 Dialog Semiconductor Ltd.
+ * Copyright 2013 Philipp Zabel, Pengutronix
+ *
+ * Author: Michal Hajduk <michal.hajduk@diasemi.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/irq.h>
+#include <linux/mfd/core.h>
+#include <linux/interrupt.h>
+#include <linux/regmap.h>
+#include <linux/mfd/da9063/core.h>
+#include <linux/mfd/da9063/pdata.h>
+
+#define DA9063_REG_EVENT_A_OFFSET 0
+#define DA9063_REG_EVENT_B_OFFSET 1
+#define DA9063_REG_EVENT_C_OFFSET 2
+#define DA9063_REG_EVENT_D_OFFSET 3
+#define EVENTS_BUF_LEN 4
+
+static const u8 mask_events_buf[] = { [0 ... (EVENTS_BUF_LEN - 1)] = ~0 };
+
+struct da9063_irq_data {
+ u16 reg;
+ u8 mask;
+};
+
+static struct regmap_irq da9063_irqs[] = {
+ /* DA9063 event A register */
+ [DA9063_IRQ_ONKEY] = {
+ .reg_offset = DA9063_REG_EVENT_A_OFFSET,
+ .mask = DA9063_M_ONKEY,
+ },
+ [DA9063_IRQ_ALARM] = {
+ .reg_offset = DA9063_REG_EVENT_A_OFFSET,
+ .mask = DA9063_M_ALARM,
+ },
+ [DA9063_IRQ_TICK] = {
+ .reg_offset = DA9063_REG_EVENT_A_OFFSET,
+ .mask = DA9063_M_TICK,
+ },
+ [DA9063_IRQ_ADC_RDY] = {
+ .reg_offset = DA9063_REG_EVENT_A_OFFSET,
+ .mask = DA9063_M_ADC_RDY,
+ },
+ [DA9063_IRQ_SEQ_RDY] = {
+ .reg_offset = DA9063_REG_EVENT_A_OFFSET,
+ .mask = DA9063_M_SEQ_RDY,
+ },
+ /* DA9063 event B register */
+ [DA9063_IRQ_WAKE] = {
+ .reg_offset = DA9063_REG_EVENT_B_OFFSET,
+ .mask = DA9063_M_WAKE,
+ },
+ [DA9063_IRQ_TEMP] = {
+ .reg_offset = DA9063_REG_EVENT_B_OFFSET,
+ .mask = DA9063_M_TEMP,
+ },
+ [DA9063_IRQ_COMP_1V2] = {
+ .reg_offset = DA9063_REG_EVENT_B_OFFSET,
+ .mask = DA9063_M_COMP_1V2,
+ },
+ [DA9063_IRQ_LDO_LIM] = {
+ .reg_offset = DA9063_REG_EVENT_B_OFFSET,
+ .mask = DA9063_M_LDO_LIM,
+ },
+ [DA9063_IRQ_REG_UVOV] = {
+ .reg_offset = DA9063_REG_EVENT_B_OFFSET,
+ .mask = DA9063_M_UVOV,
+ },
+ [DA9063_IRQ_VDD_MON] = {
+ .reg_offset = DA9063_REG_EVENT_B_OFFSET,
+ .mask = DA9063_M_VDD_MON,
+ },
+ [DA9063_IRQ_WARN] = {
+ .reg_offset = DA9063_REG_EVENT_B_OFFSET,
+ .mask = DA9063_M_VDD_WARN,
+ },
+ /* DA9063 event C register */
+ [DA9063_IRQ_GPI0] = {
+ .reg_offset = DA9063_REG_EVENT_C_OFFSET,
+ .mask = DA9063_M_GPI0,
+ },
+ [DA9063_IRQ_GPI1] = {
+ .reg_offset = DA9063_REG_EVENT_C_OFFSET,
+ .mask = DA9063_M_GPI1,
+ },
+ [DA9063_IRQ_GPI2] = {
+ .reg_offset = DA9063_REG_EVENT_C_OFFSET,
+ .mask = DA9063_M_GPI2,
+ },
+ [DA9063_IRQ_GPI3] = {
+ .reg_offset = DA9063_REG_EVENT_C_OFFSET,
+ .mask = DA9063_M_GPI3,
+ },
+ [DA9063_IRQ_GPI4] = {
+ .reg_offset = DA9063_REG_EVENT_C_OFFSET,
+ .mask = DA9063_M_GPI4,
+ },
+ [DA9063_IRQ_GPI5] = {
+ .reg_offset = DA9063_REG_EVENT_C_OFFSET,
+ .mask = DA9063_M_GPI5,
+ },
+ [DA9063_IRQ_GPI6] = {
+ .reg_offset = DA9063_REG_EVENT_C_OFFSET,
+ .mask = DA9063_M_GPI6,
+ },
+ [DA9063_IRQ_GPI7] = {
+ .reg_offset = DA9063_REG_EVENT_C_OFFSET,
+ .mask = DA9063_M_GPI7,
+ },
+ /* DA9063 event D register */
+ [DA9063_IRQ_GPI8] = {
+ .reg_offset = DA9063_REG_EVENT_D_OFFSET,
+ .mask = DA9063_M_GPI8,
+ },
+ [DA9063_IRQ_GPI9] = {
+ .reg_offset = DA9063_REG_EVENT_D_OFFSET,
+ .mask = DA9063_M_GPI9,
+ },
+ [DA9063_IRQ_GPI10] = {
+ .reg_offset = DA9063_REG_EVENT_D_OFFSET,
+ .mask = DA9063_M_GPI10,
+ },
+ [DA9063_IRQ_GPI11] = {
+ .reg_offset = DA9063_REG_EVENT_D_OFFSET,
+ .mask = DA9063_M_GPI11,
+ },
+ [DA9063_IRQ_GPI12] = {
+ .reg_offset = DA9063_REG_EVENT_D_OFFSET,
+ .mask = DA9063_M_GPI12,
+ },
+ [DA9063_IRQ_GPI13] = {
+ .reg_offset = DA9063_REG_EVENT_D_OFFSET,
+ .mask = DA9063_M_GPI13,
+ },
+ [DA9063_IRQ_GPI14] = {
+ .reg_offset = DA9063_REG_EVENT_D_OFFSET,
+ .mask = DA9063_M_GPI14,
+ },
+ [DA9063_IRQ_GPI15] = {
+ .reg_offset = DA9063_REG_EVENT_D_OFFSET,
+ .mask = DA9063_M_GPI15,
+ },
+};
+
+static struct regmap_irq_chip da9063_irq_chip = {
+ .name = "da9063-irq",
+ .irqs = da9063_irqs,
+ .num_irqs = DA9063_NUM_IRQ,
+
+ .num_regs = 4,
+ .status_base = DA9063_REG_EVENT_A,
+ .mask_base = DA9063_REG_IRQ_MASK_A,
+ .ack_base = DA9063_REG_EVENT_A,
+ .init_ack_masked = true,
+};
+
+int da9063_irq_init(struct da9063 *da9063)
+{
+ int ret;
+
+ if (!da9063->chip_irq) {
+ dev_err(da9063->dev, "No IRQ configured\n");
+ return -EINVAL;
+ }
+
+ ret = regmap_add_irq_chip(da9063->regmap, da9063->chip_irq,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT | IRQF_SHARED,
+ da9063->irq_base, &da9063_irq_chip,
+ &da9063->regmap_irq);
+ if (ret) {
+		dev_err(da9063->dev, "Failed to request IRQ %d: %d\n",
+ da9063->chip_irq, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+void da9063_irq_exit(struct da9063 *da9063)
+{
+ regmap_del_irq_chip(da9063->chip_irq, da9063->regmap_irq);
+}
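Because ack_base equals status_base, the EVENT registers serve as both status and acknowledge: regmap-irq clears an event by writing the handled bits back, and init_ack_masked additionally acks anything already pending when the chip is registered. Sub-drivers resolve their Linux interrupt number from the chip data; a sketch under the same assumptions:

    int virq = regmap_irq_get_virq(da9063->regmap_irq, DA9063_IRQ_ALARM);
    if (virq < 0)
    	return virq;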
diff --git a/drivers/mfd/davinci_voicecodec.c b/drivers/mfd/davinci_voicecodec.c
index fb643985..013ba81 100644
--- a/drivers/mfd/davinci_voicecodec.c
+++ b/drivers/mfd/davinci_voicecodec.c
@@ -27,21 +27,16 @@
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/clk.h>
+#include <linux/regmap.h>
#include <sound/pcm.h>
#include <linux/mfd/davinci_voicecodec.h>
-u32 davinci_vc_read(struct davinci_vc *davinci_vc, int reg)
-{
- return __raw_readl(davinci_vc->base + reg);
-}
-
-void davinci_vc_write(struct davinci_vc *davinci_vc,
- int reg, u32 val)
-{
- __raw_writel(val, davinci_vc->base + reg);
-}
+static struct regmap_config davinci_vc_regmap = {
+ .reg_bits = 32,
+ .val_bits = 32,
+};
static int __init davinci_vc_probe(struct platform_device *pdev)
{
@@ -74,6 +69,14 @@ static int __init davinci_vc_probe(struct platform_device *pdev)
goto fail;
}
+ davinci_vc->regmap = devm_regmap_init_mmio(&pdev->dev,
+ davinci_vc->base,
+ &davinci_vc_regmap);
+ if (IS_ERR(davinci_vc->regmap)) {
+ ret = PTR_ERR(davinci_vc->regmap);
+ goto fail;
+ }
+
res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
if (!res) {
dev_err(&pdev->dev, "no DMA resource\n");
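With the MMIO region wrapped in a regmap, the old davinci_vc_read()/davinci_vc_write() wrappers become redundant and callers use the generic regmap API instead. A sketch of the replacement pattern; the register names here are illustrative:

    unsigned int status;

    regmap_read(davinci_vc->regmap, DAVINCI_VC_INTSTATUS, &status);
    regmap_write(davinci_vc->regmap, DAVINCI_VC_INTCLR, status);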
diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c
index 0d68eb1..53f371d 100644
--- a/drivers/mfd/db8500-prcmu.c
+++ b/drivers/mfd/db8500-prcmu.c
@@ -465,7 +465,7 @@ static DEFINE_SPINLOCK(clk_mgt_lock);
#define CLK_MGT_ENTRY(_name, _branch, _clk38div)[PRCMU_##_name] = \
{ (PRCM_##_name##_MGT), 0 , _branch, _clk38div}
-struct clk_mgt clk_mgt[PRCMU_NUM_REG_CLOCKS] = {
+static struct clk_mgt clk_mgt[PRCMU_NUM_REG_CLOCKS] = {
CLK_MGT_ENTRY(SGACLK, PLL_DIV, false),
CLK_MGT_ENTRY(UARTCLK, PLL_FIX, true),
CLK_MGT_ENTRY(MSP02CLK, PLL_FIX, true),
@@ -2319,7 +2319,7 @@ unlock_and_return:
/**
* prcmu_ac_sleep_req - called when ARM no longer needs to talk to modem
*/
-void prcmu_ac_sleep_req()
+void prcmu_ac_sleep_req(void)
{
u32 val;
diff --git a/drivers/mfd/dm355evm_msp.c b/drivers/mfd/dm355evm_msp.c
index 7710227..7a55c00 100644
--- a/drivers/mfd/dm355evm_msp.c
+++ b/drivers/mfd/dm355evm_msp.c
@@ -315,8 +315,8 @@ static int add_children(struct i2c_client *client)
}
/* MMC/SD inputs -- right after the last config input */
- if (client->dev.platform_data) {
- void (*mmcsd_setup)(unsigned) = client->dev.platform_data;
+ if (dev_get_platdata(&client->dev)) {
+ void (*mmcsd_setup)(unsigned) = dev_get_platdata(&client->dev);
mmcsd_setup(dm355evm_msp_gpio.base + 8 + 5);
}
diff --git a/drivers/mfd/ezx-pcap.c b/drivers/mfd/ezx-pcap.c
index 5502106..7245b0c 100644
--- a/drivers/mfd/ezx-pcap.c
+++ b/drivers/mfd/ezx-pcap.c
@@ -177,7 +177,7 @@ static void pcap_msr_work(struct work_struct *work)
static void pcap_isr_work(struct work_struct *work)
{
struct pcap_chip *pcap = container_of(work, struct pcap_chip, isr_work);
- struct pcap_platform_data *pdata = pcap->spi->dev.platform_data;
+ struct pcap_platform_data *pdata = dev_get_platdata(&pcap->spi->dev);
u32 msr, isr, int_sel, service;
int irq;
@@ -394,7 +394,7 @@ static int pcap_add_subdev(struct pcap_chip *pcap,
static int ezx_pcap_remove(struct spi_device *spi)
{
struct pcap_chip *pcap = spi_get_drvdata(spi);
- struct pcap_platform_data *pdata = spi->dev.platform_data;
+ struct pcap_platform_data *pdata = dev_get_platdata(&spi->dev);
int i, adc_irq;
/* remove all registered subdevs */
@@ -420,7 +420,7 @@ static int ezx_pcap_remove(struct spi_device *spi)
static int ezx_pcap_probe(struct spi_device *spi)
{
- struct pcap_platform_data *pdata = spi->dev.platform_data;
+ struct pcap_platform_data *pdata = dev_get_platdata(&spi->dev);
struct pcap_chip *pcap;
int i, adc_irq;
int ret = -ENODEV;
diff --git a/drivers/mfd/htc-egpio.c b/drivers/mfd/htc-egpio.c
index 26aca54..49f39fe 100644
--- a/drivers/mfd/htc-egpio.c
+++ b/drivers/mfd/htc-egpio.c
@@ -261,7 +261,7 @@ static void egpio_write_cache(struct egpio_info *ei)
static int __init egpio_probe(struct platform_device *pdev)
{
- struct htc_egpio_platform_data *pdata = pdev->dev.platform_data;
+ struct htc_egpio_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct resource *res;
struct egpio_info *ei;
struct gpio_chip *chip;
diff --git a/drivers/mfd/htc-i2cpld.c b/drivers/mfd/htc-i2cpld.c
index c9dfce6..d7b2a75 100644
--- a/drivers/mfd/htc-i2cpld.c
+++ b/drivers/mfd/htc-i2cpld.c
@@ -340,7 +340,7 @@ static int htcpld_setup_chip_irq(
int ret = 0;
/* Get the platform and driver data */
- pdata = dev->platform_data;
+ pdata = dev_get_platdata(dev);
htcpld = platform_get_drvdata(pdev);
chip = &htcpld->chip[chip_index];
plat_chip_data = &pdata->chip[chip_index];
@@ -375,7 +375,7 @@ static int htcpld_register_chip_i2c(
struct i2c_board_info info;
/* Get the platform and driver data */
- pdata = dev->platform_data;
+ pdata = dev_get_platdata(dev);
htcpld = platform_get_drvdata(pdev);
chip = &htcpld->chip[chip_index];
plat_chip_data = &pdata->chip[chip_index];
@@ -447,7 +447,7 @@ static int htcpld_register_chip_gpio(
int ret = 0;
/* Get the platform and driver data */
- pdata = dev->platform_data;
+ pdata = dev_get_platdata(dev);
htcpld = platform_get_drvdata(pdev);
chip = &htcpld->chip[chip_index];
plat_chip_data = &pdata->chip[chip_index];
@@ -509,7 +509,7 @@ static int htcpld_setup_chips(struct platform_device *pdev)
int i;
/* Get the platform and driver data */
- pdata = dev->platform_data;
+ pdata = dev_get_platdata(dev);
htcpld = platform_get_drvdata(pdev);
/* Setup each chip's output GPIOs */
@@ -574,7 +574,7 @@ static int htcpld_core_probe(struct platform_device *pdev)
if (!dev)
return -ENODEV;
- pdata = dev->platform_data;
+ pdata = dev_get_platdata(dev);
if (!pdata) {
dev_warn(dev, "Platform data not found for htcpld core!\n");
return -ENXIO;
diff --git a/drivers/mfd/htc-pasic3.c b/drivers/mfd/htc-pasic3.c
index 0a5e85f..6bf92a5 100644
--- a/drivers/mfd/htc-pasic3.c
+++ b/drivers/mfd/htc-pasic3.c
@@ -126,7 +126,7 @@ static struct mfd_cell ds1wm_cell __initdata = {
static int __init pasic3_probe(struct platform_device *pdev)
{
- struct pasic3_platform_data *pdata = pdev->dev.platform_data;
+ struct pasic3_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct device *dev = &pdev->dev;
struct pasic3_data *asic;
struct resource *r;
diff --git a/drivers/mfd/intel_msic.c b/drivers/mfd/intel_msic.c
index 4f2462f..9203d47 100644
--- a/drivers/mfd/intel_msic.c
+++ b/drivers/mfd/intel_msic.c
@@ -310,7 +310,7 @@ EXPORT_SYMBOL_GPL(intel_msic_irq_read);
static int intel_msic_init_devices(struct intel_msic *msic)
{
struct platform_device *pdev = msic->pdev;
- struct intel_msic_platform_data *pdata = pdev->dev.platform_data;
+ struct intel_msic_platform_data *pdata = dev_get_platdata(&pdev->dev);
int ret, i;
if (pdata->gpio) {
@@ -372,7 +372,7 @@ static void intel_msic_remove_devices(struct intel_msic *msic)
static int intel_msic_probe(struct platform_device *pdev)
{
- struct intel_msic_platform_data *pdata = pdev->dev.platform_data;
+ struct intel_msic_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct intel_msic *msic;
struct resource *res;
u8 id0, id1;
diff --git a/drivers/mfd/kempld-core.c b/drivers/mfd/kempld-core.c
index 686a456..d3e2327 100644
--- a/drivers/mfd/kempld-core.c
+++ b/drivers/mfd/kempld-core.c
@@ -258,7 +258,7 @@ EXPORT_SYMBOL_GPL(kempld_write32);
*/
void kempld_get_mutex(struct kempld_device_data *pld)
{
- struct kempld_platform_data *pdata = pld->dev->platform_data;
+ struct kempld_platform_data *pdata = dev_get_platdata(pld->dev);
mutex_lock(&pld->lock);
pdata->get_hardware_mutex(pld);
@@ -271,7 +271,7 @@ EXPORT_SYMBOL_GPL(kempld_get_mutex);
*/
void kempld_release_mutex(struct kempld_device_data *pld)
{
- struct kempld_platform_data *pdata = pld->dev->platform_data;
+ struct kempld_platform_data *pdata = dev_get_platdata(pld->dev);
pdata->release_hardware_mutex(pld);
mutex_unlock(&pld->lock);
@@ -288,7 +288,7 @@ EXPORT_SYMBOL_GPL(kempld_release_mutex);
*/
static int kempld_get_info(struct kempld_device_data *pld)
{
- struct kempld_platform_data *pdata = pld->dev->platform_data;
+ struct kempld_platform_data *pdata = dev_get_platdata(pld->dev);
return pdata->get_info(pld);
}
@@ -302,7 +302,7 @@ static int kempld_get_info(struct kempld_device_data *pld)
*/
static int kempld_register_cells(struct kempld_device_data *pld)
{
- struct kempld_platform_data *pdata = pld->dev->platform_data;
+ struct kempld_platform_data *pdata = dev_get_platdata(pld->dev);
return pdata->register_cells(pld);
}
@@ -357,7 +357,7 @@ static int kempld_detect_device(struct kempld_device_data *pld)
static int kempld_probe(struct platform_device *pdev)
{
- struct kempld_platform_data *pdata = pdev->dev.platform_data;
+ struct kempld_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct device *dev = &pdev->dev;
struct kempld_device_data *pld;
struct resource *ioport;
@@ -394,7 +394,7 @@ static int kempld_probe(struct platform_device *pdev)
static int kempld_remove(struct platform_device *pdev)
{
struct kempld_device_data *pld = platform_get_drvdata(pdev);
- struct kempld_platform_data *pdata = pld->dev->platform_data;
+ struct kempld_platform_data *pdata = dev_get_platdata(pld->dev);
mfd_remove_devices(&pdev->dev);
pdata->release_hardware_mutex(pld);
@@ -413,6 +413,15 @@ static struct platform_driver kempld_driver = {
static struct dmi_system_id __initdata kempld_dmi_table[] = {
{
+ .ident = "BHL6",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Kontron"),
+ DMI_MATCH(DMI_BOARD_NAME, "COMe-bHL6"),
+ },
+ .driver_data = (void *)&kempld_platform_data_generic,
+ .callback = kempld_create_platform_device,
+ },
+ {
.ident = "CCR2",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Kontron"),
@@ -596,6 +605,15 @@ static struct dmi_system_id __initdata kempld_dmi_table[] = {
.driver_data = (void *)&kempld_platform_data_generic,
.callback = kempld_create_platform_device,
},
+ {
+ .ident = "UTH6",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Kontron"),
+ DMI_MATCH(DMI_BOARD_NAME, "COMe-cTH6"),
+ },
+ .driver_data = (void *)&kempld_platform_data_generic,
+ .callback = kempld_create_platform_device,
+ },
{}
};
MODULE_DEVICE_TABLE(dmi, kempld_dmi_table);
diff --git a/drivers/mfd/lm3533-core.c b/drivers/mfd/lm3533-core.c
index 4b7e6da..8c29f7b 100644
--- a/drivers/mfd/lm3533-core.c
+++ b/drivers/mfd/lm3533-core.c
@@ -384,7 +384,7 @@ static struct attribute_group lm3533_attribute_group = {
static int lm3533_device_als_init(struct lm3533 *lm3533)
{
- struct lm3533_platform_data *pdata = lm3533->dev->platform_data;
+ struct lm3533_platform_data *pdata = dev_get_platdata(lm3533->dev);
int ret;
if (!pdata->als)
@@ -407,7 +407,7 @@ static int lm3533_device_als_init(struct lm3533 *lm3533)
static int lm3533_device_bl_init(struct lm3533 *lm3533)
{
- struct lm3533_platform_data *pdata = lm3533->dev->platform_data;
+ struct lm3533_platform_data *pdata = dev_get_platdata(lm3533->dev);
int i;
int ret;
@@ -436,7 +436,7 @@ static int lm3533_device_bl_init(struct lm3533 *lm3533)
static int lm3533_device_led_init(struct lm3533 *lm3533)
{
- struct lm3533_platform_data *pdata = lm3533->dev->platform_data;
+ struct lm3533_platform_data *pdata = dev_get_platdata(lm3533->dev);
int i;
int ret;
@@ -481,7 +481,7 @@ static int lm3533_device_setup(struct lm3533 *lm3533,
static int lm3533_device_init(struct lm3533 *lm3533)
{
- struct lm3533_platform_data *pdata = lm3533->dev->platform_data;
+ struct lm3533_platform_data *pdata = dev_get_platdata(lm3533->dev);
int ret;
dev_dbg(lm3533->dev, "%s\n", __func__);
diff --git a/drivers/mfd/lp8788.c b/drivers/mfd/lp8788.c
index c3d3c9b..0f12219 100644
--- a/drivers/mfd/lp8788.c
+++ b/drivers/mfd/lp8788.c
@@ -173,7 +173,7 @@ static const struct regmap_config lp8788_regmap_config = {
static int lp8788_probe(struct i2c_client *cl, const struct i2c_device_id *id)
{
struct lp8788 *lp;
- struct lp8788_platform_data *pdata = cl->dev.platform_data;
+ struct lp8788_platform_data *pdata = dev_get_platdata(&cl->dev);
int ret;
lp = devm_kzalloc(&cl->dev, sizeof(struct lp8788), GFP_KERNEL);
diff --git a/drivers/mfd/lpc_ich.c b/drivers/mfd/lpc_ich.c
index 2403332..9483bc8 100644
--- a/drivers/mfd/lpc_ich.c
+++ b/drivers/mfd/lpc_ich.c
@@ -213,7 +213,7 @@ enum lpc_chipsets {
LPC_COLETO, /* Coleto Creek */
};
-struct lpc_ich_info lpc_chipset_info[] = {
+static struct lpc_ich_info lpc_chipset_info[] = {
[LPC_ICH] = {
.name = "ICH",
.iTCO_version = 1,
diff --git a/drivers/mfd/max77686.c b/drivers/mfd/max77686.c
index f27a218..522be67 100644
--- a/drivers/mfd/max77686.c
+++ b/drivers/mfd/max77686.c
@@ -77,7 +77,7 @@ static int max77686_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
struct max77686_dev *max77686 = NULL;
- struct max77686_platform_data *pdata = i2c->dev.platform_data;
+ struct max77686_platform_data *pdata = dev_get_platdata(&i2c->dev);
unsigned int data;
int ret = 0;
diff --git a/drivers/mfd/max77693.c b/drivers/mfd/max77693.c
index 9e60fed..c04723e 100644
--- a/drivers/mfd/max77693.c
+++ b/drivers/mfd/max77693.c
@@ -110,7 +110,7 @@ static int max77693_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
struct max77693_dev *max77693;
- struct max77693_platform_data *pdata = i2c->dev.platform_data;
+ struct max77693_platform_data *pdata = dev_get_platdata(&i2c->dev);
u8 reg_data;
int ret = 0;
diff --git a/drivers/mfd/max8925-i2c.c b/drivers/mfd/max8925-i2c.c
index 8042b32..de7fb80 100644
--- a/drivers/mfd/max8925-i2c.c
+++ b/drivers/mfd/max8925-i2c.c
@@ -151,7 +151,7 @@ static int max8925_dt_init(struct device_node *np, struct device *dev,
static int max8925_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- struct max8925_platform_data *pdata = client->dev.platform_data;
+ struct max8925_platform_data *pdata = dev_get_platdata(&client->dev);
static struct max8925_chip *chip;
struct device_node *node = client->dev.of_node;
diff --git a/drivers/mfd/max8997.c b/drivers/mfd/max8997.c
index 1471405..cee098c 100644
--- a/drivers/mfd/max8997.c
+++ b/drivers/mfd/max8997.c
@@ -51,7 +51,7 @@ static struct mfd_cell max8997_devs[] = {
#ifdef CONFIG_OF
static struct of_device_id max8997_pmic_dt_match[] = {
- { .compatible = "maxim,max8997-pmic", .data = TYPE_MAX8997 },
+ { .compatible = "maxim,max8997-pmic", .data = (void *)TYPE_MAX8997 },
{},
};
#endif
@@ -188,10 +188,11 @@ static int max8997_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
struct max8997_dev *max8997;
- struct max8997_platform_data *pdata = i2c->dev.platform_data;
+ struct max8997_platform_data *pdata = dev_get_platdata(&i2c->dev);
int ret = 0;
- max8997 = kzalloc(sizeof(struct max8997_dev), GFP_KERNEL);
+ max8997 = devm_kzalloc(&i2c->dev, sizeof(struct max8997_dev),
+ GFP_KERNEL);
if (max8997 == NULL)
return -ENOMEM;
@@ -203,14 +204,12 @@ static int max8997_i2c_probe(struct i2c_client *i2c,
if (max8997->dev->of_node) {
pdata = max8997_i2c_parse_dt_pdata(max8997->dev);
- if (IS_ERR(pdata)) {
- ret = PTR_ERR(pdata);
- goto err;
- }
+ if (IS_ERR(pdata))
+ return PTR_ERR(pdata);
}
if (!pdata)
- goto err;
+ return ret;
max8997->pdata = pdata;
max8997->ono = pdata->ono;
@@ -250,8 +249,6 @@ err_mfd:
i2c_unregister_device(max8997->muic);
i2c_unregister_device(max8997->haptic);
i2c_unregister_device(max8997->rtc);
-err:
- kfree(max8997);
return ret;
}
@@ -263,7 +260,6 @@ static int max8997_i2c_remove(struct i2c_client *i2c)
i2c_unregister_device(max8997->muic);
i2c_unregister_device(max8997->haptic);
i2c_unregister_device(max8997->rtc);
- kfree(max8997);
return 0;
}
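The devm_kzalloc() conversion is what lets the err label and the kfree() calls in the error and remove paths disappear: memory obtained from devm_kzalloc() is tied to the lifetime of the underlying struct device and is released automatically when probe fails or the device is unbound, so freeing it by hand would actually be a double free.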
diff --git a/drivers/mfd/max8998.c b/drivers/mfd/max8998.c
index 21af51a..fe6332d 100644
--- a/drivers/mfd/max8998.c
+++ b/drivers/mfd/max8998.c
@@ -184,11 +184,12 @@ static inline int max8998_i2c_get_driver_data(struct i2c_client *i2c,
static int max8998_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
- struct max8998_platform_data *pdata = i2c->dev.platform_data;
+ struct max8998_platform_data *pdata = dev_get_platdata(&i2c->dev);
struct max8998_dev *max8998;
int ret = 0;
- max8998 = kzalloc(sizeof(struct max8998_dev), GFP_KERNEL);
+ max8998 = devm_kzalloc(&i2c->dev, sizeof(struct max8998_dev),
+ GFP_KERNEL);
if (max8998 == NULL)
return -ENOMEM;
@@ -246,7 +247,6 @@ err:
mfd_remove_devices(max8998->dev);
max8998_irq_exit(max8998);
i2c_unregister_device(max8998->rtc);
- kfree(max8998);
return ret;
}
@@ -257,7 +257,6 @@ static int max8998_i2c_remove(struct i2c_client *i2c)
mfd_remove_devices(max8998->dev);
max8998_irq_exit(max8998);
i2c_unregister_device(max8998->rtc);
- kfree(max8998);
return 0;
}
diff --git a/drivers/mfd/mcp-sa11x0.c b/drivers/mfd/mcp-sa11x0.c
index 13198d9..41c31b3 100644
--- a/drivers/mfd/mcp-sa11x0.c
+++ b/drivers/mfd/mcp-sa11x0.c
@@ -156,7 +156,7 @@ static struct mcp_ops mcp_sa11x0 = {
static int mcp_sa11x0_probe(struct platform_device *dev)
{
- struct mcp_plat_data *data = dev->dev.platform_data;
+ struct mcp_plat_data *data = dev_get_platdata(&dev->dev);
struct resource *mem0, *mem1;
struct mcp_sa11x0 *m;
struct mcp *mcp;
diff --git a/drivers/mfd/menelaus.c b/drivers/mfd/menelaus.c
index 998ce8c..ad25bfa 100644
--- a/drivers/mfd/menelaus.c
+++ b/drivers/mfd/menelaus.c
@@ -442,7 +442,7 @@ void menelaus_unregister_mmc_callback(void)
menelaus_remove_irq_work(MENELAUS_MMC_S2D1_IRQ);
the_menelaus->mmc_callback = NULL;
- the_menelaus->mmc_callback_data = 0;
+ the_menelaus->mmc_callback_data = NULL;
}
EXPORT_SYMBOL(menelaus_unregister_mmc_callback);
@@ -466,7 +466,7 @@ static int menelaus_set_voltage(const struct menelaus_vtg *vtg, int mV,
struct i2c_client *c = the_menelaus->client;
mutex_lock(&the_menelaus->lock);
- if (vtg == 0)
+ if (!vtg)
goto set_voltage;
ret = menelaus_read_reg(vtg->vtg_reg);
@@ -1189,7 +1189,7 @@ static int menelaus_probe(struct i2c_client *client,
int rev = 0, val;
int err = 0;
struct menelaus_platform_data *menelaus_pdata =
- client->dev.platform_data;
+ dev_get_platdata(&client->dev);
if (the_menelaus) {
dev_dbg(&client->dev, "only one %s for now\n",
@@ -1197,7 +1197,7 @@ static int menelaus_probe(struct i2c_client *client,
return -ENODEV;
}
- menelaus = kzalloc(sizeof *menelaus, GFP_KERNEL);
+ menelaus = devm_kzalloc(&client->dev, sizeof(*menelaus), GFP_KERNEL);
if (!menelaus)
return -ENOMEM;
@@ -1210,8 +1210,7 @@ static int menelaus_probe(struct i2c_client *client,
rev = menelaus_read_reg(MENELAUS_REV);
if (rev < 0) {
pr_err(DRIVER_NAME ": device not found\n");
- err = -ENODEV;
- goto fail1;
+ return -ENODEV;
}
/* Ack and disable all Menelaus interrupts */
@@ -1231,7 +1230,7 @@ static int menelaus_probe(struct i2c_client *client,
if (err) {
dev_dbg(&client->dev, "can't get IRQ %d, err %d\n",
client->irq, err);
- goto fail1;
+ return err;
}
}
@@ -1242,7 +1241,7 @@ static int menelaus_probe(struct i2c_client *client,
val = menelaus_read_reg(MENELAUS_VCORE_CTRL1);
if (val < 0)
- goto fail2;
+ goto fail;
if (val & (1 << 7))
menelaus->vcore_hw_mode = 1;
else
@@ -1251,17 +1250,15 @@ static int menelaus_probe(struct i2c_client *client,
if (menelaus_pdata != NULL && menelaus_pdata->late_init != NULL) {
err = menelaus_pdata->late_init(&client->dev);
if (err < 0)
- goto fail2;
+ goto fail;
}
menelaus_rtc_init(menelaus);
return 0;
-fail2:
+fail:
free_irq(client->irq, menelaus);
flush_work(&menelaus->work);
-fail1:
- kfree(menelaus);
return err;
}
@@ -1271,7 +1268,6 @@ static int __exit menelaus_remove(struct i2c_client *client)
free_irq(client->irq, menelaus);
flush_work(&menelaus->work);
- kfree(menelaus);
the_menelaus = NULL;
return 0;
}
diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c
index 7604f4e..f421586 100644
--- a/drivers/mfd/mfd-core.c
+++ b/drivers/mfd/mfd-core.c
@@ -96,6 +96,8 @@ static int mfd_add_device(struct device *parent, int id,
pdev->dev.parent = parent;
pdev->dev.type = &mfd_dev_type;
+ pdev->dev.dma_mask = parent->dma_mask;
+ pdev->dev.dma_parms = parent->dma_parms;
if (parent->of_node && cell->of_compatible) {
for_each_child_of_node(parent->of_node, np) {
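Copying dma_mask and dma_parms from the MFD parent lets sub-devices inherit the parent's DMA capabilities; without this, a cell driver doing DMA would find itself on a platform device created with no DMA mask at all.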
diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c
index 759fae3..29ee54d 100644
--- a/drivers/mfd/omap-usb-host.c
+++ b/drivers/mfd/omap-usb-host.c
@@ -114,7 +114,7 @@ struct usbhs_hcd_omap {
};
/*-------------------------------------------------------------------------*/
-const char usbhs_driver_name[] = USBHS_DRIVER_NAME;
+static const char usbhs_driver_name[] = USBHS_DRIVER_NAME;
static u64 usbhs_dmamask = DMA_BIT_MASK(32);
/*-------------------------------------------------------------------------*/
@@ -232,7 +232,7 @@ err_end:
static int omap_usbhs_alloc_children(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct usbhs_omap_platform_data *pdata = dev->platform_data;
+ struct usbhs_omap_platform_data *pdata = dev_get_platdata(dev);
struct platform_device *ehci;
struct platform_device *ohci;
struct resource *res;
@@ -571,7 +571,7 @@ static struct of_device_id usbhs_child_match_table[] = {
static int usbhs_omap_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct usbhs_omap_platform_data *pdata = dev->platform_data;
+ struct usbhs_omap_platform_data *pdata = dev_get_platdata(dev);
struct usbhs_hcd_omap *omap;
struct resource *res;
int ret = 0;
diff --git a/drivers/mfd/palmas.c b/drivers/mfd/palmas.c
index e4d1c70..135afab 100644
--- a/drivers/mfd/palmas.c
+++ b/drivers/mfd/palmas.c
@@ -25,6 +25,52 @@
#include <linux/mfd/palmas.h>
#include <linux/of_device.h>
+#define PALMAS_EXT_REQ (PALMAS_EXT_CONTROL_ENABLE1 | \
+ PALMAS_EXT_CONTROL_ENABLE2 | \
+ PALMAS_EXT_CONTROL_NSLEEP)
+
+struct palmas_sleep_requestor_info {
+ int id;
+ int reg_offset;
+ int bit_pos;
+};
+
+#define EXTERNAL_REQUESTOR(_id, _offset, _pos) \
+ [PALMAS_EXTERNAL_REQSTR_ID_##_id] = { \
+ .id = PALMAS_EXTERNAL_REQSTR_ID_##_id, \
+ .reg_offset = _offset, \
+ .bit_pos = _pos, \
+ }
+
+static struct palmas_sleep_requestor_info sleep_req_info[] = {
+ EXTERNAL_REQUESTOR(REGEN1, 0, 0),
+ EXTERNAL_REQUESTOR(REGEN2, 0, 1),
+ EXTERNAL_REQUESTOR(SYSEN1, 0, 2),
+ EXTERNAL_REQUESTOR(SYSEN2, 0, 3),
+ EXTERNAL_REQUESTOR(CLK32KG, 0, 4),
+ EXTERNAL_REQUESTOR(CLK32KGAUDIO, 0, 5),
+ EXTERNAL_REQUESTOR(REGEN3, 0, 6),
+ EXTERNAL_REQUESTOR(SMPS12, 1, 0),
+ EXTERNAL_REQUESTOR(SMPS3, 1, 1),
+ EXTERNAL_REQUESTOR(SMPS45, 1, 2),
+ EXTERNAL_REQUESTOR(SMPS6, 1, 3),
+ EXTERNAL_REQUESTOR(SMPS7, 1, 4),
+ EXTERNAL_REQUESTOR(SMPS8, 1, 5),
+ EXTERNAL_REQUESTOR(SMPS9, 1, 6),
+ EXTERNAL_REQUESTOR(SMPS10, 1, 7),
+ EXTERNAL_REQUESTOR(LDO1, 2, 0),
+ EXTERNAL_REQUESTOR(LDO2, 2, 1),
+ EXTERNAL_REQUESTOR(LDO3, 2, 2),
+ EXTERNAL_REQUESTOR(LDO4, 2, 3),
+ EXTERNAL_REQUESTOR(LDO5, 2, 4),
+ EXTERNAL_REQUESTOR(LDO6, 2, 5),
+ EXTERNAL_REQUESTOR(LDO7, 2, 6),
+ EXTERNAL_REQUESTOR(LDO8, 2, 7),
+ EXTERNAL_REQUESTOR(LDO9, 3, 0),
+ EXTERNAL_REQUESTOR(LDOLN, 3, 1),
+ EXTERNAL_REQUESTOR(LDOUSB, 3, 2),
+};
+
static const struct regmap_config palmas_regmap_config[PALMAS_NUM_CLIENTS] = {
{
.reg_bits = 8,
@@ -186,6 +232,57 @@ static struct regmap_irq_chip palmas_irq_chip = {
PALMAS_INT1_MASK),
};
+int palmas_ext_control_req_config(struct palmas *palmas,
+ enum palmas_external_requestor_id id, int ext_ctrl, bool enable)
+{
+ int preq_mask_bit = 0;
+ int reg_add = 0;
+ int bit_pos;
+ int ret;
+
+ if (!(ext_ctrl & PALMAS_EXT_REQ))
+ return 0;
+
+ if (id >= PALMAS_EXTERNAL_REQSTR_ID_MAX)
+ return 0;
+
+ if (ext_ctrl & PALMAS_EXT_CONTROL_NSLEEP) {
+ reg_add = PALMAS_NSLEEP_RES_ASSIGN;
+ preq_mask_bit = 0;
+ } else if (ext_ctrl & PALMAS_EXT_CONTROL_ENABLE1) {
+ reg_add = PALMAS_ENABLE1_RES_ASSIGN;
+ preq_mask_bit = 1;
+ } else if (ext_ctrl & PALMAS_EXT_CONTROL_ENABLE2) {
+ reg_add = PALMAS_ENABLE2_RES_ASSIGN;
+ preq_mask_bit = 2;
+ }
+
+ bit_pos = sleep_req_info[id].bit_pos;
+ reg_add += sleep_req_info[id].reg_offset;
+ if (enable)
+ ret = palmas_update_bits(palmas, PALMAS_RESOURCE_BASE,
+ reg_add, BIT(bit_pos), BIT(bit_pos));
+ else
+ ret = palmas_update_bits(palmas, PALMAS_RESOURCE_BASE,
+ reg_add, BIT(bit_pos), 0);
+ if (ret < 0) {
+ dev_err(palmas->dev, "Resource reg 0x%02x update failed %d\n",
+ reg_add, ret);
+ return ret;
+ }
+
+ /* Unmask the PREQ */
+ ret = palmas_update_bits(palmas, PALMAS_PMU_CONTROL_BASE,
+ PALMAS_POWER_CTRL, BIT(preq_mask_bit), 0);
+ if (ret < 0) {
+ dev_err(palmas->dev, "POWER_CTRL register update failed %d\n",
+ ret);
+ return ret;
+ }
+ return ret;
+}
+EXPORT_SYMBOL_GPL(palmas_ext_control_req_config);
+
static int palmas_set_pdata_irq_flag(struct i2c_client *i2c,
struct palmas_platform_data *pdata)
{
@@ -229,6 +326,32 @@ static void palmas_dt_to_pdata(struct i2c_client *i2c,
PALMAS_POWER_CTRL_ENABLE2_MASK;
if (i2c->irq)
palmas_set_pdata_irq_flag(i2c, pdata);
+
+ pdata->pm_off = of_property_read_bool(node,
+ "ti,system-power-controller");
+}
+
+static struct palmas *palmas_dev;
+static void palmas_power_off(void)
+{
+ unsigned int addr;
+ int ret, slave;
+
+ if (!palmas_dev)
+ return;
+
+ slave = PALMAS_BASE_TO_SLAVE(PALMAS_PMU_CONTROL_BASE);
+ addr = PALMAS_BASE_TO_REG(PALMAS_PMU_CONTROL_BASE, PALMAS_DEV_CTRL);
+
+ ret = regmap_update_bits(
+ palmas_dev->regmap[slave],
+ addr,
+ PALMAS_DEV_CTRL_DEV_ON,
+ 0);
+
+ if (ret)
+ pr_err("%s: Unable to write to DEV_CTRL_DEV_ON: %d\n",
+ __func__, ret);
}
static unsigned int palmas_features = PALMAS_PMIC_FEATURE_SMPS10_BOOST;
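pm_power_off is the global hook that machine_power_off() invokes at shutdown. The probe change below installs palmas_power_off() only when the device tree marks this PMIC as the system power controller ("ti,system-power-controller") and no other handler has already claimed the hook.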
@@ -423,10 +546,13 @@ no_irq:
*/
if (node) {
ret = of_platform_populate(node, NULL, NULL, &i2c->dev);
- if (ret < 0)
+ if (ret < 0) {
goto err_irq;
- else
+ } else if (pdata->pm_off && !pm_power_off) {
+ palmas_dev = palmas;
+ pm_power_off = palmas_power_off;
return ret;
+ }
}
return ret;
diff --git a/drivers/mfd/pcf50633-adc.c b/drivers/mfd/pcf50633-adc.c
index 18b53cb..b8941a5 100644
--- a/drivers/mfd/pcf50633-adc.c
+++ b/drivers/mfd/pcf50633-adc.c
@@ -203,7 +203,7 @@ static int pcf50633_adc_probe(struct platform_device *pdev)
{
struct pcf50633_adc *adc;
- adc = kzalloc(sizeof(*adc), GFP_KERNEL);
+ adc = devm_kzalloc(&pdev->dev, sizeof(*adc), GFP_KERNEL);
if (!adc)
return -ENOMEM;
@@ -236,7 +236,6 @@ static int pcf50633_adc_remove(struct platform_device *pdev)
kfree(adc->queue[i]);
mutex_unlock(&adc->queue_mutex);
- kfree(adc);
return 0;
}
diff --git a/drivers/mfd/pcf50633-core.c b/drivers/mfd/pcf50633-core.c
index d115673..6841d68 100644
--- a/drivers/mfd/pcf50633-core.c
+++ b/drivers/mfd/pcf50633-core.c
@@ -195,7 +195,7 @@ static int pcf50633_probe(struct i2c_client *client,
const struct i2c_device_id *ids)
{
struct pcf50633 *pcf;
- struct pcf50633_platform_data *pdata = client->dev.platform_data;
+ struct pcf50633_platform_data *pdata = dev_get_platdata(&client->dev);
int i, ret;
int version, variant;
diff --git a/drivers/mfd/pm8921-core.c b/drivers/mfd/pm8921-core.c
index ecc137f..a6841f7 100644
--- a/drivers/mfd/pm8921-core.c
+++ b/drivers/mfd/pm8921-core.c
@@ -14,6 +14,7 @@
#define pr_fmt(fmt) "%s: " fmt, __func__
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/err.h>
@@ -106,7 +107,7 @@ static int pm8921_add_subdevices(const struct pm8921_platform_data
static int pm8921_probe(struct platform_device *pdev)
{
- const struct pm8921_platform_data *pdata = pdev->dev.platform_data;
+ const struct pm8921_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct pm8921 *pmic;
int rc;
u8 val;
@@ -117,7 +118,7 @@ static int pm8921_probe(struct platform_device *pdev)
return -EINVAL;
}
- pmic = kzalloc(sizeof(struct pm8921), GFP_KERNEL);
+ pmic = devm_kzalloc(&pdev->dev, sizeof(struct pm8921), GFP_KERNEL);
if (!pmic) {
pr_err("Cannot alloc pm8921 struct\n");
return -ENOMEM;
@@ -127,7 +128,7 @@ static int pm8921_probe(struct platform_device *pdev)
rc = ssbi_read(pdev->dev.parent, REG_HWREV, &val, sizeof(val));
if (rc) {
pr_err("Failed to read hw rev reg %d:rc=%d\n", REG_HWREV, rc);
- goto err_read_rev;
+ return rc;
}
pr_info("PMIC revision 1: %02X\n", val);
rev = val;
@@ -137,7 +138,7 @@ static int pm8921_probe(struct platform_device *pdev)
if (rc) {
pr_err("Failed to read hw rev 2 reg %d:rc=%d\n",
REG_HWREV_2, rc);
- goto err_read_rev;
+ return rc;
}
pr_info("PMIC revision 2: %02X\n", val);
rev |= val << BITS_PER_BYTE;
@@ -159,9 +160,6 @@ static int pm8921_probe(struct platform_device *pdev)
err:
mfd_remove_devices(pmic->dev);
- platform_set_drvdata(pdev, NULL);
-err_read_rev:
- kfree(pmic);
return rc;
}
@@ -179,8 +177,6 @@ static int pm8921_remove(struct platform_device *pdev)
pm8xxx_irq_exit(pmic->irq_chip);
pmic->irq_chip = NULL;
}
- platform_set_drvdata(pdev, NULL);
- kfree(pmic);
return 0;
}
diff --git a/drivers/mfd/rc5t583.c b/drivers/mfd/rc5t583.c
index 14bdacc..3463301 100644
--- a/drivers/mfd/rc5t583.c
+++ b/drivers/mfd/rc5t583.c
@@ -250,7 +250,7 @@ static int rc5t583_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
struct rc5t583 *rc5t583;
- struct rc5t583_platform_data *pdata = i2c->dev.platform_data;
+ struct rc5t583_platform_data *pdata = dev_get_platdata(&i2c->dev);
int ret;
bool irq_init_success = false;
diff --git a/drivers/mfd/rtl8411.c b/drivers/mfd/rtl8411.c
index c436bf2..5280135 100644
--- a/drivers/mfd/rtl8411.c
+++ b/drivers/mfd/rtl8411.c
@@ -1,6 +1,6 @@
/* Driver for Realtek PCI-Express card reader
*
- * Copyright(c) 2009 Realtek Semiconductor Corp. All rights reserved.
+ * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -17,7 +17,7 @@
*
* Author:
* Wei WANG <wei_wang@realsil.com.cn>
- * No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
+ * Roger Tseng <rogerable@realtek.com>
*/
#include <linux/module.h>
@@ -47,19 +47,77 @@ static int rtl8411b_is_qfn48(struct rtsx_pcr *pcr)
return 0;
}
+static void rtl8411_fetch_vendor_settings(struct rtsx_pcr *pcr)
+{
+ u32 reg1;
+ u8 reg3;
+
+ rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG1, &reg1);
+ dev_dbg(&(pcr->pci->dev), "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG1, reg1);
+
+ if (!rtsx_vendor_setting_valid(reg1))
+ return;
+
+ pcr->aspm_en = rtsx_reg_to_aspm(reg1);
+ pcr->sd30_drive_sel_1v8 =
+ map_sd_drive(rtsx_reg_to_sd30_drive_sel_1v8(reg1));
+ pcr->card_drive_sel &= 0x3F;
+ pcr->card_drive_sel |= rtsx_reg_to_card_drive_sel(reg1);
+
+ rtsx_pci_read_config_byte(pcr, PCR_SETTING_REG3, &reg3);
+ dev_dbg(&(pcr->pci->dev), "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG3, reg3);
+ pcr->sd30_drive_sel_3v3 = rtl8411_reg_to_sd30_drive_sel_3v3(reg3);
+}
+
+static void rtl8411b_fetch_vendor_settings(struct rtsx_pcr *pcr)
+{
+ u32 reg;
+
+ rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG1, &reg);
+ dev_dbg(&(pcr->pci->dev), "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG1, reg);
+
+ if (!rtsx_vendor_setting_valid(reg))
+ return;
+
+ pcr->aspm_en = rtsx_reg_to_aspm(reg);
+ pcr->sd30_drive_sel_1v8 =
+ map_sd_drive(rtsx_reg_to_sd30_drive_sel_1v8(reg));
+ pcr->sd30_drive_sel_3v3 =
+ map_sd_drive(rtl8411b_reg_to_sd30_drive_sel_3v3(reg));
+}
+
+static void rtl8411_force_power_down(struct rtsx_pcr *pcr, u8 pm_state)
+{
+ rtsx_pci_write_register(pcr, FPDCTL, 0x07, 0x07);
+}
+
static int rtl8411_extra_init_hw(struct rtsx_pcr *pcr)
{
- return rtsx_pci_write_register(pcr, CD_PAD_CTL,
+ rtsx_pci_init_cmd(pcr);
+
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_DRIVE_SEL,
+ 0xFF, pcr->sd30_drive_sel_3v3);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CD_PAD_CTL,
CD_DISABLE_MASK | CD_AUTO_DISABLE, CD_ENABLE);
+
+ return rtsx_pci_send_cmd(pcr, 100);
}
static int rtl8411b_extra_init_hw(struct rtsx_pcr *pcr)
{
- if (rtl8411b_is_qfn48(pcr))
- rtsx_pci_write_register(pcr, CARD_PULL_CTL3, 0xFF, 0xF5);
+ rtsx_pci_init_cmd(pcr);
- return rtsx_pci_write_register(pcr, CD_PAD_CTL,
+ if (rtl8411b_is_qfn48(pcr))
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
+ CARD_PULL_CTL3, 0xFF, 0xF5);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_DRIVE_SEL,
+ 0xFF, pcr->sd30_drive_sel_3v3);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CD_PAD_CTL,
CD_DISABLE_MASK | CD_AUTO_DISABLE, CD_ENABLE);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, FUNC_FORCE_CTL,
+ 0x06, 0x00);
+
+ return rtsx_pci_send_cmd(pcr, 100);
}
static int rtl8411_turn_on_led(struct rtsx_pcr *pcr)
@@ -141,13 +199,13 @@ static int rtl8411_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
mask = (BPP_REG_TUNED18 << BPP_TUNED18_SHIFT_8411) | BPP_PAD_MASK;
if (voltage == OUTPUT_3V3) {
err = rtsx_pci_write_register(pcr,
- SD30_DRIVE_SEL, 0x07, DRIVER_TYPE_D);
+ SD30_DRIVE_SEL, 0x07, pcr->sd30_drive_sel_3v3);
if (err < 0)
return err;
val = (BPP_ASIC_3V3 << BPP_TUNED18_SHIFT_8411) | BPP_PAD_3V3;
} else if (voltage == OUTPUT_1V8) {
err = rtsx_pci_write_register(pcr,
- SD30_DRIVE_SEL, 0x07, DRIVER_TYPE_B);
+ SD30_DRIVE_SEL, 0x07, pcr->sd30_drive_sel_1v8);
if (err < 0)
return err;
val = (BPP_ASIC_1V8 << BPP_TUNED18_SHIFT_8411) | BPP_PAD_1V8;
@@ -222,6 +280,7 @@ static int rtl8411_conv_clk_and_div_n(int input, int dir)
}
static const struct pcr_ops rtl8411_pcr_ops = {
+ .fetch_vendor_settings = rtl8411_fetch_vendor_settings,
.extra_init_hw = rtl8411_extra_init_hw,
.optimize_phy = NULL,
.turn_on_led = rtl8411_turn_on_led,
@@ -233,9 +292,11 @@ static const struct pcr_ops rtl8411_pcr_ops = {
.switch_output_voltage = rtl8411_switch_output_voltage,
.cd_deglitch = rtl8411_cd_deglitch,
.conv_clk_and_div_n = rtl8411_conv_clk_and_div_n,
+ .force_power_down = rtl8411_force_power_down,
};
static const struct pcr_ops rtl8411b_pcr_ops = {
+ .fetch_vendor_settings = rtl8411b_fetch_vendor_settings,
.extra_init_hw = rtl8411b_extra_init_hw,
.optimize_phy = NULL,
.turn_on_led = rtl8411_turn_on_led,
@@ -247,6 +308,7 @@ static const struct pcr_ops rtl8411b_pcr_ops = {
.switch_output_voltage = rtl8411_switch_output_voltage,
.cd_deglitch = rtl8411_cd_deglitch,
.conv_clk_and_div_n = rtl8411_conv_clk_and_div_n,
+ .force_power_down = rtl8411_force_power_down,
};
/* SD Pull Control Enable:
@@ -385,6 +447,14 @@ void rtl8411_init_params(struct rtsx_pcr *pcr)
pcr->num_slots = 2;
pcr->ops = &rtl8411_pcr_ops;
+ pcr->flags = 0;
+ pcr->card_drive_sel = RTL8411_CARD_DRIVE_DEFAULT;
+ pcr->sd30_drive_sel_1v8 = DRIVER_TYPE_B;
+ pcr->sd30_drive_sel_3v3 = DRIVER_TYPE_D;
+ pcr->aspm_en = ASPM_L1_EN;
+ pcr->tx_initial_phase = SET_CLOCK_PHASE(23, 7, 14);
+ pcr->rx_initial_phase = SET_CLOCK_PHASE(4, 3, 10);
+
pcr->ic_version = rtl8411_get_ic_version(pcr);
pcr->sd_pull_ctl_enable_tbl = rtl8411_sd_pull_ctl_enable_tbl;
pcr->sd_pull_ctl_disable_tbl = rtl8411_sd_pull_ctl_disable_tbl;
@@ -398,6 +468,14 @@ void rtl8411b_init_params(struct rtsx_pcr *pcr)
pcr->num_slots = 2;
pcr->ops = &rtl8411b_pcr_ops;
+ pcr->flags = 0;
+ pcr->card_drive_sel = RTL8411_CARD_DRIVE_DEFAULT;
+ pcr->sd30_drive_sel_1v8 = DRIVER_TYPE_B;
+ pcr->sd30_drive_sel_3v3 = DRIVER_TYPE_D;
+ pcr->aspm_en = ASPM_L1_EN;
+ pcr->tx_initial_phase = SET_CLOCK_PHASE(23, 7, 14);
+ pcr->rx_initial_phase = SET_CLOCK_PHASE(4, 3, 10);
+
pcr->ic_version = rtl8411_get_ic_version(pcr);
if (rtl8411b_is_qfn48(pcr)) {
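The rtl8411 hunks above convert one-off rtsx_pci_write_register() calls into a queued batch: rtsx_pci_init_cmd() resets the host command buffer, rtsx_pci_add_cmd() only records a masked register write, and nothing reaches the chip until rtsx_pci_send_cmd() pushes the whole queue with a millisecond timeout. A minimal sketch of that pattern, using only calls and registers that appear in the diff:

	/* queue several masked writes, then issue them as one transfer */
	rtsx_pci_init_cmd(pcr);
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_DRIVE_SEL,
			0xFF, pcr->sd30_drive_sel_3v3);	/* mask, value */
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CD_PAD_CTL,
			CD_DISABLE_MASK | CD_AUTO_DISABLE, CD_ENABLE);
	return rtsx_pci_send_cmd(pcr, 100);	/* 100 ms timeout */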
diff --git a/drivers/mfd/rts5209.c b/drivers/mfd/rts5209.c
index ec78d9f..cb04174 100644
--- a/drivers/mfd/rts5209.c
+++ b/drivers/mfd/rts5209.c
@@ -1,6 +1,6 @@
/* Driver for Realtek PCI-Express card reader
*
- * Copyright(c) 2009 Realtek Semiconductor Corp. All rights reserved.
+ * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -17,7 +17,6 @@
*
* Author:
* Wei WANG <wei_wang@realsil.com.cn>
- * No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
*/
#include <linux/module.h>
@@ -34,19 +33,34 @@ static u8 rts5209_get_ic_version(struct rtsx_pcr *pcr)
return val & 0x0F;
}
-static void rts5209_init_vendor_cfg(struct rtsx_pcr *pcr)
+static void rts5209_fetch_vendor_settings(struct rtsx_pcr *pcr)
{
- u32 val;
+ u32 reg;
- rtsx_pci_read_config_dword(pcr, 0x724, &val);
- dev_dbg(&(pcr->pci->dev), "Cfg 0x724: 0x%x\n", val);
+ rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG1, &reg);
+ dev_dbg(&(pcr->pci->dev), "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG1, reg);
- if (!(val & 0x80)) {
- if (val & 0x08)
- pcr->ms_pmos = false;
- else
- pcr->ms_pmos = true;
+ if (rts5209_vendor_setting1_valid(reg)) {
+ if (rts5209_reg_check_ms_pmos(reg))
+ pcr->flags |= PCR_MS_PMOS;
+ pcr->aspm_en = rts5209_reg_to_aspm(reg);
}
+
+ rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG2, &reg);
+ dev_dbg(&(pcr->pci->dev), "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG2, reg);
+
+ if (rts5209_vendor_setting2_valid(reg)) {
+ pcr->sd30_drive_sel_1v8 =
+ rts5209_reg_to_sd30_drive_sel_1v8(reg);
+ pcr->sd30_drive_sel_3v3 =
+ rts5209_reg_to_sd30_drive_sel_3v3(reg);
+ pcr->card_drive_sel = rts5209_reg_to_card_drive_sel(reg);
+ }
+}
+
+static void rts5209_force_power_down(struct rtsx_pcr *pcr, u8 pm_state)
+{
+ rtsx_pci_write_register(pcr, FPDCTL, 0x07, 0x07);
}
static int rts5209_extra_init_hw(struct rtsx_pcr *pcr)
@@ -55,8 +69,15 @@ static int rts5209_extra_init_hw(struct rtsx_pcr *pcr)
/* Turn off LED */
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_GPIO, 0xFF, 0x03);
+ /* Reset ASPM state to default value */
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, ASPM_FORCE_CTL, 0x3F, 0);
+ /* Force CLKREQ# PIN to drive 0 to request clock */
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG, 0x08, 0x08);
/* Configure GPIO as output */
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_GPIO_DIR, 0xFF, 0x03);
+ /* Configure driving */
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_DRIVE_SEL,
+ 0xFF, pcr->sd30_drive_sel_3v3);
return rtsx_pci_send_cmd(pcr, 100);
}
@@ -95,7 +116,7 @@ static int rts5209_card_power_on(struct rtsx_pcr *pcr, int card)
partial_pwr_on = SD_PARTIAL_POWER_ON;
pwr_on = SD_POWER_ON;
- if (pcr->ms_pmos && (card == RTSX_MS_CARD)) {
+ if ((pcr->flags & PCR_MS_PMOS) && (card == RTSX_MS_CARD)) {
pwr_mask = MS_POWER_MASK;
partial_pwr_on = MS_PARTIAL_POWER_ON;
pwr_on = MS_POWER_ON;
@@ -131,7 +152,7 @@ static int rts5209_card_power_off(struct rtsx_pcr *pcr, int card)
pwr_mask = SD_POWER_MASK;
pwr_off = SD_POWER_OFF;
- if (pcr->ms_pmos && (card == RTSX_MS_CARD)) {
+ if ((pcr->flags & PCR_MS_PMOS) && (card == RTSX_MS_CARD)) {
pwr_mask = MS_POWER_MASK;
pwr_off = MS_POWER_OFF;
}
@@ -140,7 +161,7 @@ static int rts5209_card_power_off(struct rtsx_pcr *pcr, int card)
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_PWR_CTL,
pwr_mask | PMOS_STRG_MASK, pwr_off | PMOS_STRG_400mA);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PWR_GATE_CTRL,
- LDO3318_PWR_MASK, 0X06);
+ LDO3318_PWR_MASK, 0x06);
return rtsx_pci_send_cmd(pcr, 100);
}
@@ -150,7 +171,7 @@ static int rts5209_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
if (voltage == OUTPUT_3V3) {
err = rtsx_pci_write_register(pcr,
- SD30_DRIVE_SEL, 0x07, DRIVER_TYPE_D);
+ SD30_DRIVE_SEL, 0x07, pcr->sd30_drive_sel_3v3);
if (err < 0)
return err;
err = rtsx_pci_write_phy_register(pcr, 0x08, 0x4FC0 | 0x24);
@@ -158,7 +179,7 @@ static int rts5209_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
return err;
} else if (voltage == OUTPUT_1V8) {
err = rtsx_pci_write_register(pcr,
- SD30_DRIVE_SEL, 0x07, DRIVER_TYPE_B);
+ SD30_DRIVE_SEL, 0x07, pcr->sd30_drive_sel_1v8);
if (err < 0)
return err;
err = rtsx_pci_write_phy_register(pcr, 0x08, 0x4C40 | 0x24);
@@ -172,6 +193,7 @@ static int rts5209_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
}
static const struct pcr_ops rts5209_pcr_ops = {
+ .fetch_vendor_settings = rts5209_fetch_vendor_settings,
.extra_init_hw = rts5209_extra_init_hw,
.optimize_phy = rts5209_optimize_phy,
.turn_on_led = rts5209_turn_on_led,
@@ -183,6 +205,7 @@ static const struct pcr_ops rts5209_pcr_ops = {
.switch_output_voltage = rts5209_switch_output_voltage,
.cd_deglitch = NULL,
.conv_clk_and_div_n = NULL,
+ .force_power_down = rts5209_force_power_down,
};
/* SD Pull Control Enable:
@@ -242,7 +265,13 @@ void rts5209_init_params(struct rtsx_pcr *pcr)
pcr->num_slots = 2;
pcr->ops = &rts5209_pcr_ops;
- rts5209_init_vendor_cfg(pcr);
+ pcr->flags = 0;
+ pcr->card_drive_sel = RTS5209_CARD_DRIVE_DEFAULT;
+ pcr->sd30_drive_sel_1v8 = DRIVER_TYPE_B;
+ pcr->sd30_drive_sel_3v3 = DRIVER_TYPE_D;
+ pcr->aspm_en = ASPM_L1_EN;
+ pcr->tx_initial_phase = SET_CLOCK_PHASE(27, 27, 16);
+ pcr->rx_initial_phase = SET_CLOCK_PHASE(24, 6, 5);
pcr->ic_version = rts5209_get_ic_version(pcr);
pcr->sd_pull_ctl_enable_tbl = rts5209_sd_pull_ctl_enable_tbl;
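rts5209_fetch_vendor_settings() above only copies fields out of two config-space dwords; all of the bit slicing lives in the rts5209_* macros added to rtsx_pcr.h further down in this patch. A standalone sketch of the decode on a made-up register value (the macros are copied from the diff; the input value is hypothetical):

#include <stdio.h>
#include <stdint.h>

/* Same field layout as the rts5209 macros added in rtsx_pcr.h below. */
#define rts5209_vendor_setting1_valid(reg)	(!((reg) & 0x80))
#define rts5209_reg_check_ms_pmos(reg)		(!((reg) & 0x08))
#define rts5209_reg_to_aspm(reg)		(((reg) >> 5) & 0x03)

int main(void)
{
	uint32_t reg = 0x00000028;	/* hypothetical PCR_SETTING_REG1 dword */

	if (rts5209_vendor_setting1_valid(reg)) {
		/* bit 3 set here, so the PCR_MS_PMOS flag stays clear */
		printf("ms_pmos: %d\n", rts5209_reg_check_ms_pmos(reg));
		printf("aspm_en: 0x%x\n", rts5209_reg_to_aspm(reg));
	}
	return 0;
}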
diff --git a/drivers/mfd/rts5227.c b/drivers/mfd/rts5227.c
index 164b7fa..9c8eec8 100644
--- a/drivers/mfd/rts5227.c
+++ b/drivers/mfd/rts5227.c
@@ -1,6 +1,6 @@
/* Driver for Realtek PCI-Express card reader
*
- * Copyright(c) 2009 Realtek Semiconductor Corp. All rights reserved.
+ * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -17,10 +17,7 @@
*
* Author:
* Wei WANG <wei_wang@realsil.com.cn>
- * No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
- *
* Roger Tseng <rogerable@realtek.com>
- * No. 2, Innovation Road II, Hsinchu Science Park, Hsinchu 300, Taiwan
*/
#include <linux/module.h>
@@ -29,6 +26,73 @@
#include "rtsx_pcr.h"
+static void rts5227_fill_driving(struct rtsx_pcr *pcr, u8 voltage)
+{
+ u8 driving_3v3[4][3] = {
+ {0x13, 0x13, 0x13},
+ {0x96, 0x96, 0x96},
+ {0x7F, 0x7F, 0x7F},
+ {0x96, 0x96, 0x96},
+ };
+ u8 driving_1v8[4][3] = {
+ {0x99, 0x99, 0x99},
+ {0xAA, 0xAA, 0xAA},
+ {0xFE, 0xFE, 0xFE},
+ {0xB3, 0xB3, 0xB3},
+ };
+ u8 (*driving)[3], drive_sel;
+
+ if (voltage == OUTPUT_3V3) {
+ driving = driving_3v3;
+ drive_sel = pcr->sd30_drive_sel_3v3;
+ } else {
+ driving = driving_1v8;
+ drive_sel = pcr->sd30_drive_sel_1v8;
+ }
+
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_CLK_DRIVE_SEL,
+ 0xFF, driving[drive_sel][0]);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_CMD_DRIVE_SEL,
+ 0xFF, driving[drive_sel][1]);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_DAT_DRIVE_SEL,
+ 0xFF, driving[drive_sel][2]);
+}
+
+static void rts5227_fetch_vendor_settings(struct rtsx_pcr *pcr)
+{
+ u32 reg;
+
+ rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG1, &reg);
+ dev_dbg(&(pcr->pci->dev), "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG1, reg);
+
+ if (!rtsx_vendor_setting_valid(reg))
+ return;
+
+ pcr->aspm_en = rtsx_reg_to_aspm(reg);
+ pcr->sd30_drive_sel_1v8 = rtsx_reg_to_sd30_drive_sel_1v8(reg);
+ pcr->card_drive_sel &= 0x3F;
+ pcr->card_drive_sel |= rtsx_reg_to_card_drive_sel(reg);
+
+ rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG2, &reg);
+ dev_dbg(&(pcr->pci->dev), "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG2, reg);
+ pcr->sd30_drive_sel_3v3 = rtsx_reg_to_sd30_drive_sel_3v3(reg);
+ if (rtsx_reg_check_reverse_socket(reg))
+ pcr->flags |= PCR_REVERSE_SOCKET;
+}
+
+static void rts5227_force_power_down(struct rtsx_pcr *pcr, u8 pm_state)
+{
+ /* Set relink_time to 0 */
+ rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 1, 0xFF, 0);
+ rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 2, 0xFF, 0);
+ rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 3, 0x01, 0);
+
+ if (pm_state == HOST_ENTER_S3)
+ rtsx_pci_write_register(pcr, PM_CTRL3, 0x10, 0x10);
+
+ rtsx_pci_write_register(pcr, FPDCTL, 0x03, 0x03);
+}
+
static int rts5227_extra_init_hw(struct rtsx_pcr *pcr)
{
u16 cap;
@@ -37,6 +101,8 @@ static int rts5227_extra_init_hw(struct rtsx_pcr *pcr)
/* Configure GPIO as output */
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, GPIO_CTL, 0x02, 0x02);
+ /* Reset ASPM state to default value */
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, ASPM_FORCE_CTL, 0x3F, 0);
/* Switch LDO3318 source from DV33 to card_3v3 */
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LDO_PWR_SEL, 0x03, 0x00);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LDO_PWR_SEL, 0x03, 0x01);
@@ -48,17 +114,16 @@ static int rts5227_extra_init_hw(struct rtsx_pcr *pcr)
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LTR_CTL, 0xFF, 0xA3);
/* Configure OBFF */
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, OBFF_CFG, 0x03, 0x03);
- /* Configure force_clock_req
- * Maybe We should define 0xFF03 as some name
- */
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, 0xFF03, 0x08, 0x08);
- /* Correct driving */
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
- SD30_CLK_DRIVE_SEL, 0xFF, 0x96);
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
- SD30_CMD_DRIVE_SEL, 0xFF, 0x96);
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
- SD30_DAT_DRIVE_SEL, 0xFF, 0x96);
+ /* Configure driving */
+ rts5227_fill_driving(pcr, OUTPUT_3V3);
+ /* Configure force_clock_req */
+ if (pcr->flags & PCR_REVERSE_SOCKET)
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
+ AUTOLOAD_CFG_BASE + 3, 0xB8, 0xB8);
+ else
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
+ AUTOLOAD_CFG_BASE + 3, 0xB8, 0x88);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PM_CTRL3, 0x10, 0x00);
return rtsx_pci_send_cmd(pcr, 100);
}
@@ -131,13 +196,11 @@ static int rts5227_card_power_off(struct rtsx_pcr *pcr, int card)
static int rts5227_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
{
int err;
- u8 drive_sel;
if (voltage == OUTPUT_3V3) {
err = rtsx_pci_write_phy_register(pcr, 0x08, 0x4FC0 | 0x24);
if (err < 0)
return err;
- drive_sel = 0x96;
} else if (voltage == OUTPUT_1V8) {
err = rtsx_pci_write_phy_register(pcr, 0x11, 0x3C02);
if (err < 0)
@@ -145,23 +208,18 @@ static int rts5227_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
err = rtsx_pci_write_phy_register(pcr, 0x08, 0x4C80 | 0x24);
if (err < 0)
return err;
- drive_sel = 0xB3;
} else {
return -EINVAL;
}
/* set pad drive */
rtsx_pci_init_cmd(pcr);
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_CLK_DRIVE_SEL,
- 0xFF, drive_sel);
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_CMD_DRIVE_SEL,
- 0xFF, drive_sel);
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_DAT_DRIVE_SEL,
- 0xFF, drive_sel);
+ rts5227_fill_driving(pcr, voltage);
return rtsx_pci_send_cmd(pcr, 100);
}
static const struct pcr_ops rts5227_pcr_ops = {
+ .fetch_vendor_settings = rts5227_fetch_vendor_settings,
.extra_init_hw = rts5227_extra_init_hw,
.optimize_phy = rts5227_optimize_phy,
.turn_on_led = rts5227_turn_on_led,
@@ -173,6 +231,7 @@ static const struct pcr_ops rts5227_pcr_ops = {
.switch_output_voltage = rts5227_switch_output_voltage,
.cd_deglitch = NULL,
.conv_clk_and_div_n = NULL,
+ .force_power_down = rts5227_force_power_down,
};
/* SD Pull Control Enable:
@@ -227,6 +286,14 @@ void rts5227_init_params(struct rtsx_pcr *pcr)
pcr->num_slots = 2;
pcr->ops = &rts5227_pcr_ops;
+ pcr->flags = 0;
+ pcr->card_drive_sel = RTSX_CARD_DRIVE_DEFAULT;
+ pcr->sd30_drive_sel_1v8 = CFG_DRIVER_TYPE_B;
+ pcr->sd30_drive_sel_3v3 = CFG_DRIVER_TYPE_B;
+ pcr->aspm_en = ASPM_L1_EN;
+ pcr->tx_initial_phase = SET_CLOCK_PHASE(27, 27, 15);
+ pcr->rx_initial_phase = SET_CLOCK_PHASE(30, 7, 7);
+
pcr->sd_pull_ctl_enable_tbl = rts5227_sd_pull_ctl_enable_tbl;
pcr->sd_pull_ctl_disable_tbl = rts5227_sd_pull_ctl_disable_tbl;
pcr->ms_pull_ctl_enable_tbl = rts5227_ms_pull_ctl_enable_tbl;
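rts5227_fill_driving() above treats sd30_drive_sel_* as a row index into a 4x3 table whose columns are the CLK, CMD and DAT pad strengths, so one vendor-provided selector fans out to three register writes. A standalone model of the lookup (the table is the 3V3 one from the diff; row index 1 is just an example):

#include <stdio.h>
#include <stdint.h>

/* drive_sel picks a row; columns are the CLK, CMD and DAT pad values. */
static const uint8_t driving_3v3[4][3] = {
	{0x13, 0x13, 0x13},
	{0x96, 0x96, 0x96},
	{0x7F, 0x7F, 0x7F},
	{0x96, 0x96, 0x96},
};

int main(void)
{
	uint8_t drive_sel = 1;	/* example row index for illustration */

	printf("CLK=0x%02X CMD=0x%02X DAT=0x%02X\n",
	       driving_3v3[drive_sel][0],
	       driving_3v3[drive_sel][1],
	       driving_3v3[drive_sel][2]);
	return 0;
}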
diff --git a/drivers/mfd/rts5229.c b/drivers/mfd/rts5229.c
index 58af4db..6353f5d 100644
--- a/drivers/mfd/rts5229.c
+++ b/drivers/mfd/rts5229.c
@@ -1,6 +1,6 @@
/* Driver for Realtek PCI-Express card reader
*
- * Copyright(c) 2009 Realtek Semiconductor Corp. All rights reserved.
+ * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -17,7 +17,6 @@
*
* Author:
* Wei WANG <wei_wang@realsil.com.cn>
- * No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
*/
#include <linux/module.h>
@@ -34,17 +33,51 @@ static u8 rts5229_get_ic_version(struct rtsx_pcr *pcr)
return val & 0x0F;
}
+static void rts5229_fetch_vendor_settings(struct rtsx_pcr *pcr)
+{
+ u32 reg;
+
+ rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG1, &reg);
+ dev_dbg(&(pcr->pci->dev), "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG1, reg);
+
+ if (!rtsx_vendor_setting_valid(reg))
+ return;
+
+ pcr->aspm_en = rtsx_reg_to_aspm(reg);
+ pcr->sd30_drive_sel_1v8 =
+ map_sd_drive(rtsx_reg_to_sd30_drive_sel_1v8(reg));
+ pcr->card_drive_sel &= 0x3F;
+ pcr->card_drive_sel |= rtsx_reg_to_card_drive_sel(reg);
+
+ rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG2, &reg);
+ dev_dbg(&(pcr->pci->dev), "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG2, reg);
+ pcr->sd30_drive_sel_3v3 =
+ map_sd_drive(rtsx_reg_to_sd30_drive_sel_3v3(reg));
+}
+
+static void rts5229_force_power_down(struct rtsx_pcr *pcr, u8 pm_state)
+{
+ rtsx_pci_write_register(pcr, FPDCTL, 0x03, 0x03);
+}
+
static int rts5229_extra_init_hw(struct rtsx_pcr *pcr)
{
rtsx_pci_init_cmd(pcr);
/* Configure GPIO as output */
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, GPIO_CTL, 0x02, 0x02);
+ /* Reset ASPM state to default value */
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, ASPM_FORCE_CTL, 0x3F, 0);
+ /* Force CLKREQ# PIN to drive 0 to request clock */
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG, 0x08, 0x08);
/* Switch LDO3318 source from DV33 to card_3v3 */
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LDO_PWR_SEL, 0x03, 0x00);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LDO_PWR_SEL, 0x03, 0x01);
/* LED shine disabled, set initial shine cycle period */
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, OLT_LED_CTL, 0x0F, 0x02);
+ /* Configure driving */
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_DRIVE_SEL,
+ 0xFF, pcr->sd30_drive_sel_3v3);
return rtsx_pci_send_cmd(pcr, 100);
}
@@ -110,7 +143,7 @@ static int rts5229_card_power_off(struct rtsx_pcr *pcr, int card)
SD_POWER_MASK | PMOS_STRG_MASK,
SD_POWER_OFF | PMOS_STRG_400mA);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PWR_GATE_CTRL,
- LDO3318_PWR_MASK, 0X00);
+ LDO3318_PWR_MASK, 0x00);
return rtsx_pci_send_cmd(pcr, 100);
}
@@ -120,7 +153,7 @@ static int rts5229_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
if (voltage == OUTPUT_3V3) {
err = rtsx_pci_write_register(pcr,
- SD30_DRIVE_SEL, 0x07, DRIVER_TYPE_D);
+ SD30_DRIVE_SEL, 0x07, pcr->sd30_drive_sel_3v3);
if (err < 0)
return err;
err = rtsx_pci_write_phy_register(pcr, 0x08, 0x4FC0 | 0x24);
@@ -128,7 +161,7 @@ static int rts5229_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
return err;
} else if (voltage == OUTPUT_1V8) {
err = rtsx_pci_write_register(pcr,
- SD30_DRIVE_SEL, 0x07, DRIVER_TYPE_B);
+ SD30_DRIVE_SEL, 0x07, pcr->sd30_drive_sel_1v8);
if (err < 0)
return err;
err = rtsx_pci_write_phy_register(pcr, 0x08, 0x4C40 | 0x24);
@@ -142,6 +175,7 @@ static int rts5229_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
}
static const struct pcr_ops rts5229_pcr_ops = {
+ .fetch_vendor_settings = rts5229_fetch_vendor_settings,
.extra_init_hw = rts5229_extra_init_hw,
.optimize_phy = rts5229_optimize_phy,
.turn_on_led = rts5229_turn_on_led,
@@ -153,6 +187,7 @@ static const struct pcr_ops rts5229_pcr_ops = {
.switch_output_voltage = rts5229_switch_output_voltage,
.cd_deglitch = NULL,
.conv_clk_and_div_n = NULL,
+ .force_power_down = rts5229_force_power_down,
};
/* SD Pull Control Enable:
@@ -221,6 +256,14 @@ void rts5229_init_params(struct rtsx_pcr *pcr)
pcr->num_slots = 2;
pcr->ops = &rts5229_pcr_ops;
+ pcr->flags = 0;
+ pcr->card_drive_sel = RTSX_CARD_DRIVE_DEFAULT;
+ pcr->sd30_drive_sel_1v8 = DRIVER_TYPE_B;
+ pcr->sd30_drive_sel_3v3 = DRIVER_TYPE_D;
+ pcr->aspm_en = ASPM_L1_EN;
+ pcr->tx_initial_phase = SET_CLOCK_PHASE(27, 27, 15);
+ pcr->rx_initial_phase = SET_CLOCK_PHASE(30, 6, 6);
+
pcr->ic_version = rts5229_get_ic_version(pcr);
if (pcr->ic_version == IC_VER_C) {
pcr->sd_pull_ctl_enable_tbl = rts5229_sd_pull_ctl_enable_tbl2;
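Unlike rts5227/rts5249, rts5229 programs a single SD30_DRIVE_SEL register, so the 2-bit field read from config space has to be translated to that register's encoding first; that is what the map_sd_drive() helper added to rtsx_pcr.h (shown further down) does. A standalone run of the mapping, with the table copied from the diff:

#include <stdio.h>
#include <stdint.h>

/* The 2-bit config-space field is an index, not the register value
 * itself; map it to the SD30_DRIVE_SEL encoding. */
static uint8_t map_sd_drive(int idx)
{
	uint8_t sd_drive[4] = {
		0x01,	/* Type D */
		0x02,	/* Type C */
		0x05,	/* Type A */
		0x03	/* Type B */
	};

	return sd_drive[idx];
}

int main(void)
{
	int i;

	for (i = 0; i < 4; i++)
		printf("field %d -> drive 0x%02X\n", i, map_sd_drive(i));
	return 0;
}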
diff --git a/drivers/mfd/rts5249.c b/drivers/mfd/rts5249.c
index 15dc848..3b835f5 100644
--- a/drivers/mfd/rts5249.c
+++ b/drivers/mfd/rts5249.c
@@ -17,7 +17,6 @@
*
* Author:
* Wei WANG <wei_wang@realsil.com.cn>
- * No. 128, West Shenhu Road, Suzhou Industry Park, Suzhou, China
*/
#include <linux/module.h>
@@ -34,24 +33,95 @@ static u8 rts5249_get_ic_version(struct rtsx_pcr *pcr)
return val & 0x0F;
}
+static void rts5249_fill_driving(struct rtsx_pcr *pcr, u8 voltage)
+{
+ u8 driving_3v3[4][3] = {
+ {0x11, 0x11, 0x11},
+ {0x55, 0x55, 0x5C},
+ {0x99, 0x99, 0x92},
+ {0x99, 0x99, 0x92},
+ };
+ u8 driving_1v8[4][3] = {
+ {0x3C, 0x3C, 0x3C},
+ {0xB3, 0xB3, 0xB3},
+ {0xFE, 0xFE, 0xFE},
+ {0xC4, 0xC4, 0xC4},
+ };
+ u8 (*driving)[3], drive_sel;
+
+ if (voltage == OUTPUT_3V3) {
+ driving = driving_3v3;
+ drive_sel = pcr->sd30_drive_sel_3v3;
+ } else {
+ driving = driving_1v8;
+ drive_sel = pcr->sd30_drive_sel_1v8;
+ }
+
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_CLK_DRIVE_SEL,
+ 0xFF, driving[drive_sel][0]);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_CMD_DRIVE_SEL,
+ 0xFF, driving[drive_sel][1]);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_DAT_DRIVE_SEL,
+ 0xFF, driving[drive_sel][2]);
+}
+
+static void rts5249_fetch_vendor_settings(struct rtsx_pcr *pcr)
+{
+ u32 reg;
+
+ rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG1, &reg);
+ dev_dbg(&(pcr->pci->dev), "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG1, reg);
+
+ if (!rtsx_vendor_setting_valid(reg))
+ return;
+
+ pcr->aspm_en = rtsx_reg_to_aspm(reg);
+ pcr->sd30_drive_sel_1v8 = rtsx_reg_to_sd30_drive_sel_1v8(reg);
+ pcr->card_drive_sel &= 0x3F;
+ pcr->card_drive_sel |= rtsx_reg_to_card_drive_sel(reg);
+
+ rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG2, &reg);
+ dev_dbg(&(pcr->pci->dev), "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG2, reg);
+ pcr->sd30_drive_sel_3v3 = rtsx_reg_to_sd30_drive_sel_3v3(reg);
+ if (rtsx_reg_check_reverse_socket(reg))
+ pcr->flags |= PCR_REVERSE_SOCKET;
+}
+
+static void rts5249_force_power_down(struct rtsx_pcr *pcr, u8 pm_state)
+{
+ /* Set relink_time to 0 */
+ rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 1, 0xFF, 0);
+ rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 2, 0xFF, 0);
+ rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 3, 0x01, 0);
+
+ if (pm_state == HOST_ENTER_S3)
+ rtsx_pci_write_register(pcr, PM_CTRL3, 0x10, 0x10);
+
+ rtsx_pci_write_register(pcr, FPDCTL, 0x03, 0x03);
+}
+
static int rts5249_extra_init_hw(struct rtsx_pcr *pcr)
{
rtsx_pci_init_cmd(pcr);
/* Configure GPIO as output */
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, GPIO_CTL, 0x02, 0x02);
+ /* Reset ASPM state to default value */
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, ASPM_FORCE_CTL, 0x3F, 0);
/* Switch LDO3318 source from DV33 to card_3v3 */
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LDO_PWR_SEL, 0x03, 0x00);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LDO_PWR_SEL, 0x03, 0x01);
/* LED shine disabled, set initial shine cycle period */
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, OLT_LED_CTL, 0x0F, 0x02);
- /* Correct driving */
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
- SD30_CLK_DRIVE_SEL, 0xFF, 0x99);
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
- SD30_CMD_DRIVE_SEL, 0xFF, 0x99);
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
- SD30_DAT_DRIVE_SEL, 0xFF, 0x92);
+ /* Configure driving */
+ rts5249_fill_driving(pcr, OUTPUT_3V3);
+ if (pcr->flags & PCR_REVERSE_SOCKET)
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
+ AUTOLOAD_CFG_BASE + 3, 0xB0, 0xB0);
+ else
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
+ AUTOLOAD_CFG_BASE + 3, 0xB0, 0x80);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PM_CTRL3, 0x10, 0x00);
return rtsx_pci_send_cmd(pcr, 100);
}
@@ -129,15 +199,11 @@ static int rts5249_card_power_off(struct rtsx_pcr *pcr, int card)
static int rts5249_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
{
int err;
- u8 clk_drive, cmd_drive, dat_drive;
if (voltage == OUTPUT_3V3) {
err = rtsx_pci_write_phy_register(pcr, PHY_TUNE, 0x4FC0 | 0x24);
if (err < 0)
return err;
- clk_drive = 0x99;
- cmd_drive = 0x99;
- dat_drive = 0x92;
} else if (voltage == OUTPUT_1V8) {
err = rtsx_pci_write_phy_register(pcr, PHY_BACR, 0x3C02);
if (err < 0)
@@ -145,25 +211,18 @@ static int rts5249_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
err = rtsx_pci_write_phy_register(pcr, PHY_TUNE, 0x4C40 | 0x24);
if (err < 0)
return err;
- clk_drive = 0xb3;
- cmd_drive = 0xb3;
- dat_drive = 0xb3;
} else {
return -EINVAL;
}
/* set pad drive */
rtsx_pci_init_cmd(pcr);
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_CLK_DRIVE_SEL,
- 0xFF, clk_drive);
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_CMD_DRIVE_SEL,
- 0xFF, cmd_drive);
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_DAT_DRIVE_SEL,
- 0xFF, dat_drive);
+ rts5249_fill_driving(pcr, voltage);
return rtsx_pci_send_cmd(pcr, 100);
}
static const struct pcr_ops rts5249_pcr_ops = {
+ .fetch_vendor_settings = rts5249_fetch_vendor_settings,
.extra_init_hw = rts5249_extra_init_hw,
.optimize_phy = rts5249_optimize_phy,
.turn_on_led = rts5249_turn_on_led,
@@ -173,6 +232,7 @@ static const struct pcr_ops rts5249_pcr_ops = {
.card_power_on = rts5249_card_power_on,
.card_power_off = rts5249_card_power_off,
.switch_output_voltage = rts5249_switch_output_voltage,
+ .force_power_down = rts5249_force_power_down,
};
/* SD Pull Control Enable:
@@ -233,6 +293,14 @@ void rts5249_init_params(struct rtsx_pcr *pcr)
pcr->num_slots = 2;
pcr->ops = &rts5249_pcr_ops;
+ pcr->flags = 0;
+ pcr->card_drive_sel = RTSX_CARD_DRIVE_DEFAULT;
+ pcr->sd30_drive_sel_1v8 = CFG_DRIVER_TYPE_C;
+ pcr->sd30_drive_sel_3v3 = CFG_DRIVER_TYPE_B;
+ pcr->aspm_en = ASPM_L1_EN;
+ pcr->tx_initial_phase = SET_CLOCK_PHASE(1, 29, 16);
+ pcr->rx_initial_phase = SET_CLOCK_PHASE(24, 6, 5);
+
pcr->ic_version = rts5249_get_ic_version(pcr);
pcr->sd_pull_ctl_enable_tbl = rts5249_sd_pull_ctl_enable_tbl;
pcr->sd_pull_ctl_disable_tbl = rts5249_sd_pull_ctl_disable_tbl;
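The rts5227 and rts5249 force_power_down callbacks above share one ordering: zero the relink time, set the S3 bit in PM_CTRL3 only when suspending (shutdown passes HOST_ENTER_S1 instead), and write FPDCTL last. A sketch restating that sequence; the reason FPDCTL must come last is an assumption, since the part is presumably unreachable once its core is forced down:

	/* 1. relink_time = 0 */
	rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 1, 0xFF, 0);
	rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 2, 0xFF, 0);
	rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 3, 0x01, 0);
	/* 2. flag the target sleep state, but only for S3 */
	if (pm_state == HOST_ENTER_S3)
		rtsx_pci_write_register(pcr, PM_CTRL3, 0x10, 0x10);
	/* 3. force power down last; assumed unreachable afterwards */
	rtsx_pci_write_register(pcr, FPDCTL, 0x03, 0x03);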
diff --git a/drivers/mfd/rtsx_pcr.c b/drivers/mfd/rtsx_pcr.c
index dd186c4..e6ae772 100644
--- a/drivers/mfd/rtsx_pcr.c
+++ b/drivers/mfd/rtsx_pcr.c
@@ -1,6 +1,6 @@
/* Driver for Realtek PCI-Express card reader
*
- * Copyright(c) 2009 Realtek Semiconductor Corp. All rights reserved.
+ * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -17,7 +17,6 @@
*
* Author:
* Wei WANG <wei_wang@realsil.com.cn>
- * No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
*/
#include <linux/pci.h>
@@ -73,6 +72,9 @@ void rtsx_pci_start_run(struct rtsx_pcr *pcr)
pcr->state = PDEV_STAT_RUN;
if (pcr->ops->enable_auto_blink)
pcr->ops->enable_auto_blink(pcr);
+
+ if (pcr->aspm_en)
+ rtsx_pci_write_config_byte(pcr, LCTLR, 0);
}
mod_delayed_work(system_wq, &pcr->idle_work, msecs_to_jiffies(200));
@@ -717,7 +719,7 @@ int rtsx_pci_card_exclusive_check(struct rtsx_pcr *pcr, int card)
[RTSX_MS_CARD] = MS_EXIST
};
- if (!pcr->ms_pmos) {
+ if (!(pcr->flags & PCR_MS_PMOS)) {
/* When using single PMOS, accessing card is not permitted
* if the existing card is not the designated one.
*/
@@ -918,9 +920,27 @@ static void rtsx_pci_idle_work(struct work_struct *work)
if (pcr->ops->turn_off_led)
pcr->ops->turn_off_led(pcr);
+ if (pcr->aspm_en)
+ rtsx_pci_write_config_byte(pcr, LCTLR, pcr->aspm_en);
+
mutex_unlock(&pcr->pcr_mutex);
}
+static void rtsx_pci_power_off(struct rtsx_pcr *pcr, u8 pm_state)
+{
+ if (pcr->ops->turn_off_led)
+ pcr->ops->turn_off_led(pcr);
+
+ rtsx_pci_writel(pcr, RTSX_BIER, 0);
+ pcr->bier = 0;
+
+ rtsx_pci_write_register(pcr, PETXCFG, 0x08, 0x08);
+ rtsx_pci_write_register(pcr, HOST_SLEEP_STATE, 0x03, pm_state);
+
+ if (pcr->ops->force_power_down)
+ pcr->ops->force_power_down(pcr, pm_state);
+}
+
static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
{
int err;
@@ -951,13 +971,11 @@ static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, HOST_SLEEP_STATE, 0x03, 0x00);
/* Disable card clock */
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_CLK_EN, 0x1E, 0);
- /* Reset ASPM state to default value */
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, ASPM_FORCE_CTL, 0x3F, 0);
/* Reset delink mode */
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CHANGE_LINK_STATE, 0x0A, 0);
/* Card driving select */
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_DRIVE_SEL,
- 0x07, DRIVER_TYPE_D);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_DRIVE_SEL,
+ 0xFF, pcr->card_drive_sel);
/* Enable SSC Clock */
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1,
0xFF, SSC_8X_EN | SSC_SEL_4M);
@@ -982,13 +1000,13 @@ static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
* 0: ELBI interrupt flag[31:22] & [7:0] only can be write clear
*/
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, NFTS_TX_CTRL, 0x02, 0);
- /* Force CLKREQ# PIN to drive 0 to request clock */
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG, 0x08, 0x08);
err = rtsx_pci_send_cmd(pcr, 100);
if (err < 0)
return err;
+ rtsx_pci_write_config_byte(pcr, LCTLR, 0);
+
/* Enable clk_request_n to enable clock power management */
rtsx_pci_write_config_byte(pcr, 0x81, 1);
/* Enter L1 when host tx idle */
@@ -1053,6 +1071,18 @@ static int rtsx_pci_init_chip(struct rtsx_pcr *pcr)
if (!pcr->slots)
return -ENOMEM;
+ if (pcr->ops->fetch_vendor_settings)
+ pcr->ops->fetch_vendor_settings(pcr);
+
+ dev_dbg(&(pcr->pci->dev), "pcr->aspm_en = 0x%x\n", pcr->aspm_en);
+ dev_dbg(&(pcr->pci->dev), "pcr->sd30_drive_sel_1v8 = 0x%x\n",
+ pcr->sd30_drive_sel_1v8);
+ dev_dbg(&(pcr->pci->dev), "pcr->sd30_drive_sel_3v3 = 0x%x\n",
+ pcr->sd30_drive_sel_3v3);
+ dev_dbg(&(pcr->pci->dev), "pcr->card_drive_sel = 0x%x\n",
+ pcr->card_drive_sel);
+ dev_dbg(&(pcr->pci->dev), "pcr->flags = 0x%x\n", pcr->flags);
+
pcr->state = PDEV_STAT_IDLE;
err = rtsx_pci_init_hw(pcr);
if (err < 0) {
@@ -1235,7 +1265,6 @@ static int rtsx_pci_suspend(struct pci_dev *pcidev, pm_message_t state)
{
struct pcr_handle *handle;
struct rtsx_pcr *pcr;
- int ret = 0;
dev_dbg(&(pcidev->dev), "--> %s\n", __func__);
@@ -1247,14 +1276,7 @@ static int rtsx_pci_suspend(struct pci_dev *pcidev, pm_message_t state)
mutex_lock(&pcr->pcr_mutex);
- if (pcr->ops->turn_off_led)
- pcr->ops->turn_off_led(pcr);
-
- rtsx_pci_writel(pcr, RTSX_BIER, 0);
- pcr->bier = 0;
-
- rtsx_pci_write_register(pcr, PETXCFG, 0x08, 0x08);
- rtsx_pci_write_register(pcr, HOST_SLEEP_STATE, 0x03, 0x02);
+ rtsx_pci_power_off(pcr, HOST_ENTER_S3);
pci_save_state(pcidev);
pci_enable_wake(pcidev, pci_choose_state(pcidev, state), 0);
@@ -1262,7 +1284,7 @@ static int rtsx_pci_suspend(struct pci_dev *pcidev, pm_message_t state)
pci_set_power_state(pcidev, pci_choose_state(pcidev, state));
mutex_unlock(&pcr->pcr_mutex);
- return ret;
+ return 0;
}
static int rtsx_pci_resume(struct pci_dev *pcidev)
@@ -1300,10 +1322,25 @@ out:
return ret;
}
+static void rtsx_pci_shutdown(struct pci_dev *pcidev)
+{
+ struct pcr_handle *handle;
+ struct rtsx_pcr *pcr;
+
+ dev_dbg(&(pcidev->dev), "--> %s\n", __func__);
+
+ handle = pci_get_drvdata(pcidev);
+ pcr = handle->pcr;
+ rtsx_pci_power_off(pcr, HOST_ENTER_S1);
+
+ pci_disable_device(pcidev);
+}
+
#else /* CONFIG_PM */
#define rtsx_pci_suspend NULL
#define rtsx_pci_resume NULL
+#define rtsx_pci_shutdown NULL
#endif /* CONFIG_PM */
@@ -1314,6 +1351,7 @@ static struct pci_driver rtsx_pci_driver = {
.remove = rtsx_pci_remove,
.suspend = rtsx_pci_suspend,
.resume = rtsx_pci_resume,
+ .shutdown = rtsx_pci_shutdown,
};
module_pci_driver(rtsx_pci_driver);
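Two of the rtsx_pcr.c hunks above form a pair: rtsx_pci_start_run() clears LCTLR (the PCIe Link Control register in config space) to switch ASPM off while the reader is busy, and rtsx_pci_idle_work() writes the saved policy back once the 200 ms idle timer fires. Restated side by side, using only calls from the diff:

	/* rtsx_pci_start_run(): device becomes active, disable ASPM */
	if (pcr->aspm_en)
		rtsx_pci_write_config_byte(pcr, LCTLR, 0);

	/* rtsx_pci_idle_work(): device idle again, re-arm saved policy */
	if (pcr->aspm_en)
		rtsx_pci_write_config_byte(pcr, LCTLR, pcr->aspm_en);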
diff --git a/drivers/mfd/rtsx_pcr.h b/drivers/mfd/rtsx_pcr.h
index c0cac7e..947e79b 100644
--- a/drivers/mfd/rtsx_pcr.h
+++ b/drivers/mfd/rtsx_pcr.h
@@ -1,6 +1,6 @@
/* Driver for Realtek PCI-Express card reader
*
- * Copyright(c) 2009 Realtek Semiconductor Corp. All rights reserved.
+ * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -17,7 +17,6 @@
*
* Author:
* Wei WANG <wei_wang@realsil.com.cn>
- * No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
*/
#ifndef __RTSX_PCR_H
@@ -35,4 +34,33 @@ void rts5227_init_params(struct rtsx_pcr *pcr);
void rts5249_init_params(struct rtsx_pcr *pcr);
void rtl8411b_init_params(struct rtsx_pcr *pcr);
+static inline u8 map_sd_drive(int idx)
+{
+ u8 sd_drive[4] = {
+ 0x01, /* Type D */
+ 0x02, /* Type C */
+ 0x05, /* Type A */
+ 0x03 /* Type B */
+ };
+
+ return sd_drive[idx];
+}
+
+#define rtsx_vendor_setting_valid(reg) (!((reg) & 0x1000000))
+#define rts5209_vendor_setting1_valid(reg) (!((reg) & 0x80))
+#define rts5209_vendor_setting2_valid(reg) ((reg) & 0x80)
+
+#define rtsx_reg_to_aspm(reg) (((reg) >> 28) & 0x03)
+#define rtsx_reg_to_sd30_drive_sel_1v8(reg) (((reg) >> 26) & 0x03)
+#define rtsx_reg_to_sd30_drive_sel_3v3(reg) (((reg) >> 5) & 0x03)
+#define rtsx_reg_to_card_drive_sel(reg) ((((reg) >> 25) & 0x01) << 6)
+#define rtsx_reg_check_reverse_socket(reg) ((reg) & 0x4000)
+#define rts5209_reg_to_aspm(reg) (((reg) >> 5) & 0x03)
+#define rts5209_reg_check_ms_pmos(reg) (!((reg) & 0x08))
+#define rts5209_reg_to_sd30_drive_sel_1v8(reg) (((reg) >> 3) & 0x07)
+#define rts5209_reg_to_sd30_drive_sel_3v3(reg) ((reg) & 0x07)
+#define rts5209_reg_to_card_drive_sel(reg) ((reg) >> 8)
+#define rtl8411_reg_to_sd30_drive_sel_3v3(reg) (((reg) >> 5) & 0x07)
+#define rtl8411b_reg_to_sd30_drive_sel_3v3(reg) ((reg) & 0x03)
+
#endif
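The generic rtsx_reg_to_* macros above reduce the vendor-settings decode to pure bit arithmetic, so it can be checked in isolation. A standalone sketch on a hypothetical PCR_SETTING_REG1 value (the macros are copied verbatim from the diff; the input value is invented):

#include <stdio.h>
#include <stdint.h>

#define rtsx_vendor_setting_valid(reg)		(!((reg) & 0x1000000))
#define rtsx_reg_to_aspm(reg)			(((reg) >> 28) & 0x03)
#define rtsx_reg_to_sd30_drive_sel_1v8(reg)	(((reg) >> 26) & 0x03)
#define rtsx_reg_to_card_drive_sel(reg)		((((reg) >> 25) & 0x01) << 6)

int main(void)
{
	uint32_t reg = 0x16000000;	/* hypothetical PCR_SETTING_REG1 dword */

	if (!rtsx_vendor_setting_valid(reg))	/* bit 24 set means "ignore" */
		return 0;
	printf("aspm_en            = 0x%x\n", rtsx_reg_to_aspm(reg));
	printf("sd30_drive_sel_1v8 = 0x%x\n", rtsx_reg_to_sd30_drive_sel_1v8(reg));
	printf("card_drive_sel bit = 0x%x\n", rtsx_reg_to_card_drive_sel(reg));
	return 0;
}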
diff --git a/drivers/mfd/sec-core.c b/drivers/mfd/sec-core.c
index 7976768..f530e4b 100644
--- a/drivers/mfd/sec-core.c
+++ b/drivers/mfd/sec-core.c
@@ -61,7 +61,9 @@ static struct mfd_cell s5m8767_devs[] = {
static struct mfd_cell s2mps11_devs[] = {
{
.name = "s2mps11-pmic",
- },
+ }, {
+ .name = "s2mps11-clk",
+ }
};
#ifdef CONFIG_OF
@@ -69,6 +71,9 @@ static struct of_device_id sec_dt_match[] = {
{ .compatible = "samsung,s5m8767-pmic",
.data = (void *)S5M8767X,
},
+ { .compatible = "samsung,s2mps11-pmic",
+ .data = (void *)S2MPS11X,
+ },
{},
};
#endif
@@ -103,6 +108,31 @@ int sec_reg_update(struct sec_pmic_dev *sec_pmic, u8 reg, u8 val, u8 mask)
}
EXPORT_SYMBOL_GPL(sec_reg_update);
+static bool s2mps11_volatile(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case S2MPS11_REG_INT1M:
+ case S2MPS11_REG_INT2M:
+ case S2MPS11_REG_INT3M:
+ return false;
+ default:
+ return true;
+ }
+}
+
+static bool s5m8763_volatile(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case S5M8763_REG_IRQM1:
+ case S5M8763_REG_IRQM2:
+ case S5M8763_REG_IRQM3:
+ case S5M8763_REG_IRQM4:
+ return false;
+ default:
+ return true;
+ }
+}
+
static struct regmap_config sec_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
@@ -113,6 +143,8 @@ static struct regmap_config s2mps11_regmap_config = {
.val_bits = 8,
.max_register = S2MPS11_REG_L38CTRL,
+ .volatile_reg = s2mps11_volatile,
+ .cache_type = REGCACHE_FLAT,
};
static struct regmap_config s5m8763_regmap_config = {
@@ -120,6 +152,8 @@ static struct regmap_config s5m8763_regmap_config = {
.val_bits = 8,
.max_register = S5M8763_REG_LBCNFG2,
+ .volatile_reg = s5m8763_volatile,
+ .cache_type = REGCACHE_FLAT,
};
static struct regmap_config s5m8767_regmap_config = {
@@ -127,6 +161,8 @@ static struct regmap_config s5m8767_regmap_config = {
.val_bits = 8,
.max_register = S5M8767_REG_LDO28CTRL,
+ .volatile_reg = s2mps11_volatile,
+ .cache_type = REGCACHE_FLAT,
};
#ifdef CONFIG_OF
@@ -182,7 +218,7 @@ static inline int sec_i2c_get_driver_data(struct i2c_client *i2c,
static int sec_pmic_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
- struct sec_platform_data *pdata = i2c->dev.platform_data;
+ struct sec_platform_data *pdata = dev_get_platdata(&i2c->dev);
const struct regmap_config *regmap;
struct sec_pmic_dev *sec_pmic;
int ret;
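The sec-core regmap changes above mark everything volatile except the interrupt-mask registers, so with REGCACHE_FLAT only those masks are cached and repeated reads of them skip the I2C bus. A sketch of the effect (sec_pmic->regmap is assumed to be the regmap created with s2mps11_regmap_config):

	unsigned int val;

	regmap_read(sec_pmic->regmap, S2MPS11_REG_INT1M, &val);	/* hits I2C */
	regmap_read(sec_pmic->regmap, S2MPS11_REG_INT1M, &val);	/* cache hit */
	regmap_read(sec_pmic->regmap, S2MPS11_REG_INT1, &val);	/* volatile: I2C */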
diff --git a/drivers/mfd/si476x-i2c.c b/drivers/mfd/si476x-i2c.c
index f5bc8e4..0e4a76d 100644
--- a/drivers/mfd/si476x-i2c.c
+++ b/drivers/mfd/si476x-i2c.c
@@ -718,7 +718,7 @@ static int si476x_core_probe(struct i2c_client *client,
atomic_set(&core->is_alive, 0);
core->power_state = SI476X_POWER_DOWN;
- pdata = client->dev.platform_data;
+ pdata = dev_get_platdata(&client->dev);
if (pdata) {
memcpy(&core->power_up_parameters,
&pdata->power_up_parameters,
diff --git a/drivers/mfd/sm501.c b/drivers/mfd/sm501.c
index 9816c23..33f040c 100644
--- a/drivers/mfd/sm501.c
+++ b/drivers/mfd/sm501.c
@@ -840,7 +840,7 @@ static int sm501_register_uart(struct sm501_devdata *sm, int devices)
if (!pdev)
return -ENOMEM;
- uart_data = pdev->dev.platform_data;
+ uart_data = dev_get_platdata(&pdev->dev);
if (devices & SM501_USE_UART0) {
sm501_setup_uart_data(sm, uart_data++, 0x30000);
@@ -1167,7 +1167,7 @@ static int sm501_register_gpio_i2c_instance(struct sm501_devdata *sm,
if (!pdev)
return -ENOMEM;
- icd = pdev->dev.platform_data;
+ icd = dev_get_platdata(&pdev->dev);
/* We keep the pin_sda and pin_scl fields relative in case the
* same platform data is passed to >1 SM501.
@@ -1403,7 +1403,7 @@ static int sm501_plat_probe(struct platform_device *dev)
sm->dev = &dev->dev;
sm->pdev_id = dev->id;
- sm->platdata = dev->dev.platform_data;
+ sm->platdata = dev_get_platdata(&dev->dev);
ret = platform_get_irq(dev, 0);
if (ret < 0) {
diff --git a/drivers/mfd/sta2x11-mfd.c b/drivers/mfd/sta2x11-mfd.c
index d70a3430..65c6fa6 100644
--- a/drivers/mfd/sta2x11-mfd.c
+++ b/drivers/mfd/sta2x11-mfd.c
@@ -133,7 +133,7 @@ int sta2x11_mfd_get_regs_data(struct platform_device *dev,
void __iomem **regs,
spinlock_t **lock)
{
- struct pci_dev *pdev = *(struct pci_dev **)(dev->dev.platform_data);
+ struct pci_dev *pdev = *(struct pci_dev **)dev_get_platdata(&dev->dev);
struct sta2x11_mfd *mfd;
if (!pdev)
@@ -312,7 +312,7 @@ static int sta2x11_mfd_platform_probe(struct platform_device *dev,
const char *name = sta2x11_mfd_names[index];
struct regmap_config *regmap_config = sta2x11_mfd_regmap_configs[index];
- pdev = dev->dev.platform_data;
+ pdev = dev_get_platdata(&dev->dev);
mfd = sta2x11_mfd_find(*pdev);
if (!mfd)
return -ENODEV;
diff --git a/drivers/mfd/stmpe.c b/drivers/mfd/stmpe.c
index 5d5e6f9..fff63a4 100644
--- a/drivers/mfd/stmpe.c
+++ b/drivers/mfd/stmpe.c
@@ -1106,7 +1106,8 @@ static int stmpe_devices_init(struct stmpe *stmpe)
return ret;
}
-void stmpe_of_probe(struct stmpe_platform_data *pdata, struct device_node *np)
+static void stmpe_of_probe(struct stmpe_platform_data *pdata,
+ struct device_node *np)
{
struct device_node *child;
diff --git a/drivers/mfd/syscon.c b/drivers/mfd/syscon.c
index 962a6e1..71841f9 100644
--- a/drivers/mfd/syscon.c
+++ b/drivers/mfd/syscon.c
@@ -25,7 +25,6 @@
static struct platform_driver syscon_driver;
struct syscon {
- void __iomem *base;
struct regmap *regmap;
};
@@ -129,6 +128,7 @@ static int syscon_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct syscon *syscon;
struct resource *res;
+ void __iomem *base;
syscon = devm_kzalloc(dev, sizeof(*syscon), GFP_KERNEL);
if (!syscon)
@@ -138,12 +138,12 @@ static int syscon_probe(struct platform_device *pdev)
if (!res)
return -ENOENT;
- syscon->base = devm_ioremap(dev, res->start, resource_size(res));
- if (!syscon->base)
+ base = devm_ioremap(dev, res->start, resource_size(res));
+ if (!base)
return -ENOMEM;
syscon_regmap_config.max_register = res->end - res->start - 3;
- syscon->regmap = devm_regmap_init_mmio(dev, syscon->base,
+ syscon->regmap = devm_regmap_init_mmio(dev, base,
&syscon_regmap_config);
if (IS_ERR(syscon->regmap)) {
dev_err(dev, "regmap init failed\n");
diff --git a/drivers/mfd/t7l66xb.c b/drivers/mfd/t7l66xb.c
index a21bff2..9e04a74 100644
--- a/drivers/mfd/t7l66xb.c
+++ b/drivers/mfd/t7l66xb.c
@@ -281,7 +281,7 @@ static void t7l66xb_detach_irq(struct platform_device *dev)
static int t7l66xb_suspend(struct platform_device *dev, pm_message_t state)
{
struct t7l66xb *t7l66xb = platform_get_drvdata(dev);
- struct t7l66xb_platform_data *pdata = dev->dev.platform_data;
+ struct t7l66xb_platform_data *pdata = dev_get_platdata(&dev->dev);
if (pdata && pdata->suspend)
pdata->suspend(dev);
@@ -293,7 +293,7 @@ static int t7l66xb_suspend(struct platform_device *dev, pm_message_t state)
static int t7l66xb_resume(struct platform_device *dev)
{
struct t7l66xb *t7l66xb = platform_get_drvdata(dev);
- struct t7l66xb_platform_data *pdata = dev->dev.platform_data;
+ struct t7l66xb_platform_data *pdata = dev_get_platdata(&dev->dev);
clk_enable(t7l66xb->clk48m);
if (pdata && pdata->resume)
@@ -313,7 +313,7 @@ static int t7l66xb_resume(struct platform_device *dev)
static int t7l66xb_probe(struct platform_device *dev)
{
- struct t7l66xb_platform_data *pdata = dev->dev.platform_data;
+ struct t7l66xb_platform_data *pdata = dev_get_platdata(&dev->dev);
struct t7l66xb *t7l66xb;
struct resource *iomem, *rscr;
int ret;
@@ -409,7 +409,7 @@ err_noirq:
static int t7l66xb_remove(struct platform_device *dev)
{
- struct t7l66xb_platform_data *pdata = dev->dev.platform_data;
+ struct t7l66xb_platform_data *pdata = dev_get_platdata(&dev->dev);
struct t7l66xb *t7l66xb = platform_get_drvdata(dev);
int ret;
diff --git a/drivers/mfd/tc3589x.c b/drivers/mfd/tc3589x.c
index 4cb92bb..70f4909f 100644
--- a/drivers/mfd/tc3589x.c
+++ b/drivers/mfd/tc3589x.c
@@ -325,7 +325,7 @@ static int tc3589x_of_probe(struct device_node *np,
static int tc3589x_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
- struct tc3589x_platform_data *pdata = i2c->dev.platform_data;
+ struct tc3589x_platform_data *pdata = dev_get_platdata(&i2c->dev);
struct device_node *np = i2c->dev.of_node;
struct tc3589x *tc3589x;
int ret;
diff --git a/drivers/mfd/tc6387xb.c b/drivers/mfd/tc6387xb.c
index 65c425a..acd0f3a 100644
--- a/drivers/mfd/tc6387xb.c
+++ b/drivers/mfd/tc6387xb.c
@@ -48,7 +48,7 @@ static struct resource tc6387xb_mmc_resources[] = {
static int tc6387xb_suspend(struct platform_device *dev, pm_message_t state)
{
struct tc6387xb *tc6387xb = platform_get_drvdata(dev);
- struct tc6387xb_platform_data *pdata = dev->dev.platform_data;
+ struct tc6387xb_platform_data *pdata = dev_get_platdata(&dev->dev);
if (pdata && pdata->suspend)
pdata->suspend(dev);
@@ -60,7 +60,7 @@ static int tc6387xb_suspend(struct platform_device *dev, pm_message_t state)
static int tc6387xb_resume(struct platform_device *dev)
{
struct tc6387xb *tc6387xb = platform_get_drvdata(dev);
- struct tc6387xb_platform_data *pdata = dev->dev.platform_data;
+ struct tc6387xb_platform_data *pdata = dev_get_platdata(&dev->dev);
clk_enable(tc6387xb->clk32k);
if (pdata && pdata->resume)
@@ -140,7 +140,7 @@ static struct mfd_cell tc6387xb_cells[] = {
static int tc6387xb_probe(struct platform_device *dev)
{
- struct tc6387xb_platform_data *pdata = dev->dev.platform_data;
+ struct tc6387xb_platform_data *pdata = dev_get_platdata(&dev->dev);
struct resource *iomem, *rscr;
struct clk *clk32k;
struct tc6387xb *tc6387xb;
diff --git a/drivers/mfd/tc6393xb.c b/drivers/mfd/tc6393xb.c
index a563dfa..11c19e5 100644
--- a/drivers/mfd/tc6393xb.c
+++ b/drivers/mfd/tc6393xb.c
@@ -604,7 +604,7 @@ static void tc6393xb_detach_irq(struct platform_device *dev)
static int tc6393xb_probe(struct platform_device *dev)
{
- struct tc6393xb_platform_data *tcpd = dev->dev.platform_data;
+ struct tc6393xb_platform_data *tcpd = dev_get_platdata(&dev->dev);
struct tc6393xb *tc6393xb;
struct resource *iomem, *rscr;
int ret, temp;
@@ -733,7 +733,7 @@ err_kzalloc:
static int tc6393xb_remove(struct platform_device *dev)
{
- struct tc6393xb_platform_data *tcpd = dev->dev.platform_data;
+ struct tc6393xb_platform_data *tcpd = dev_get_platdata(&dev->dev);
struct tc6393xb *tc6393xb = platform_get_drvdata(dev);
int ret;
@@ -765,7 +765,7 @@ static int tc6393xb_remove(struct platform_device *dev)
#ifdef CONFIG_PM
static int tc6393xb_suspend(struct platform_device *dev, pm_message_t state)
{
- struct tc6393xb_platform_data *tcpd = dev->dev.platform_data;
+ struct tc6393xb_platform_data *tcpd = dev_get_platdata(&dev->dev);
struct tc6393xb *tc6393xb = platform_get_drvdata(dev);
int i, ret;
@@ -788,7 +788,7 @@ static int tc6393xb_suspend(struct platform_device *dev, pm_message_t state)
static int tc6393xb_resume(struct platform_device *dev)
{
- struct tc6393xb_platform_data *tcpd = dev->dev.platform_data;
+ struct tc6393xb_platform_data *tcpd = dev_get_platdata(&dev->dev);
struct tc6393xb *tc6393xb = platform_get_drvdata(dev);
int ret;
int i;
diff --git a/drivers/mfd/ti-ssp.c b/drivers/mfd/ti-ssp.c
index 09a14ce..1c2b994 100644
--- a/drivers/mfd/ti-ssp.c
+++ b/drivers/mfd/ti-ssp.c
@@ -318,7 +318,7 @@ static irqreturn_t ti_ssp_interrupt(int irq, void *dev_data)
static int ti_ssp_probe(struct platform_device *pdev)
{
static struct ti_ssp *ssp;
- const struct ti_ssp_data *pdata = pdev->dev.platform_data;
+ const struct ti_ssp_data *pdata = dev_get_platdata(&pdev->dev);
int error = 0, prediv = 0xff, id;
unsigned long sysclk;
struct device *dev = &pdev->dev;
diff --git a/drivers/mfd/ti_am335x_tscadc.c b/drivers/mfd/ti_am335x_tscadc.c
index b003a16..baaf5a8 100644
--- a/drivers/mfd/ti_am335x_tscadc.c
+++ b/drivers/mfd/ti_am335x_tscadc.c
@@ -57,20 +57,20 @@ EXPORT_SYMBOL_GPL(am335x_tsc_se_update);
void am335x_tsc_se_set(struct ti_tscadc_dev *tsadc, u32 val)
{
spin_lock(&tsadc->reg_lock);
+ tsadc->reg_se_cache = tscadc_readl(tsadc, REG_SE);
tsadc->reg_se_cache |= val;
- spin_unlock(&tsadc->reg_lock);
-
am335x_tsc_se_update(tsadc);
+ spin_unlock(&tsadc->reg_lock);
}
EXPORT_SYMBOL_GPL(am335x_tsc_se_set);
void am335x_tsc_se_clr(struct ti_tscadc_dev *tsadc, u32 val)
{
spin_lock(&tsadc->reg_lock);
+ tsadc->reg_se_cache = tscadc_readl(tsadc, REG_SE);
tsadc->reg_se_cache &= ~val;
- spin_unlock(&tsadc->reg_lock);
-
am335x_tsc_se_update(tsadc);
+ spin_unlock(&tsadc->reg_lock);
}
EXPORT_SYMBOL_GPL(am335x_tsc_se_clr);
@@ -197,24 +197,21 @@ static int ti_tscadc_probe(struct platform_device *pdev)
clock_rate = clk_get_rate(clk);
clk_put(clk);
clk_value = clock_rate / ADC_CLK;
- if (clk_value < MAX_CLK_DIV) {
- dev_err(&pdev->dev, "clock input less than min clock requirement\n");
- err = -EINVAL;
- goto err_disable_clk;
- }
+
/* TSCADC_CLKDIV needs to be configured to the value minus 1 */
clk_value = clk_value - 1;
tscadc_writel(tscadc, REG_CLKDIV, clk_value);
/* Set the control register bits */
ctrl = CNTRLREG_STEPCONFIGWRT |
- CNTRLREG_TSCENB |
- CNTRLREG_STEPID |
- CNTRLREG_4WIRE;
+ CNTRLREG_STEPID;
+ if (tsc_wires > 0)
+ ctrl |= CNTRLREG_4WIRE | CNTRLREG_TSCENB;
tscadc_writel(tscadc, REG_CTRL, ctrl);
/* Set register bits for Idle Config Mode */
- tscadc_idle_config(tscadc);
+ if (tsc_wires > 0)
+ tscadc_idle_config(tscadc);
/* Enable the TSC module enable bit */
ctrl = tscadc_readl(tscadc, REG_CTRL);
@@ -294,10 +291,13 @@ static int tscadc_resume(struct device *dev)
pm_runtime_get_sync(dev);
/* context restore */
- ctrl = CNTRLREG_STEPCONFIGWRT | CNTRLREG_TSCENB |
- CNTRLREG_STEPID | CNTRLREG_4WIRE;
+ ctrl = CNTRLREG_STEPCONFIGWRT | CNTRLREG_STEPID;
+ if (tscadc_dev->tsc_cell != -1)
+ ctrl |= CNTRLREG_TSCENB | CNTRLREG_4WIRE;
tscadc_writel(tscadc_dev, REG_CTRL, ctrl);
- tscadc_idle_config(tscadc_dev);
+
+ if (tscadc_dev->tsc_cell != -1)
+ tscadc_idle_config(tscadc_dev);
am335x_tsc_se_update(tscadc_dev);
restore = tscadc_readl(tscadc_dev, REG_CTRL);
tscadc_writel(tscadc_dev, REG_CTRL,
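The am335x_tsc_se_set/clr changes above move the hardware read and the am335x_tsc_se_update() call inside the spinlock, so the cache refresh, the modify and the write-back form one critical section; previously the lock was dropped before the update, leaving a window for another context to change REG_SE in between. The resulting pattern, with names from the diff:

	spin_lock(&tsadc->reg_lock);
	tsadc->reg_se_cache = tscadc_readl(tsadc, REG_SE);	/* refresh */
	tsadc->reg_se_cache |= val;				/* modify */
	am335x_tsc_se_update(tsadc);				/* write back */
	spin_unlock(&tsadc->reg_lock);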
diff --git a/drivers/mfd/timberdale.c b/drivers/mfd/timberdale.c
index 0c1fcbc..a6755ec 100644
--- a/drivers/mfd/timberdale.c
+++ b/drivers/mfd/timberdale.c
@@ -115,11 +115,11 @@ static const struct resource timberdale_ocores_resources[] = {
},
};
-const struct max7301_platform_data timberdale_max7301_platform_data = {
+static const struct max7301_platform_data timberdale_max7301_platform_data = {
.base = 200
};
-const struct mc33880_platform_data timberdale_mc33880_platform_data = {
+static const struct mc33880_platform_data timberdale_mc33880_platform_data = {
.base = 100
};
@@ -781,7 +781,6 @@ static int timb_probe(struct pci_dev *dev,
priv->fw.major, priv->fw.minor, ip_setup);
err = -ENODEV;
goto err_mfd;
- break;
}
if (err) {
@@ -869,34 +868,7 @@ static struct pci_driver timberdale_pci_driver = {
.remove = timb_remove,
};
-static int __init timberdale_init(void)
-{
- int err;
-
- err = pci_register_driver(&timberdale_pci_driver);
- if (err < 0) {
- printk(KERN_ERR
- "Failed to register PCI driver for %s device.\n",
- timberdale_pci_driver.name);
- return -ENODEV;
- }
-
- printk(KERN_INFO "Driver for %s has been successfully registered.\n",
- timberdale_pci_driver.name);
-
- return 0;
-}
-
-static void __exit timberdale_exit(void)
-{
- pci_unregister_driver(&timberdale_pci_driver);
-
- printk(KERN_INFO "Driver for %s has been successfully unregistered.\n",
- timberdale_pci_driver.name);
-}
-
-module_init(timberdale_init);
-module_exit(timberdale_exit);
+module_pci_driver(timberdale_pci_driver);
MODULE_AUTHOR("Mocean Laboratories <info@mocean-labs.com>");
MODULE_VERSION(DRV_VERSION);
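module_pci_driver() above expands to essentially the init/exit pair that was deleted, minus the printk chatter; the one behavioural difference is that a registration failure now propagates the real error code instead of being collapsed to -ENODEV. Roughly what the macro generates (generated names are illustrative):

static int __init timberdale_pci_driver_init(void)
{
	return pci_register_driver(&timberdale_pci_driver);
}
module_init(timberdale_pci_driver_init);

static void __exit timberdale_pci_driver_exit(void)
{
	pci_unregister_driver(&timberdale_pci_driver);
}
module_exit(timberdale_pci_driver_exit);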
diff --git a/drivers/mfd/tps6105x.c b/drivers/mfd/tps6105x.c
index 1d302f5..b5dfa6e 100644
--- a/drivers/mfd/tps6105x.c
+++ b/drivers/mfd/tps6105x.c
@@ -147,7 +147,7 @@ static int tps6105x_probe(struct i2c_client *client,
i2c_set_clientdata(client, tps6105x);
tps6105x->client = client;
- pdata = client->dev.platform_data;
+ pdata = dev_get_platdata(&client->dev);
tps6105x->pdata = pdata;
mutex_init(&tps6105x->lock);
diff --git a/drivers/mfd/tps65010.c b/drivers/mfd/tps65010.c
index da2691f..743fb52 100644
--- a/drivers/mfd/tps65010.c
+++ b/drivers/mfd/tps65010.c
@@ -242,8 +242,8 @@ static int dbg_show(struct seq_file *s, void *_)
seq_printf(s, "mask2 %s\n", buf);
/* ignore ackint2 */
- schedule_delayed_work(&tps->work, POWER_POLL_DELAY);
-
+ queue_delayed_work(system_power_efficient_wq, &tps->work,
+ POWER_POLL_DELAY);
/* VMAIN voltage, enable lowpower, etc */
value = i2c_smbus_read_byte_data(tps->client, TPS_VDCDC1);
@@ -400,7 +400,8 @@ static void tps65010_interrupt(struct tps65010 *tps)
&& (tps->chgstatus & (TPS_CHG_USB|TPS_CHG_AC)))
poll = 1;
if (poll)
- schedule_delayed_work(&tps->work, POWER_POLL_DELAY);
+ queue_delayed_work(system_power_efficient_wq, &tps->work,
+ POWER_POLL_DELAY);
/* also potentially gpio-in rise or fall */
}
@@ -448,7 +449,7 @@ static irqreturn_t tps65010_irq(int irq, void *_tps)
disable_irq_nosync(irq);
set_bit(FLAG_IRQ_ENABLE, &tps->flags);
- schedule_delayed_work(&tps->work, 0);
+ queue_delayed_work(system_power_efficient_wq, &tps->work, 0);
return IRQ_HANDLED;
}
@@ -517,7 +518,7 @@ static struct tps65010 *the_tps;
static int __exit tps65010_remove(struct i2c_client *client)
{
struct tps65010 *tps = i2c_get_clientdata(client);
- struct tps65010_board *board = client->dev.platform_data;
+ struct tps65010_board *board = dev_get_platdata(&client->dev);
if (board && board->teardown) {
int status = board->teardown(client, board->context);
@@ -529,7 +530,6 @@ static int __exit tps65010_remove(struct i2c_client *client)
free_irq(client->irq, tps);
cancel_delayed_work_sync(&tps->work);
debugfs_remove(tps->file);
- kfree(tps);
the_tps = NULL;
return 0;
}
@@ -539,7 +539,7 @@ static int tps65010_probe(struct i2c_client *client,
{
struct tps65010 *tps;
int status;
- struct tps65010_board *board = client->dev.platform_data;
+ struct tps65010_board *board = dev_get_platdata(&client->dev);
if (the_tps) {
dev_dbg(&client->dev, "only one tps6501x chip allowed\n");
@@ -549,7 +549,7 @@ static int tps65010_probe(struct i2c_client *client,
if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
return -EINVAL;
- tps = kzalloc(sizeof *tps, GFP_KERNEL);
+ tps = devm_kzalloc(&client->dev, sizeof(*tps), GFP_KERNEL);
if (!tps)
return -ENOMEM;
@@ -567,7 +567,7 @@ static int tps65010_probe(struct i2c_client *client,
if (status < 0) {
dev_dbg(&client->dev, "can't get IRQ %d, err %d\n",
client->irq, status);
- goto fail1;
+ return status;
}
/* annoying race here, ideally we'd have an option
* to claim the irq now and enable it later.
@@ -667,9 +667,6 @@ static int tps65010_probe(struct i2c_client *client,
}
return 0;
-fail1:
- kfree(tps);
- return status;
}
static const struct i2c_device_id tps65010_id[] = {
@@ -718,7 +715,8 @@ int tps65010_set_vbus_draw(unsigned mA)
&& test_and_set_bit(
FLAG_VBUS_CHANGED, &the_tps->flags)) {
/* gadget drivers call this in_irq() */
- schedule_delayed_work(&the_tps->work, 0);
+ queue_delayed_work(system_power_efficient_wq, &the_tps->work,
+ 0);
}
local_irq_restore(flags);
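schedule_delayed_work() is shorthand for queueing on system_wq, so the tps65010 conversions above change nothing but the target workqueue: with CONFIG_WQ_POWER_EFFICIENT enabled, system_power_efficient_wq is unbound and the poll can run on whichever CPU is already awake; otherwise it behaves exactly like system_wq. The substitution in isolation:

	/* before */
	schedule_delayed_work(&tps->work, POWER_POLL_DELAY);
	/* after: same semantics, power-aware placement when enabled */
	queue_delayed_work(system_power_efficient_wq, &tps->work,
			   POWER_POLL_DELAY);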
diff --git a/drivers/mfd/tps65090.c b/drivers/mfd/tps65090.c
index fbd6ee6..e6f03a7 100644
--- a/drivers/mfd/tps65090.c
+++ b/drivers/mfd/tps65090.c
@@ -172,7 +172,7 @@ MODULE_DEVICE_TABLE(of, tps65090_of_match);
static int tps65090_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- struct tps65090_platform_data *pdata = client->dev.platform_data;
+ struct tps65090_platform_data *pdata = dev_get_platdata(&client->dev);
int irq_base = 0;
struct tps65090 *tps65090;
int ret;
diff --git a/drivers/mfd/tps6586x.c b/drivers/mfd/tps6586x.c
index 4b93ed4..f54fe4d 100644
--- a/drivers/mfd/tps6586x.c
+++ b/drivers/mfd/tps6586x.c
@@ -462,7 +462,7 @@ static void tps6586x_power_off(void)
static int tps6586x_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- struct tps6586x_platform_data *pdata = client->dev.platform_data;
+ struct tps6586x_platform_data *pdata = dev_get_platdata(&client->dev);
struct tps6586x *tps6586x;
int ret;
diff --git a/drivers/mfd/tps65912-core.c b/drivers/mfd/tps65912-core.c
index 479886a..925a044 100644
--- a/drivers/mfd/tps65912-core.c
+++ b/drivers/mfd/tps65912-core.c
@@ -123,7 +123,7 @@ EXPORT_SYMBOL_GPL(tps65912_reg_write);
int tps65912_device_init(struct tps65912 *tps65912)
{
- struct tps65912_board *pmic_plat_data = tps65912->dev->platform_data;
+ struct tps65912_board *pmic_plat_data = dev_get_platdata(tps65912->dev);
struct tps65912_platform_data *init_data;
int ret, dcdc_avs, value;
diff --git a/drivers/mfd/tps80031.c b/drivers/mfd/tps80031.c
index c90a2c4..f15ee6d 100644
--- a/drivers/mfd/tps80031.c
+++ b/drivers/mfd/tps80031.c
@@ -418,7 +418,7 @@ static const struct regmap_config tps80031_regmap_configs[] = {
static int tps80031_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- struct tps80031_platform_data *pdata = client->dev.platform_data;
+ struct tps80031_platform_data *pdata = dev_get_platdata(&client->dev);
struct tps80031 *tps80031;
int ret;
uint8_t es_version;
diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c
index 7f150d9..29473c2 100644
--- a/drivers/mfd/twl-core.c
+++ b/drivers/mfd/twl-core.c
@@ -1137,7 +1137,7 @@ static int twl_remove(struct i2c_client *client)
static int
twl_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
- struct twl4030_platform_data *pdata = client->dev.platform_data;
+ struct twl4030_platform_data *pdata = dev_get_platdata(&client->dev);
struct device_node *node = client->dev.of_node;
struct platform_device *pdev;
struct regmap_config *twl_regmap_config;
diff --git a/drivers/mfd/twl4030-audio.c b/drivers/mfd/twl4030-audio.c
index a31fba9..07fe542 100644
--- a/drivers/mfd/twl4030-audio.c
+++ b/drivers/mfd/twl4030-audio.c
@@ -187,7 +187,7 @@ static bool twl4030_audio_has_vibra(struct twl4030_audio_data *pdata,
static int twl4030_audio_probe(struct platform_device *pdev)
{
struct twl4030_audio *audio;
- struct twl4030_audio_data *pdata = pdev->dev.platform_data;
+ struct twl4030_audio_data *pdata = dev_get_platdata(&pdev->dev);
struct device_node *node = pdev->dev.of_node;
struct mfd_cell *cell = NULL;
int ret, childs = 0;
diff --git a/drivers/mfd/twl4030-madc.c b/drivers/mfd/twl4030-madc.c
index 1ea54d4..4c583e4 100644
--- a/drivers/mfd/twl4030-madc.c
+++ b/drivers/mfd/twl4030-madc.c
@@ -701,7 +701,7 @@ static int twl4030_madc_set_power(struct twl4030_madc_data *madc, int on)
static int twl4030_madc_probe(struct platform_device *pdev)
{
struct twl4030_madc_data *madc;
- struct twl4030_madc_platform_data *pdata = pdev->dev.platform_data;
+ struct twl4030_madc_platform_data *pdata = dev_get_platdata(&pdev->dev);
int ret;
u8 regval;
diff --git a/drivers/mfd/twl4030-power.c b/drivers/mfd/twl4030-power.c
index a5fd3c7..96162b6 100644
--- a/drivers/mfd/twl4030-power.c
+++ b/drivers/mfd/twl4030-power.c
@@ -493,7 +493,7 @@ int twl4030_remove_script(u8 flags)
return err;
}
-int twl4030_power_configure_scripts(struct twl4030_power_data *pdata)
+static int twl4030_power_configure_scripts(struct twl4030_power_data *pdata)
{
int err;
int i;
@@ -509,7 +509,7 @@ int twl4030_power_configure_scripts(struct twl4030_power_data *pdata)
return 0;
}
-int twl4030_power_configure_resources(struct twl4030_power_data *pdata)
+static int twl4030_power_configure_resources(struct twl4030_power_data *pdata)
{
struct twl4030_resconfig *resconfig = pdata->resource_config;
int err;
@@ -553,9 +553,9 @@ static bool twl4030_power_use_poweroff(struct twl4030_power_data *pdata,
return false;
}
-int twl4030_power_probe(struct platform_device *pdev)
+static int twl4030_power_probe(struct platform_device *pdev)
{
- struct twl4030_power_data *pdata = pdev->dev.platform_data;
+ struct twl4030_power_data *pdata = dev_get_platdata(&pdev->dev);
struct device_node *node = pdev->dev.of_node;
int err = 0;
int err2 = 0;
diff --git a/drivers/mfd/twl6030-irq.c b/drivers/mfd/twl6030-irq.c
index 277a8db..517eda8 100644
--- a/drivers/mfd/twl6030-irq.c
+++ b/drivers/mfd/twl6030-irq.c
@@ -41,6 +41,7 @@
#include <linux/suspend.h>
#include <linux/of.h>
#include <linux/irqdomain.h>
+#include <linux/of_device.h>
#include "twl-core.h"
@@ -84,39 +85,77 @@ static int twl6030_interrupt_mapping[24] = {
CHARGERFAULT_INTR_OFFSET, /* Bit 22 INT_CHRG */
RSV_INTR_OFFSET, /* Bit 23 Reserved */
};
+
+static int twl6032_interrupt_mapping[24] = {
+ PWR_INTR_OFFSET, /* Bit 0 PWRON */
+ PWR_INTR_OFFSET, /* Bit 1 RPWRON */
+ PWR_INTR_OFFSET, /* Bit 2 SYS_VLOW */
+ RTC_INTR_OFFSET, /* Bit 3 RTC_ALARM */
+ RTC_INTR_OFFSET, /* Bit 4 RTC_PERIOD */
+ HOTDIE_INTR_OFFSET, /* Bit 5 HOT_DIE */
+ SMPSLDO_INTR_OFFSET, /* Bit 6 VXXX_SHORT */
+ PWR_INTR_OFFSET, /* Bit 7 SPDURATION */
+
+ PWR_INTR_OFFSET, /* Bit 8 WATCHDOG */
+ BATDETECT_INTR_OFFSET, /* Bit 9 BAT */
+ SIMDETECT_INTR_OFFSET, /* Bit 10 SIM */
+ MMCDETECT_INTR_OFFSET, /* Bit 11 MMC */
+ MADC_INTR_OFFSET, /* Bit 12 GPADC_RT_EOC */
+ MADC_INTR_OFFSET, /* Bit 13 GPADC_SW_EOC */
+ GASGAUGE_INTR_OFFSET, /* Bit 14 CC_EOC */
+ GASGAUGE_INTR_OFFSET, /* Bit 15 CC_AUTOCAL */
+
+ USBOTG_INTR_OFFSET, /* Bit 16 ID_WKUP */
+ USBOTG_INTR_OFFSET, /* Bit 17 VBUS_WKUP */
+ USBOTG_INTR_OFFSET, /* Bit 18 ID */
+ USB_PRES_INTR_OFFSET, /* Bit 19 VBUS */
+ CHARGER_INTR_OFFSET, /* Bit 20 CHRG_CTRL */
+ CHARGERFAULT_INTR_OFFSET, /* Bit 21 EXT_CHRG */
+ CHARGERFAULT_INTR_OFFSET, /* Bit 22 INT_CHRG */
+ RSV_INTR_OFFSET, /* Bit 23 Reserved */
+};
+
/*----------------------------------------------------------------------*/
-static unsigned twl6030_irq_base;
-static int twl_irq;
-static bool twl_irq_wake_enabled;
+struct twl6030_irq {
+ unsigned int irq_base;
+ int twl_irq;
+ bool irq_wake_enabled;
+ atomic_t wakeirqs;
+ struct notifier_block pm_nb;
+ struct irq_chip irq_chip;
+ struct irq_domain *irq_domain;
+ const int *irq_mapping_tbl;
+};
-static struct completion irq_event;
-static atomic_t twl6030_wakeirqs = ATOMIC_INIT(0);
+static struct twl6030_irq *twl6030_irq;
static int twl6030_irq_pm_notifier(struct notifier_block *notifier,
unsigned long pm_event, void *unused)
{
int chained_wakeups;
+ struct twl6030_irq *pdata = container_of(notifier, struct twl6030_irq,
+ pm_nb);
switch (pm_event) {
case PM_SUSPEND_PREPARE:
- chained_wakeups = atomic_read(&twl6030_wakeirqs);
+ chained_wakeups = atomic_read(&pdata->wakeirqs);
- if (chained_wakeups && !twl_irq_wake_enabled) {
- if (enable_irq_wake(twl_irq))
+ if (chained_wakeups && !pdata->irq_wake_enabled) {
+ if (enable_irq_wake(pdata->twl_irq))
pr_err("twl6030 IRQ wake enable failed\n");
else
- twl_irq_wake_enabled = true;
- } else if (!chained_wakeups && twl_irq_wake_enabled) {
- disable_irq_wake(twl_irq);
- twl_irq_wake_enabled = false;
+ pdata->irq_wake_enabled = true;
+ } else if (!chained_wakeups && pdata->irq_wake_enabled) {
+ disable_irq_wake(pdata->twl_irq);
+ pdata->irq_wake_enabled = false;
}
- disable_irq(twl_irq);
+ disable_irq(pdata->twl_irq);
break;
case PM_POST_SUSPEND:
- enable_irq(twl_irq);
+ enable_irq(pdata->twl_irq);
break;
default:
@@ -126,124 +165,77 @@ static int twl6030_irq_pm_notifier(struct notifier_block *notifier,
return NOTIFY_DONE;
}
-static struct notifier_block twl6030_irq_pm_notifier_block = {
- .notifier_call = twl6030_irq_pm_notifier,
-};
-
/*
- * This thread processes interrupts reported by the Primary Interrupt Handler.
- */
-static int twl6030_irq_thread(void *data)
+ * Threaded irq handler for the twl6030 interrupt.
+ * We query the interrupt controller in the twl6030 to determine
+ * which module is generating the interrupt request and call
+ * handle_nested_irq for that module.
+ */
+static irqreturn_t twl6030_irq_thread(int irq, void *data)
{
- long irq = (long)data;
- static unsigned i2c_errors;
- static const unsigned max_i2c_errors = 100;
- int ret;
-
- while (!kthread_should_stop()) {
- int i;
- union {
+ int i, ret;
+ union {
u8 bytes[4];
u32 int_sts;
- } sts;
-
- /* Wait for IRQ, then read PIH irq status (also blocking) */
- wait_for_completion_interruptible(&irq_event);
-
- /* read INT_STS_A, B and C in one shot using a burst read */
- ret = twl_i2c_read(TWL_MODULE_PIH, sts.bytes,
- REG_INT_STS_A, 3);
- if (ret) {
- pr_warning("twl6030: I2C error %d reading PIH ISR\n",
- ret);
- if (++i2c_errors >= max_i2c_errors) {
- printk(KERN_ERR "Maximum I2C error count"
- " exceeded. Terminating %s.\n",
- __func__);
- break;
- }
- complete(&irq_event);
- continue;
- }
-
-
+ } sts;
+ struct twl6030_irq *pdata = data;
+
+ /* read INT_STS_A, B and C in one shot using a burst read */
+ ret = twl_i2c_read(TWL_MODULE_PIH, sts.bytes, REG_INT_STS_A, 3);
+ if (ret) {
+ pr_warn("twl6030_irq: I2C error %d reading PIH ISR\n", ret);
+ return IRQ_HANDLED;
+ }
- sts.bytes[3] = 0; /* Only 24 bits are valid*/
+ sts.bytes[3] = 0; /* Only 24 bits are valid */
- /*
- * Since VBUS status bit is not reliable for VBUS disconnect
- * use CHARGER VBUS detection status bit instead.
- */
- if (sts.bytes[2] & 0x10)
- sts.bytes[2] |= 0x08;
-
- for (i = 0; sts.int_sts; sts.int_sts >>= 1, i++) {
- local_irq_disable();
- if (sts.int_sts & 0x1) {
- int module_irq = twl6030_irq_base +
- twl6030_interrupt_mapping[i];
- generic_handle_irq(module_irq);
-
- }
- local_irq_enable();
+ /*
+ * Since VBUS status bit is not reliable for VBUS disconnect
+ * use CHARGER VBUS detection status bit instead.
+ */
+ if (sts.bytes[2] & 0x10)
+ sts.bytes[2] |= 0x08;
+
+ for (i = 0; sts.int_sts; sts.int_sts >>= 1, i++)
+ if (sts.int_sts & 0x1) {
+ int module_irq =
+ irq_find_mapping(pdata->irq_domain,
+ pdata->irq_mapping_tbl[i]);
+ if (module_irq)
+ handle_nested_irq(module_irq);
+ else
+ pr_err("twl6030_irq: Unmapped PIH ISR %u detected\n",
+ i);
+ pr_debug("twl6030_irq: PIH ISR %u, virq%u\n",
+ i, module_irq);
}
- /*
- * NOTE:
- * Simulation confirms that documentation is wrong w.r.t the
- * interrupt status clear operation. A single *byte* write to
- * any one of STS_A to STS_C register results in all three
- * STS registers being reset. Since it does not matter which
- * value is written, all three registers are cleared on a
- * single byte write, so we just use 0x0 to clear.
- */
- ret = twl_i2c_write_u8(TWL_MODULE_PIH, 0x00, REG_INT_STS_A);
- if (ret)
- pr_warning("twl6030: I2C error in clearing PIH ISR\n");
-
- enable_irq(irq);
- }
-
- return 0;
-}
+ /*
+ * NOTE:
+ * Simulation confirms that documentation is wrong w.r.t the
+ * interrupt status clear operation. A single *byte* write to
+ * any one of STS_A to STS_C register results in all three
+ * STS registers being reset. Since it does not matter which
+ * value is written, all three registers are cleared on a
+ * single byte write, so we just use 0x0 to clear.
+ */
+ ret = twl_i2c_write_u8(TWL_MODULE_PIH, 0x00, REG_INT_STS_A);
+ if (ret)
+ pr_warn("twl6030_irq: I2C error in clearing PIH ISR\n");
-/*
- * handle_twl6030_int() is the desc->handle method for the twl6030 interrupt.
- * This is a chained interrupt, so there is no desc->action method for it.
- * Now we need to query the interrupt controller in the twl6030 to determine
- * which module is generating the interrupt request. However, we can't do i2c
- * transactions in interrupt context, so we must defer that work to a kernel
- * thread. All we do here is acknowledge and mask the interrupt and wakeup
- * the kernel thread.
- */
-static irqreturn_t handle_twl6030_pih(int irq, void *devid)
-{
- disable_irq_nosync(irq);
- complete(devid);
return IRQ_HANDLED;
}
/*----------------------------------------------------------------------*/
-static inline void activate_irq(int irq)
-{
-#ifdef CONFIG_ARM
- /* ARM requires an extra step to clear IRQ_NOREQUEST, which it
- * sets on behalf of every irq_chip. Also sets IRQ_NOPROBE.
- */
- set_irq_flags(irq, IRQF_VALID);
-#else
- /* same effect on other architectures */
- irq_set_noprobe(irq);
-#endif
-}
-
static int twl6030_irq_set_wake(struct irq_data *d, unsigned int on)
{
+ struct twl6030_irq *pdata = irq_get_chip_data(d->irq);
+
if (on)
- atomic_inc(&twl6030_wakeirqs);
+ atomic_inc(&pdata->wakeirqs);
else
- atomic_dec(&twl6030_wakeirqs);
+ atomic_dec(&pdata->wakeirqs);
return 0;
}
@@ -318,7 +310,8 @@ int twl6030_mmc_card_detect_config(void)
return ret;
}
- return twl6030_irq_base + MMCDETECT_INTR_OFFSET;
+ return irq_find_mapping(twl6030_irq->irq_domain,
+ MMCDETECT_INTR_OFFSET);
}
EXPORT_SYMBOL(twl6030_mmc_card_detect_config);
@@ -347,99 +340,143 @@ int twl6030_mmc_card_detect(struct device *dev, int slot)
}
EXPORT_SYMBOL(twl6030_mmc_card_detect);
+static int twl6030_irq_map(struct irq_domain *d, unsigned int virq,
+ irq_hw_number_t hwirq)
+{
+ struct twl6030_irq *pdata = d->host_data;
+
+ irq_set_chip_data(virq, pdata);
+ irq_set_chip_and_handler(virq, &pdata->irq_chip, handle_simple_irq);
+ irq_set_nested_thread(virq, true);
+ irq_set_parent(virq, pdata->twl_irq);
+
+#ifdef CONFIG_ARM
+ /*
+ * ARM requires an extra step to clear IRQ_NOREQUEST, which it
+ * sets on behalf of every irq_chip. Also sets IRQ_NOPROBE.
+ */
+ set_irq_flags(virq, IRQF_VALID);
+#else
+ /* same effect on other architectures */
+ irq_set_noprobe(virq);
+#endif
+
+ return 0;
+}
+
+static void twl6030_irq_unmap(struct irq_domain *d, unsigned int virq)
+{
+#ifdef CONFIG_ARM
+ set_irq_flags(virq, 0);
+#endif
+ irq_set_chip_and_handler(virq, NULL, NULL);
+ irq_set_chip_data(virq, NULL);
+}
+
+static struct irq_domain_ops twl6030_irq_domain_ops = {
+ .map = twl6030_irq_map,
+ .unmap = twl6030_irq_unmap,
+ .xlate = irq_domain_xlate_onetwocell,
+};
+
+static const struct of_device_id twl6030_of_match[] = {
+ {.compatible = "ti,twl6030", &twl6030_interrupt_mapping},
+ {.compatible = "ti,twl6032", &twl6032_interrupt_mapping},
+ { },
+};
+
int twl6030_init_irq(struct device *dev, int irq_num)
{
struct device_node *node = dev->of_node;
- int nr_irqs, irq_base, irq_end;
- struct task_struct *task;
- static struct irq_chip twl6030_irq_chip;
- int status = 0;
- int i;
+ int nr_irqs;
+ int status;
u8 mask[3];
+ const struct of_device_id *of_id;
- nr_irqs = TWL6030_NR_IRQS;
-
- irq_base = irq_alloc_descs(-1, 0, nr_irqs, 0);
- if (IS_ERR_VALUE(irq_base)) {
- dev_err(dev, "Fail to allocate IRQ descs\n");
- return irq_base;
+ of_id = of_match_device(twl6030_of_match, dev);
+ if (!of_id || !of_id->data) {
+ dev_err(dev, "Unknown TWL device model\n");
+ return -EINVAL;
}
- irq_domain_add_legacy(node, nr_irqs, irq_base, 0,
- &irq_domain_simple_ops, NULL);
+ nr_irqs = TWL6030_NR_IRQS;
- irq_end = irq_base + nr_irqs;
+ twl6030_irq = devm_kzalloc(dev, sizeof(*twl6030_irq), GFP_KERNEL);
+ if (!twl6030_irq) {
+ dev_err(dev, "twl6030_irq: Memory allocation failed\n");
+ return -ENOMEM;
+ }
mask[0] = 0xFF;
mask[1] = 0xFF;
mask[2] = 0xFF;
/* mask all int lines */
- twl_i2c_write(TWL_MODULE_PIH, &mask[0], REG_INT_MSK_LINE_A, 3);
+ status = twl_i2c_write(TWL_MODULE_PIH, &mask[0], REG_INT_MSK_LINE_A, 3);
/* mask all int sts */
- twl_i2c_write(TWL_MODULE_PIH, &mask[0], REG_INT_MSK_STS_A, 3);
+ status |= twl_i2c_write(TWL_MODULE_PIH, &mask[0], REG_INT_MSK_STS_A, 3);
/* clear INT_STS_A,B,C */
- twl_i2c_write(TWL_MODULE_PIH, &mask[0], REG_INT_STS_A, 3);
+ status |= twl_i2c_write(TWL_MODULE_PIH, &mask[0], REG_INT_STS_A, 3);
- twl6030_irq_base = irq_base;
+ if (status < 0) {
+ dev_err(dev, "I2C err writing TWL_MODULE_PIH: %d\n", status);
+ return status;
+ }
/*
* install an irq handler for each of the modules;
* clone dummy irq_chip since PIH can't *do* anything
*/
- twl6030_irq_chip = dummy_irq_chip;
- twl6030_irq_chip.name = "twl6030";
- twl6030_irq_chip.irq_set_type = NULL;
- twl6030_irq_chip.irq_set_wake = twl6030_irq_set_wake;
-
- for (i = irq_base; i < irq_end; i++) {
- irq_set_chip_and_handler(i, &twl6030_irq_chip,
- handle_simple_irq);
- irq_set_chip_data(i, (void *)irq_num);
- activate_irq(i);
+ twl6030_irq->irq_chip = dummy_irq_chip;
+ twl6030_irq->irq_chip.name = "twl6030";
+ twl6030_irq->irq_chip.irq_set_type = NULL;
+ twl6030_irq->irq_chip.irq_set_wake = twl6030_irq_set_wake;
+
+ twl6030_irq->pm_nb.notifier_call = twl6030_irq_pm_notifier;
+ atomic_set(&twl6030_irq->wakeirqs, 0);
+ twl6030_irq->irq_mapping_tbl = of_id->data;
+
+ twl6030_irq->irq_domain =
+ irq_domain_add_linear(node, nr_irqs,
+ &twl6030_irq_domain_ops, twl6030_irq);
+ if (!twl6030_irq->irq_domain) {
+ dev_err(dev, "Can't add irq_domain\n");
+ return -ENOMEM;
}
- dev_info(dev, "PIH (irq %d) chaining IRQs %d..%d\n",
- irq_num, irq_base, irq_end);
+ dev_info(dev, "PIH (irq %d) nested IRQs\n", irq_num);
/* install an irq handler to demultiplex the TWL6030 interrupt */
- init_completion(&irq_event);
-
- status = request_irq(irq_num, handle_twl6030_pih, 0, "TWL6030-PIH",
- &irq_event);
+ status = request_threaded_irq(irq_num, NULL, twl6030_irq_thread,
+ IRQF_ONESHOT, "TWL6030-PIH", twl6030_irq);
if (status < 0) {
dev_err(dev, "could not claim irq %d: %d\n", irq_num, status);
goto fail_irq;
}
- task = kthread_run(twl6030_irq_thread, (void *)irq_num, "twl6030-irq");
- if (IS_ERR(task)) {
- dev_err(dev, "could not create irq %d thread!\n", irq_num);
- status = PTR_ERR(task);
- goto fail_kthread;
- }
-
- twl_irq = irq_num;
- register_pm_notifier(&twl6030_irq_pm_notifier_block);
- return irq_base;
-
-fail_kthread:
- free_irq(irq_num, &irq_event);
+ twl6030_irq->twl_irq = irq_num;
+ register_pm_notifier(&twl6030_irq->pm_nb);
+ return 0;
fail_irq:
- for (i = irq_base; i < irq_end; i++)
- irq_set_chip_and_handler(i, NULL, NULL);
-
+ irq_domain_remove(twl6030_irq->irq_domain);
return status;
}
int twl6030_exit_irq(void)
{
- unregister_pm_notifier(&twl6030_irq_pm_notifier_block);
-
- if (twl6030_irq_base) {
- pr_err("twl6030: can't yet clean up IRQs?\n");
- return -ENOSYS;
+ if (twl6030_irq && twl6030_irq->twl_irq) {
+ unregister_pm_notifier(&twl6030_irq->pm_nb);
+ free_irq(twl6030_irq->twl_irq, NULL);
+ /*
+ * TODO: IRQ domain and allocated nested IRQ descriptors
+ * should be freed here. Currently that can't be done, because
+ * child devices are not deleted when the TWL core driver is
+ * removed, and they would still hold the allocated virtual
+ * IRQs in their resource tables.
+ * The same prevents us from using devm_request_threaded_irq()
+ * in this module.
+ */
}
return 0;
}
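
For reference, a minimal sketch of the linear IRQ-domain demultiplexing pattern this patch converts to; the demo_* names are illustrative, not from the patch:

#include <linux/irq.h>
#include <linux/irqdomain.h>

struct demo_chip {
	struct irq_domain *domain;
};

/* translate a hardware IRQ number into the dynamically allocated
 * virtual IRQ and hand it to the nested (threaded) handler */
static void demo_demux_one(struct demo_chip *chip, irq_hw_number_t hwirq)
{
	unsigned int virq = irq_find_mapping(chip->domain, hwirq);

	if (virq)
		handle_nested_irq(virq);
}

Unlike the legacy domain it replaces, a linear domain hands out virtual IRQ numbers on demand, which is why twl6030_mmc_card_detect_config() above now looks the number up with irq_find_mapping() instead of adding a fixed offset to an irq_base.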
diff --git a/drivers/mfd/twl6040.c b/drivers/mfd/twl6040.c
index 492ee2c..daf6694 100644
--- a/drivers/mfd/twl6040.c
+++ b/drivers/mfd/twl6040.c
@@ -44,17 +44,12 @@
#define VIBRACTRL_MEMBER(reg) ((reg == TWL6040_REG_VIBCTLL) ? 0 : 1)
#define TWL6040_NUM_SUPPLIES (2)
-static bool twl6040_has_vibra(struct twl6040_platform_data *pdata,
- struct device_node *node)
+static bool twl6040_has_vibra(struct device_node *node)
{
- if (pdata && pdata->vibra)
- return true;
-
#ifdef CONFIG_OF
if (of_find_node_by_name(node, "vibra"))
return true;
#endif
-
return false;
}
@@ -63,15 +58,9 @@ int twl6040_reg_read(struct twl6040 *twl6040, unsigned int reg)
int ret;
unsigned int val;
- /* Vibra control registers from cache */
- if (unlikely(reg == TWL6040_REG_VIBCTLL ||
- reg == TWL6040_REG_VIBCTLR)) {
- val = twl6040->vibra_ctrl_cache[VIBRACTRL_MEMBER(reg)];
- } else {
- ret = regmap_read(twl6040->regmap, reg, &val);
- if (ret < 0)
- return ret;
- }
+ ret = regmap_read(twl6040->regmap, reg, &val);
+ if (ret < 0)
+ return ret;
return val;
}
@@ -82,9 +71,6 @@ int twl6040_reg_write(struct twl6040 *twl6040, unsigned int reg, u8 val)
int ret;
ret = regmap_write(twl6040->regmap, reg, val);
- /* Cache the vibra control registers */
- if (reg == TWL6040_REG_VIBCTLL || reg == TWL6040_REG_VIBCTLR)
- twl6040->vibra_ctrl_cache[VIBRACTRL_MEMBER(reg)] = val;
return ret;
}
@@ -461,9 +447,20 @@ EXPORT_SYMBOL(twl6040_get_sysclk);
/* Get the combined status of the vibra control register */
int twl6040_get_vibralr_status(struct twl6040 *twl6040)
{
+ unsigned int reg;
+ int ret;
u8 status;
- status = twl6040->vibra_ctrl_cache[0] | twl6040->vibra_ctrl_cache[1];
+ ret = regmap_read(twl6040->regmap, TWL6040_REG_VIBCTLL, &reg);
+ if (ret != 0)
+ return ret;
+ status = reg;
+
+ ret = regmap_read(twl6040->regmap, TWL6040_REG_VIBCTLR, &reg);
+ if (ret != 0)
+ return ret;
+ status |= reg;
+
status &= (TWL6040_VIBENA | TWL6040_VIBSEL);
return status;
@@ -490,12 +487,27 @@ static bool twl6040_readable_reg(struct device *dev, unsigned int reg)
return true;
}
+static bool twl6040_volatile_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case TWL6040_REG_VIBCTLL:
+ case TWL6040_REG_VIBCTLR:
+ case TWL6040_REG_INTMR:
+ return false;
+ default:
+ return true;
+ }
+}
+
static struct regmap_config twl6040_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
.max_register = TWL6040_REG_STATUS, /* 0x2e */
.readable_reg = twl6040_readable_reg,
+ .volatile_reg = twl6040_volatile_reg,
+
+ .cache_type = REGCACHE_RBTREE,
};
static const struct regmap_irq twl6040_irqs[] = {
@@ -520,14 +532,13 @@ static struct regmap_irq_chip twl6040_irq_chip = {
static int twl6040_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- struct twl6040_platform_data *pdata = client->dev.platform_data;
struct device_node *node = client->dev.of_node;
struct twl6040 *twl6040;
struct mfd_cell *cell = NULL;
int irq, ret, children = 0;
- if (!pdata && !node) {
- dev_err(&client->dev, "Platform data is missing\n");
+ if (!node) {
+ dev_err(&client->dev, "of node is missing\n");
return -EINVAL;
}
@@ -539,23 +550,19 @@ static int twl6040_probe(struct i2c_client *client,
twl6040 = devm_kzalloc(&client->dev, sizeof(struct twl6040),
GFP_KERNEL);
- if (!twl6040) {
- ret = -ENOMEM;
- goto err;
- }
+ if (!twl6040)
+ return -ENOMEM;
twl6040->regmap = devm_regmap_init_i2c(client, &twl6040_regmap_config);
- if (IS_ERR(twl6040->regmap)) {
- ret = PTR_ERR(twl6040->regmap);
- goto err;
- }
+ if (IS_ERR(twl6040->regmap))
+ return PTR_ERR(twl6040->regmap);
i2c_set_clientdata(client, twl6040);
twl6040->supplies[0].supply = "vio";
twl6040->supplies[1].supply = "v2v1";
ret = devm_regulator_bulk_get(&client->dev, TWL6040_NUM_SUPPLIES,
- twl6040->supplies);
+ twl6040->supplies);
if (ret != 0) {
dev_err(&client->dev, "Failed to get supplies: %d\n", ret);
goto regulator_get_err;
@@ -576,44 +583,40 @@ static int twl6040_probe(struct i2c_client *client,
twl6040->rev = twl6040_reg_read(twl6040, TWL6040_REG_ASICREV);
/* ERRATA: Automatic power-up is not possible in ES1.0 */
- if (twl6040_get_revid(twl6040) > TWL6040_REV_ES1_0) {
- if (pdata)
- twl6040->audpwron = pdata->audpwron_gpio;
- else
- twl6040->audpwron = of_get_named_gpio(node,
- "ti,audpwron-gpio", 0);
- } else
+ if (twl6040_get_revid(twl6040) > TWL6040_REV_ES1_0)
+ twl6040->audpwron = of_get_named_gpio(node,
+ "ti,audpwron-gpio", 0);
+ else
twl6040->audpwron = -EINVAL;
if (gpio_is_valid(twl6040->audpwron)) {
ret = devm_gpio_request_one(&client->dev, twl6040->audpwron,
- GPIOF_OUT_INIT_LOW, "audpwron");
+ GPIOF_OUT_INIT_LOW, "audpwron");
if (ret)
goto gpio_err;
}
- ret = regmap_add_irq_chip(twl6040->regmap, twl6040->irq,
- IRQF_ONESHOT, 0, &twl6040_irq_chip,
- &twl6040->irq_data);
+ ret = regmap_add_irq_chip(twl6040->regmap, twl6040->irq, IRQF_ONESHOT,
+ 0, &twl6040_irq_chip, &twl6040->irq_data);
if (ret < 0)
goto gpio_err;
twl6040->irq_ready = regmap_irq_get_virq(twl6040->irq_data,
- TWL6040_IRQ_READY);
+ TWL6040_IRQ_READY);
twl6040->irq_th = regmap_irq_get_virq(twl6040->irq_data,
- TWL6040_IRQ_TH);
+ TWL6040_IRQ_TH);
ret = devm_request_threaded_irq(twl6040->dev, twl6040->irq_ready, NULL,
- twl6040_readyint_handler, IRQF_ONESHOT,
- "twl6040_irq_ready", twl6040);
+ twl6040_readyint_handler, IRQF_ONESHOT,
+ "twl6040_irq_ready", twl6040);
if (ret) {
dev_err(twl6040->dev, "READY IRQ request failed: %d\n", ret);
goto readyirq_err;
}
ret = devm_request_threaded_irq(twl6040->dev, twl6040->irq_th, NULL,
- twl6040_thint_handler, IRQF_ONESHOT,
- "twl6040_irq_th", twl6040);
+ twl6040_thint_handler, IRQF_ONESHOT,
+ "twl6040_irq_th", twl6040);
if (ret) {
dev_err(twl6040->dev, "Thermal IRQ request failed: %d\n", ret);
goto thirq_err;
@@ -625,8 +628,6 @@ static int twl6040_probe(struct i2c_client *client,
/*
* The main functionality of twl6040 to provide audio on OMAP4+ systems.
* We can add the ASoC codec child whenever this driver has been loaded.
- * The ASoC codec can work without pdata, pass the platform_data only if
- * it has been provided.
*/
irq = regmap_irq_get_virq(twl6040->irq_data, TWL6040_IRQ_PLUG);
cell = &twl6040->cells[children];
@@ -635,13 +636,10 @@ static int twl6040_probe(struct i2c_client *client,
twl6040_codec_rsrc[0].end = irq;
cell->resources = twl6040_codec_rsrc;
cell->num_resources = ARRAY_SIZE(twl6040_codec_rsrc);
- if (pdata && pdata->codec) {
- cell->platform_data = pdata->codec;
- cell->pdata_size = sizeof(*pdata->codec);
- }
children++;
- if (twl6040_has_vibra(pdata, node)) {
+ /* Vibra input driver support */
+ if (twl6040_has_vibra(node)) {
irq = regmap_irq_get_virq(twl6040->irq_data, TWL6040_IRQ_VIB);
cell = &twl6040->cells[children];
@@ -650,28 +648,13 @@ static int twl6040_probe(struct i2c_client *client,
twl6040_vibra_rsrc[0].end = irq;
cell->resources = twl6040_vibra_rsrc;
cell->num_resources = ARRAY_SIZE(twl6040_vibra_rsrc);
-
- if (pdata && pdata->vibra) {
- cell->platform_data = pdata->vibra;
- cell->pdata_size = sizeof(*pdata->vibra);
- }
children++;
}
- /*
- * Enable the GPO driver in the following cases:
- * DT booted kernel or legacy boot with valid gpo platform_data
- */
- if (!pdata || (pdata && pdata->gpo)) {
- cell = &twl6040->cells[children];
- cell->name = "twl6040-gpo";
-
- if (pdata) {
- cell->platform_data = pdata->gpo;
- cell->pdata_size = sizeof(*pdata->gpo);
- }
- children++;
- }
+ /* GPO support */
+ cell = &twl6040->cells[children];
+ cell->name = "twl6040-gpo";
+ children++;
ret = mfd_add_devices(&client->dev, -1, twl6040->cells, children,
NULL, 0, NULL);
@@ -690,7 +673,7 @@ gpio_err:
regulator_bulk_disable(TWL6040_NUM_SUPPLIES, twl6040->supplies);
regulator_get_err:
i2c_set_clientdata(client, NULL);
-err:
+
return ret;
}
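
The regmap cache conversion above hinges on the volatile_reg callback: registers reported volatile bypass the cache, everything else can be served from it. A minimal sketch of the pattern, with a hypothetical 8-bit device and illustrative register addresses:

#include <linux/regmap.h>

static bool demo_volatile_reg(struct device *dev, unsigned int reg)
{
	switch (reg) {
	case 0x04: /* status register: always read from the hardware */
		return true;
	default:
		return false;
	}
}

static const struct regmap_config demo_regmap_config = {
	.reg_bits = 8,
	.val_bits = 8,
	.volatile_reg = demo_volatile_reg,
	.cache_type = REGCACHE_RBTREE,
};

Marking VIBCTLL/VIBCTLR non-volatile is what lets the patch delete the hand-rolled vibra_ctrl_cache: regmap now caches those registers itself.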
diff --git a/drivers/mfd/ucb1400_core.c b/drivers/mfd/ucb1400_core.c
index e9031fa..ebb20ed 100644
--- a/drivers/mfd/ucb1400_core.c
+++ b/drivers/mfd/ucb1400_core.c
@@ -52,7 +52,7 @@ static int ucb1400_core_probe(struct device *dev)
struct ucb1400_ts ucb_ts;
struct ucb1400_gpio ucb_gpio;
struct snd_ac97 *ac97;
- struct ucb1400_pdata *pdata = dev->platform_data;
+ struct ucb1400_pdata *pdata = dev_get_platdata(dev);
memset(&ucb_ts, 0, sizeof(ucb_ts));
memset(&ucb_gpio, 0, sizeof(ucb_gpio));
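
The dev_get_platdata() conversions that recur through this series swap open-coded dev->platform_data dereferences for the accessor from <linux/device.h>, which is simply:

static inline void *dev_get_platdata(const struct device *dev)
{
	return dev->platform_data;
}

No behaviour changes; the accessor just keeps drivers from reaching into struct device internals directly.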
diff --git a/drivers/mfd/ucb1x00-core.c b/drivers/mfd/ucb1x00-core.c
index 70f02da..d5966e6 100644
--- a/drivers/mfd/ucb1x00-core.c
+++ b/drivers/mfd/ucb1x00-core.c
@@ -393,22 +393,24 @@ static struct irq_chip ucb1x00_irqchip = {
static int ucb1x00_add_dev(struct ucb1x00 *ucb, struct ucb1x00_driver *drv)
{
struct ucb1x00_dev *dev;
- int ret = -ENOMEM;
+ int ret;
dev = kmalloc(sizeof(struct ucb1x00_dev), GFP_KERNEL);
- if (dev) {
- dev->ucb = ucb;
- dev->drv = drv;
-
- ret = drv->add(dev);
-
- if (ret == 0) {
- list_add_tail(&dev->dev_node, &ucb->devs);
- list_add_tail(&dev->drv_node, &drv->devs);
- } else {
- kfree(dev);
- }
+ if (!dev)
+ return -ENOMEM;
+
+ dev->ucb = ucb;
+ dev->drv = drv;
+
+ ret = drv->add(dev);
+ if (ret) {
+ kfree(dev);
+ return ret;
}
+
+ list_add_tail(&dev->dev_node, &ucb->devs);
+ list_add_tail(&dev->drv_node, &drv->devs);
+
return ret;
}
@@ -669,9 +671,10 @@ void ucb1x00_unregister_driver(struct ucb1x00_driver *drv)
mutex_unlock(&ucb1x00_mutex);
}
+#ifdef CONFIG_PM_SLEEP
static int ucb1x00_suspend(struct device *dev)
{
- struct ucb1x00_plat_data *pdata = dev->platform_data;
+ struct ucb1x00_plat_data *pdata = dev_get_platdata(dev);
struct ucb1x00 *ucb = dev_get_drvdata(dev);
struct ucb1x00_dev *udev;
@@ -703,7 +706,7 @@ static int ucb1x00_suspend(struct device *dev)
static int ucb1x00_resume(struct device *dev)
{
- struct ucb1x00_plat_data *pdata = dev->platform_data;
+ struct ucb1x00_plat_data *pdata = dev_get_platdata(dev);
struct ucb1x00 *ucb = dev_get_drvdata(dev);
struct ucb1x00_dev *udev;
@@ -736,6 +739,7 @@ static int ucb1x00_resume(struct device *dev)
mutex_unlock(&ucb1x00_mutex);
return 0;
}
+#endif
static const struct dev_pm_ops ucb1x00_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(ucb1x00_suspend, ucb1x00_resume)
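
Wrapping the callbacks in CONFIG_PM_SLEEP matches how SET_SYSTEM_SLEEP_PM_OPS() behaves: the macro expands to nothing when sleep support is disabled, so unguarded callbacks would trigger unused-function warnings. A minimal sketch of the pattern (demo_* names are illustrative):

#include <linux/pm.h>

#ifdef CONFIG_PM_SLEEP
static int demo_suspend(struct device *dev)
{
	return 0;
}

static int demo_resume(struct device *dev)
{
	return 0;
}
#endif

static const struct dev_pm_ops demo_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(demo_suspend, demo_resume)
};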
diff --git a/drivers/mfd/wl1273-core.c b/drivers/mfd/wl1273-core.c
index edbe6c1..f7c52d9 100644
--- a/drivers/mfd/wl1273-core.c
+++ b/drivers/mfd/wl1273-core.c
@@ -172,12 +172,9 @@ static int wl1273_fm_set_volume(struct wl1273_core *core, unsigned int volume)
static int wl1273_core_remove(struct i2c_client *client)
{
- struct wl1273_core *core = i2c_get_clientdata(client);
-
dev_dbg(&client->dev, "%s\n", __func__);
mfd_remove_devices(&client->dev);
- kfree(core);
return 0;
}
@@ -185,7 +182,7 @@ static int wl1273_core_remove(struct i2c_client *client)
static int wl1273_core_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- struct wl1273_fm_platform_data *pdata = client->dev.platform_data;
+ struct wl1273_fm_platform_data *pdata = dev_get_platdata(&client->dev);
struct wl1273_core *core;
struct mfd_cell *cell;
int children = 0;
@@ -203,7 +200,7 @@ static int wl1273_core_probe(struct i2c_client *client,
return -EINVAL;
}
- core = kzalloc(sizeof(*core), GFP_KERNEL);
+ core = devm_kzalloc(&client->dev, sizeof(*core), GFP_KERNEL);
if (!core)
return -ENOMEM;
@@ -249,7 +246,6 @@ static int wl1273_core_probe(struct i2c_client *client,
err:
pdata->free_resources();
- kfree(core);
dev_dbg(&client->dev, "%s\n", __func__);
diff --git a/drivers/mfd/wm5110-tables.c b/drivers/mfd/wm5110-tables.c
index 2a79723..3113e39 100644
--- a/drivers/mfd/wm5110-tables.c
+++ b/drivers/mfd/wm5110-tables.c
@@ -468,12 +468,14 @@ static const struct reg_default wm5110_reg_default[] = {
{ 0x00000176, 0x0000 }, /* R374 - FLL1 Control 6 */
{ 0x00000177, 0x0281 }, /* R375 - FLL1 Loop Filter Test 1 */
{ 0x00000178, 0x0000 }, /* R376 - FLL1 NCO Test 0 */
+ { 0x00000179, 0x0000 }, /* R377 - FLL1 Control 7 */
{ 0x00000181, 0x0000 }, /* R385 - FLL1 Synchroniser 1 */
{ 0x00000182, 0x0000 }, /* R386 - FLL1 Synchroniser 2 */
{ 0x00000183, 0x0000 }, /* R387 - FLL1 Synchroniser 3 */
{ 0x00000184, 0x0000 }, /* R388 - FLL1 Synchroniser 4 */
{ 0x00000185, 0x0000 }, /* R389 - FLL1 Synchroniser 5 */
{ 0x00000186, 0x0000 }, /* R390 - FLL1 Synchroniser 6 */
+ { 0x00000187, 0x0001 }, /* R391 - FLL1 Synchroniser 7 */
{ 0x00000189, 0x0000 }, /* R393 - FLL1 Spread Spectrum */
{ 0x0000018A, 0x0004 }, /* R394 - FLL1 GPIO Clock */
{ 0x00000191, 0x0000 }, /* R401 - FLL2 Control 1 */
@@ -484,12 +486,14 @@ static const struct reg_default wm5110_reg_default[] = {
{ 0x00000196, 0x0000 }, /* R406 - FLL2 Control 6 */
{ 0x00000197, 0x0000 }, /* R407 - FLL2 Loop Filter Test 1 */
{ 0x00000198, 0x0000 }, /* R408 - FLL2 NCO Test 0 */
+ { 0x00000199, 0x0000 }, /* R409 - FLL2 Control 7 */
{ 0x000001A1, 0x0000 }, /* R417 - FLL2 Synchroniser 1 */
{ 0x000001A2, 0x0000 }, /* R418 - FLL2 Synchroniser 2 */
{ 0x000001A3, 0x0000 }, /* R419 - FLL2 Synchroniser 3 */
{ 0x000001A4, 0x0000 }, /* R420 - FLL2 Synchroniser 4 */
{ 0x000001A5, 0x0000 }, /* R421 - FLL2 Synchroniser 5 */
{ 0x000001A6, 0x0000 }, /* R422 - FLL2 Synchroniser 6 */
+ { 0x000001A7, 0x0001 }, /* R423 - FLL2 Synchroniser 7 */
{ 0x000001A9, 0x0000 }, /* R425 - FLL2 Spread Spectrum */
{ 0x000001AA, 0x0004 }, /* R426 - FLL2 GPIO Clock */
{ 0x00000200, 0x0006 }, /* R512 - Mic Charge Pump 1 */
@@ -503,6 +507,11 @@ static const struct reg_default wm5110_reg_default[] = {
{ 0x0000029C, 0x0000 }, /* R668 - Headphone Detect 2 */
{ 0x000002A3, 0x1102 }, /* R675 - Mic Detect 1 */
{ 0x000002A4, 0x009F }, /* R676 - Mic Detect 2 */
+ { 0x000002A5, 0x0000 }, /* R677 - Mic Detect 3 */
+ { 0x000002A6, 0x3737 }, /* R678 - Mic Detect Level 1 */
+ { 0x000002A7, 0x372C }, /* R679 - Mic Detect Level 2 */
+ { 0x000002A8, 0x1422 }, /* R680 - Mic Detect Level 3 */
+ { 0x000002A9, 0x300A }, /* R681 - Mic Detect Level 4 */
{ 0x000002C3, 0x0000 }, /* R707 - Mic noise mix control 1 */
{ 0x000002D3, 0x0000 }, /* R723 - Jack detect analogue */
{ 0x00000300, 0x0000 }, /* R768 - Input Enables */
@@ -1392,6 +1401,7 @@ static bool wm5110_readable_register(struct device *dev, unsigned int reg)
case ARIZONA_FLL1_CONTROL_4:
case ARIZONA_FLL1_CONTROL_5:
case ARIZONA_FLL1_CONTROL_6:
+ case ARIZONA_FLL1_CONTROL_7:
case ARIZONA_FLL1_LOOP_FILTER_TEST_1:
case ARIZONA_FLL1_NCO_TEST_0:
case ARIZONA_FLL1_SYNCHRONISER_1:
@@ -1400,6 +1410,7 @@ static bool wm5110_readable_register(struct device *dev, unsigned int reg)
case ARIZONA_FLL1_SYNCHRONISER_4:
case ARIZONA_FLL1_SYNCHRONISER_5:
case ARIZONA_FLL1_SYNCHRONISER_6:
+ case ARIZONA_FLL1_SYNCHRONISER_7:
case ARIZONA_FLL1_SPREAD_SPECTRUM:
case ARIZONA_FLL1_GPIO_CLOCK:
case ARIZONA_FLL2_CONTROL_1:
@@ -1408,6 +1419,7 @@ static bool wm5110_readable_register(struct device *dev, unsigned int reg)
case ARIZONA_FLL2_CONTROL_4:
case ARIZONA_FLL2_CONTROL_5:
case ARIZONA_FLL2_CONTROL_6:
+ case ARIZONA_FLL2_CONTROL_7:
case ARIZONA_FLL2_LOOP_FILTER_TEST_1:
case ARIZONA_FLL2_NCO_TEST_0:
case ARIZONA_FLL2_SYNCHRONISER_1:
@@ -1416,6 +1428,7 @@ static bool wm5110_readable_register(struct device *dev, unsigned int reg)
case ARIZONA_FLL2_SYNCHRONISER_4:
case ARIZONA_FLL2_SYNCHRONISER_5:
case ARIZONA_FLL2_SYNCHRONISER_6:
+ case ARIZONA_FLL2_SYNCHRONISER_7:
case ARIZONA_FLL2_SPREAD_SPECTRUM:
case ARIZONA_FLL2_GPIO_CLOCK:
case ARIZONA_MIC_CHARGE_PUMP_1:
@@ -1430,6 +1443,10 @@ static bool wm5110_readable_register(struct device *dev, unsigned int reg)
case ARIZONA_MIC_DETECT_1:
case ARIZONA_MIC_DETECT_2:
case ARIZONA_MIC_DETECT_3:
+ case ARIZONA_MIC_DETECT_LEVEL_1:
+ case ARIZONA_MIC_DETECT_LEVEL_2:
+ case ARIZONA_MIC_DETECT_LEVEL_3:
+ case ARIZONA_MIC_DETECT_LEVEL_4:
case ARIZONA_MIC_NOISE_MIX_CONTROL_1:
case ARIZONA_JACK_DETECT_ANALOGUE:
case ARIZONA_INPUT_ENABLES:
@@ -2332,6 +2349,7 @@ static bool wm5110_volatile_register(struct device *dev, unsigned int reg)
case ARIZONA_IRQ_PIN_STATUS:
case ARIZONA_AOD_IRQ1:
case ARIZONA_AOD_IRQ2:
+ case ARIZONA_FX_CTRL2:
case ARIZONA_ASRC_STATUS:
case ARIZONA_DSP_STATUS:
case ARIZONA_DSP1_CONTROL_1:
diff --git a/drivers/mfd/wm831x-core.c b/drivers/mfd/wm831x-core.c
index 521340a..5c459f4 100644
--- a/drivers/mfd/wm831x-core.c
+++ b/drivers/mfd/wm831x-core.c
@@ -1618,7 +1618,7 @@ EXPORT_SYMBOL_GPL(wm831x_regmap_config);
*/
int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq)
{
- struct wm831x_pdata *pdata = wm831x->dev->platform_data;
+ struct wm831x_pdata *pdata = dev_get_platdata(wm831x->dev);
int rev, wm831x_num;
enum wm831x_parent parent;
int ret, i;
diff --git a/drivers/mfd/wm831x-irq.c b/drivers/mfd/wm831x-irq.c
index 804e56e..64e512e 100644
--- a/drivers/mfd/wm831x-irq.c
+++ b/drivers/mfd/wm831x-irq.c
@@ -571,7 +571,7 @@ static struct irq_domain_ops wm831x_irq_domain_ops = {
int wm831x_irq_init(struct wm831x *wm831x, int irq)
{
- struct wm831x_pdata *pdata = wm831x->dev->platform_data;
+ struct wm831x_pdata *pdata = dev_get_platdata(wm831x->dev);
struct irq_domain *domain;
int i, ret, irq_base;
diff --git a/drivers/mfd/wm831x-spi.c b/drivers/mfd/wm831x-spi.c
index e7ed14f66..07de3cc 100644
--- a/drivers/mfd/wm831x-spi.c
+++ b/drivers/mfd/wm831x-spi.c
@@ -34,7 +34,6 @@ static int wm831x_spi_probe(struct spi_device *spi)
if (wm831x == NULL)
return -ENOMEM;
- spi->bits_per_word = 16;
spi->mode = SPI_MODE_0;
spi_set_drvdata(spi, wm831x);
diff --git a/drivers/mfd/wm8350-i2c.c b/drivers/mfd/wm8350-i2c.c
index 2e57101..f919def 100644
--- a/drivers/mfd/wm8350-i2c.c
+++ b/drivers/mfd/wm8350-i2c.c
@@ -27,6 +27,7 @@ static int wm8350_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
struct wm8350 *wm8350;
+ struct wm8350_platform_data *pdata = dev_get_platdata(&i2c->dev);
int ret = 0;
wm8350 = devm_kzalloc(&i2c->dev, sizeof(struct wm8350), GFP_KERNEL);
@@ -44,7 +45,7 @@ static int wm8350_i2c_probe(struct i2c_client *i2c,
i2c_set_clientdata(i2c, wm8350);
wm8350->dev = &i2c->dev;
- return wm8350_device_init(wm8350, i2c->irq, i2c->dev.platform_data);
+ return wm8350_device_init(wm8350, i2c->irq, pdata);
}
static int wm8350_i2c_remove(struct i2c_client *i2c)
diff --git a/drivers/mfd/wm8400-core.c b/drivers/mfd/wm8400-core.c
index 639ca35..d66d256 100644
--- a/drivers/mfd/wm8400-core.c
+++ b/drivers/mfd/wm8400-core.c
@@ -178,7 +178,7 @@ static int wm8400_i2c_probe(struct i2c_client *i2c,
wm8400->dev = &i2c->dev;
i2c_set_clientdata(i2c, wm8400);
- ret = wm8400_init(wm8400, i2c->dev.platform_data);
+ ret = wm8400_init(wm8400, dev_get_platdata(&i2c->dev));
if (ret != 0)
goto err;
diff --git a/drivers/mfd/wm8994-core.c b/drivers/mfd/wm8994-core.c
index 781115e..e1c283e 100644
--- a/drivers/mfd/wm8994-core.c
+++ b/drivers/mfd/wm8994-core.c
@@ -201,35 +201,7 @@ static int wm8994_suspend(struct device *dev)
int ret;
/* Don't actually go through with the suspend if the CODEC is
- * still active (eg, for audio passthrough from CP. */
- ret = wm8994_reg_read(wm8994, WM8994_POWER_MANAGEMENT_1);
- if (ret < 0) {
- dev_err(dev, "Failed to read power status: %d\n", ret);
- } else if (ret & WM8994_VMID_SEL_MASK) {
- dev_dbg(dev, "CODEC still active, ignoring suspend\n");
- return 0;
- }
-
- ret = wm8994_reg_read(wm8994, WM8994_POWER_MANAGEMENT_4);
- if (ret < 0) {
- dev_err(dev, "Failed to read power status: %d\n", ret);
- } else if (ret & (WM8994_AIF2ADCL_ENA | WM8994_AIF2ADCR_ENA |
- WM8994_AIF1ADC2L_ENA | WM8994_AIF1ADC2R_ENA |
- WM8994_AIF1ADC1L_ENA | WM8994_AIF1ADC1R_ENA)) {
- dev_dbg(dev, "CODEC still active, ignoring suspend\n");
- return 0;
- }
-
- ret = wm8994_reg_read(wm8994, WM8994_POWER_MANAGEMENT_5);
- if (ret < 0) {
- dev_err(dev, "Failed to read power status: %d\n", ret);
- } else if (ret & (WM8994_AIF2DACL_ENA | WM8994_AIF2DACR_ENA |
- WM8994_AIF1DAC2L_ENA | WM8994_AIF1DAC2R_ENA |
- WM8994_AIF1DAC1L_ENA | WM8994_AIF1DAC1R_ENA)) {
- dev_dbg(dev, "CODEC still active, ignoring suspend\n");
- return 0;
- }
-
+ * still active for accessory detect. */
switch (wm8994->type) {
case WM8958:
case WM1811:
@@ -245,20 +217,6 @@ static int wm8994_suspend(struct device *dev)
break;
}
- switch (wm8994->type) {
- case WM1811:
- ret = wm8994_reg_read(wm8994, WM8994_ANTIPOP_2);
- if (ret < 0) {
- dev_err(dev, "Failed to read jackdet: %d\n", ret);
- } else if (ret & WM1811_JACKDET_MODE_MASK) {
- dev_dbg(dev, "CODEC still active, ignoring suspend\n");
- return 0;
- }
- break;
- default:
- break;
- }
-
/* Disable LDO pulldowns while the device is suspended if we
* don't know that something will be driving them. */
if (!wm8994->ldo_ena_always_driven)
diff --git a/drivers/mfd/wm8994-irq.c b/drivers/mfd/wm8994-irq.c
index d3a184a..e74dedd 100644
--- a/drivers/mfd/wm8994-irq.c
+++ b/drivers/mfd/wm8994-irq.c
@@ -193,7 +193,7 @@ int wm8994_irq_init(struct wm8994 *wm8994)
{
int ret;
unsigned long irqflags;
- struct wm8994_pdata *pdata = wm8994->dev->platform_data;
+ struct wm8994_pdata *pdata = dev_get_platdata(wm8994->dev);
if (!wm8994->irq) {
dev_warn(wm8994->dev,
diff --git a/drivers/misc/cb710/Kconfig b/drivers/misc/cb710/Kconfig
index 5acb9c5..22429b8 100644
--- a/drivers/misc/cb710/Kconfig
+++ b/drivers/misc/cb710/Kconfig
@@ -1,6 +1,6 @@
config CB710_CORE
tristate "ENE CB710/720 Flash memory card reader support"
- depends on PCI && GENERIC_HARDIRQS
+ depends on PCI
help
This option enables support for PCI ENE CB710/720 Flash memory card
reader found in some laptops (ie. some versions of HP Compaq nx9500).
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index cd0b7f4..1a3163f 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -812,7 +812,7 @@ static int mmc_blk_cmd_error(struct request *req, const char *name, int error,
* Otherwise we don't understand what happened, so abort.
*/
static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
- struct mmc_blk_request *brq, int *ecc_err)
+ struct mmc_blk_request *brq, int *ecc_err, int *gen_err)
{
bool prev_cmd_status_valid = true;
u32 status, stop_status = 0;
@@ -850,6 +850,16 @@ static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
(brq->cmd.resp[0] & R1_CARD_ECC_FAILED))
*ecc_err = 1;
+ /* Flag General errors */
+ if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ)
+ if ((status & R1_ERROR) ||
+ (brq->stop.resp[0] & R1_ERROR)) {
+ pr_err("%s: %s: general error sending stop or status command, stop cmd response %#x, card status %#x\n",
+ req->rq_disk->disk_name, __func__,
+ brq->stop.resp[0], status);
+ *gen_err = 1;
+ }
+
/*
* Check the current card state. If it is in some data transfer
* mode, tell it to stop (and hopefully transition back to TRAN.)
@@ -869,6 +879,13 @@ static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
return ERR_ABORT;
if (stop_status & R1_CARD_ECC_FAILED)
*ecc_err = 1;
+ if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ)
+ if (stop_status & R1_ERROR) {
+ pr_err("%s: %s: general error sending stop command, stop cmd response %#x\n",
+ req->rq_disk->disk_name, __func__,
+ stop_status);
+ *gen_err = 1;
+ }
}
/* Check for set block count errors */
@@ -1097,7 +1114,7 @@ static int mmc_blk_err_check(struct mmc_card *card,
mmc_active);
struct mmc_blk_request *brq = &mq_mrq->brq;
struct request *req = mq_mrq->req;
- int ecc_err = 0;
+ int ecc_err = 0, gen_err = 0;
/*
* sbc.error indicates a problem with the set block count
@@ -1111,7 +1128,7 @@ static int mmc_blk_err_check(struct mmc_card *card,
*/
if (brq->sbc.error || brq->cmd.error || brq->stop.error ||
brq->data.error) {
- switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err)) {
+ switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err, &gen_err)) {
case ERR_RETRY:
return MMC_BLK_RETRY;
case ERR_ABORT:
@@ -1143,6 +1160,14 @@ static int mmc_blk_err_check(struct mmc_card *card,
u32 status;
unsigned long timeout;
+ /* Check stop command response */
+ if (brq->stop.resp[0] & R1_ERROR) {
+ pr_err("%s: %s: general error sending stop command, stop cmd response %#x\n",
+ req->rq_disk->disk_name, __func__,
+ brq->stop.resp[0]);
+ gen_err = 1;
+ }
+
timeout = jiffies + msecs_to_jiffies(MMC_BLK_TIMEOUT_MS);
do {
int err = get_card_status(card, &status, 5);
@@ -1152,6 +1177,13 @@ static int mmc_blk_err_check(struct mmc_card *card,
return MMC_BLK_CMD_ERR;
}
+ if (status & R1_ERROR) {
+ pr_err("%s: %s: general error sending status command, card status %#x\n",
+ req->rq_disk->disk_name, __func__,
+ status);
+ gen_err = 1;
+ }
+
/* Timeout if the device never becomes ready for data
* and never leaves the program state.
*/
@@ -1171,6 +1203,13 @@ static int mmc_blk_err_check(struct mmc_card *card,
(R1_CURRENT_STATE(status) == R1_STATE_PRG));
}
+ /* if general error occurs, retry the write operation. */
+ if (gen_err) {
+ pr_warn("%s: retrying write for general error\n",
+ req->rq_disk->disk_name);
+ return MMC_BLK_RETRY;
+ }
+
if (brq->data.error) {
pr_err("%s: error %d transferring data, sector %u, nr %u, cmd response %#x, card status %#x\n",
req->rq_disk->disk_name, brq->data.error,
@@ -2191,10 +2230,10 @@ static void mmc_blk_remove_req(struct mmc_blk_data *md)
* is freeing the queue that stops new requests
* from being accepted.
*/
+ card = md->queue.card;
mmc_cleanup_queue(&md->queue);
if (md->flags & MMC_BLK_PACKED_CMD)
mmc_packed_clean(&md->queue);
- card = md->queue.card;
if (md->disk->flags & GENHD_FL_UP) {
device_remove_file(disk_to_dev(md->disk), &md->force_ro);
if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
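
The new gen_err handling keys off R1_ERROR, the card's catch-all error bit in an R1 response. A compilable sketch of the check the hunks add (the surrounding request plumbing is elided):

#include <linux/mmc/mmc.h>

/* returns 1 if either the status poll or the stop-command response
 * reported a general/unknown card error (R1 bit 19) */
static int demo_general_error(u32 status, u32 stop_resp)
{
	return !!((status & R1_ERROR) || (stop_resp & R1_ERROR));
}

Flagging the condition and returning MMC_BLK_RETRY means a write the card itself reported as failed gets retried instead of being silently treated as successful.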
diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c
index a69df52..0c0fc52 100644
--- a/drivers/mmc/card/mmc_test.c
+++ b/drivers/mmc/card/mmc_test.c
@@ -2849,18 +2849,12 @@ static ssize_t mtf_test_write(struct file *file, const char __user *buf,
struct seq_file *sf = (struct seq_file *)file->private_data;
struct mmc_card *card = (struct mmc_card *)sf->private;
struct mmc_test_card *test;
- char lbuf[12];
long testcase;
+ int ret;
- if (count >= sizeof(lbuf))
- return -EINVAL;
-
- if (copy_from_user(lbuf, buf, count))
- return -EFAULT;
- lbuf[count] = '\0';
-
- if (strict_strtol(lbuf, 10, &testcase))
- return -EINVAL;
+ ret = kstrtol_from_user(buf, count, 10, &testcase);
+ if (ret)
+ return ret;
test = kzalloc(sizeof(struct mmc_test_card), GFP_KERNEL);
if (!test)
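
kstrtol_from_user() parses an integer straight out of user memory, which is what lets the hunk drop the bounce buffer, copy_from_user() and the deprecated strict_strtol(). A minimal sketch of a debugfs-style write handler using it:

#include <linux/kernel.h>
#include <linux/fs.h>

static ssize_t demo_write(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	long value;
	int ret;

	ret = kstrtol_from_user(buf, count, 10, &value); /* base 10 */
	if (ret)
		return ret;

	return count;
}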
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 5d08855..bf18b6b 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -27,6 +27,7 @@
#include <linux/fault-inject.h>
#include <linux/random.h>
#include <linux/slab.h>
+#include <linux/of.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
@@ -1196,6 +1197,49 @@ u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max)
}
EXPORT_SYMBOL(mmc_vddrange_to_ocrmask);
+#ifdef CONFIG_OF
+
+/**
+ * mmc_of_parse_voltage - return mask of supported voltages
+ * @np: The device node to be parsed.
+ * @mask: mask of voltages available for MMC/SD/SDIO
+ *
+ * 1. Return zero on success.
+ * 2. Return negative errno: voltage-range is invalid.
+ */
+int mmc_of_parse_voltage(struct device_node *np, u32 *mask)
+{
+ const u32 *voltage_ranges;
+ int num_ranges, i;
+
+ voltage_ranges = of_get_property(np, "voltage-ranges", &num_ranges);
+ num_ranges = voltage_ranges ? num_ranges / sizeof(*voltage_ranges) / 2 : 0;
+ if (!voltage_ranges || !num_ranges) {
+ pr_info("%s: voltage-ranges unspecified\n", np->full_name);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < num_ranges; i++) {
+ const int j = i * 2;
+ u32 ocr_mask;
+
+ ocr_mask = mmc_vddrange_to_ocrmask(
+ be32_to_cpu(voltage_ranges[j]),
+ be32_to_cpu(voltage_ranges[j + 1]));
+ if (!ocr_mask) {
+ pr_err("%s: voltage-range #%d is invalid\n",
+ np->full_name, i);
+ return -EINVAL;
+ }
+ *mask |= ocr_mask;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(mmc_of_parse_voltage);
+
+#endif /* CONFIG_OF */
+
#ifdef CONFIG_REGULATOR
/**
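
A hedged usage sketch for the new helper: a host driver whose node carries a voltage-ranges property (pairs of min/max millivolt values, each pair fed through mmc_vddrange_to_ocrmask()) would build its OCR mask like this. Note the mask must be zero-initialised, since the helper only ORs bits in:

#include <linux/mmc/core.h>
#include <linux/platform_device.h>

static int demo_get_ocr(struct platform_device *pdev, u32 *ocr_mask)
{
	*ocr_mask = 0;
	return mmc_of_parse_voltage(pdev->dev.of_node, ocr_mask);
}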
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 6fb6f77..49bc403 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -374,7 +374,7 @@ int mmc_of_parse(struct mmc_host *host)
if (!(flags & OF_GPIO_ACTIVE_LOW))
gpio_inv_cd = true;
- ret = mmc_gpio_request_cd(host, gpio);
+ ret = mmc_gpio_request_cd(host, gpio, 0);
if (ret < 0) {
dev_err(host->parent,
"Failed to request CD GPIO #%d: %d!\n",
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
index 837fc73..ef18348 100644
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -531,6 +531,7 @@ mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
data.sg = &sg;
data.sg_len = 1;
+ mmc_set_data_timeout(&data, card);
sg_init_one(&sg, data_buf, len);
mmc_wait_for_req(host, &mrq);
err = 0;
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index 176d125..5e8823d 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -215,7 +215,7 @@ static int mmc_decode_scr(struct mmc_card *card)
static int mmc_read_ssr(struct mmc_card *card)
{
unsigned int au, es, et, eo;
- int err, i;
+ int err, i, max_au;
u32 *ssr;
if (!(card->csd.cmdclass & CCC_APP_SPEC)) {
@@ -239,12 +239,15 @@ static int mmc_read_ssr(struct mmc_card *card)
for (i = 0; i < 16; i++)
ssr[i] = be32_to_cpu(ssr[i]);
+ /* SD3.0 increases max AU size to 64MB (0xF) from 4MB (0x9) */
+ max_au = card->scr.sda_spec3 ? 0xF : 0x9;
+
/*
* UNSTUFF_BITS only works with four u32s so we have to offset the
* bitfield positions accordingly.
*/
au = UNSTUFF_BITS(ssr, 428 - 384, 4);
- if (au > 0 && au <= 9) {
+ if (au > 0 && au <= max_au) {
card->ssr.au = 1 << (au + 4);
es = UNSTUFF_BITS(ssr, 408 - 384, 16);
et = UNSTUFF_BITS(ssr, 402 - 384, 6);
@@ -942,13 +945,13 @@ static int mmc_sd_init_card(struct mmc_host *host, u32 ocr,
if (!mmc_host_is_spi(host)) {
err = mmc_send_relative_addr(host, &card->rca);
if (err)
- return err;
+ goto free_card;
}
if (!oldcard) {
err = mmc_sd_get_csd(host, card);
if (err)
- return err;
+ goto free_card;
mmc_decode_cid(card);
}
@@ -959,7 +962,7 @@ static int mmc_sd_init_card(struct mmc_host *host, u32 ocr,
if (!mmc_host_is_spi(host)) {
err = mmc_select_card(card);
if (err)
- return err;
+ goto free_card;
}
err = mmc_sd_setup_card(host, card, oldcard != NULL);
diff --git a/drivers/mmc/core/slot-gpio.c b/drivers/mmc/core/slot-gpio.c
index 3242351..46596b71 100644
--- a/drivers/mmc/core/slot-gpio.c
+++ b/drivers/mmc/core/slot-gpio.c
@@ -135,6 +135,7 @@ EXPORT_SYMBOL(mmc_gpio_request_ro);
* mmc_gpio_request_cd - request a gpio for card-detection
* @host: mmc host
* @gpio: gpio number requested
+ * @debounce: debounce time in microseconds
*
* As devm_* managed functions are used in mmc_gpio_request_cd(), client
* drivers do not need to explicitly call mmc_gpio_free_cd() for freeing up,
@@ -143,9 +144,14 @@ EXPORT_SYMBOL(mmc_gpio_request_ro);
* switching for card-detection, they are responsible for calling
* mmc_gpio_request_cd() and mmc_gpio_free_cd() as a pair on their own.
*
+ * If GPIO debouncing is desired, set the debounce parameter to a non-zero
+ * value. The caller is responsible for ensuring that the GPIO driver associated
+ * with the GPIO supports debouncing, otherwise an error will be returned.
+ *
* Returns zero on success, else an error.
*/
-int mmc_gpio_request_cd(struct mmc_host *host, unsigned int gpio)
+int mmc_gpio_request_cd(struct mmc_host *host, unsigned int gpio,
+ unsigned int debounce)
{
struct mmc_gpio *ctx;
int irq = gpio_to_irq(gpio);
@@ -167,6 +173,12 @@ int mmc_gpio_request_cd(struct mmc_host *host, unsigned int gpio)
*/
return ret;
+ if (debounce) {
+ ret = gpio_set_debounce(gpio, debounce);
+ if (ret < 0)
+ return ret;
+ }
+
/*
* Even if gpio_to_irq() returns a valid IRQ number, the platform might
* still prefer to poll, e.g., because that IRQ number is already used
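
With the new parameter, a caller that wants debouncing passes the period in microseconds; every existing caller in this series passes 0 to keep the old behaviour. A sketch:

#include <linux/mmc/host.h>
#include <linux/mmc/slot-gpio.h>

static int demo_request_cd(struct mmc_host *mmc, unsigned int gpio)
{
	/* 200 us debounce; fails if the GPIO driver can't debounce */
	return mmc_gpio_request_cd(mmc, gpio, 200);
}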
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 8a4c066..7fc5099 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -284,11 +284,11 @@ config MMC_OMAP
config MMC_OMAP_HS
tristate "TI OMAP High Speed Multimedia Card Interface support"
- depends on SOC_OMAP2430 || ARCH_OMAP3 || ARCH_OMAP4
+ depends on ARCH_OMAP2PLUS || COMPILE_TEST
help
This selects the TI OMAP High Speed Multimedia card Interface.
- If you have an OMAP2430 or OMAP3 board or OMAP4 board with a
- Multimedia Card slot, say Y or M here.
+ If you have an omap2plus board with a Multimedia Card slot,
+ say Y or M here.
If unsure, say N.
@@ -487,7 +487,7 @@ config MMC_SDHI
config MMC_CB710
tristate "ENE CB710 MMC/SD Interface support"
- depends on PCI && GENERIC_HARDIRQS
+ depends on PCI
select CB710_CORE
help
This option enables support for MMC/SD part of ENE CB710/720 Flash
@@ -530,7 +530,7 @@ config SDH_BFIN_MISSING_CMD_PULLUP_WORKAROUND
config MMC_DW
tristate "Synopsys DesignWare Memory Card Interface"
- depends on ARM
+ depends on ARC || ARM
help
This selects support for the Synopsys DesignWare Mobile Storage IP
block, this provides host support for SD and MMC interfaces, in both
@@ -569,7 +569,7 @@ config MMC_DW_EXYNOS
config MMC_DW_SOCFPGA
tristate "SOCFPGA specific extensions for Synopsys DW Memory Card Interface"
- depends on MMC_DW
+ depends on MMC_DW && MFD_SYSCON
select MMC_DW_PLTFM
help
This selects support for Altera SoCFPGA specific extensions to the
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index d422e21..c41d0c3 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -52,8 +52,6 @@ obj-$(CONFIG_MMC_WMT) += wmt-sdmmc.o
obj-$(CONFIG_MMC_REALTEK_PCI) += rtsx_pci_sdmmc.o
-obj-$(CONFIG_MMC_REALTEK_PCI) += rtsx_pci_sdmmc.o
-
obj-$(CONFIG_MMC_SDHCI_PLTFM) += sdhci-pltfm.o
obj-$(CONFIG_MMC_SDHCI_CNS3XXX) += sdhci-cns3xxx.o
obj-$(CONFIG_MMC_SDHCI_ESDHC_IMX) += sdhci-esdhc-imx.o
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index bdb84da..69e438e 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -378,6 +378,8 @@ static int atmci_regs_show(struct seq_file *s, void *v)
{
struct atmel_mci *host = s->private;
u32 *buf;
+ int ret = 0;
+
buf = kmalloc(ATMCI_REGS_SIZE, GFP_KERNEL);
if (!buf)
@@ -388,12 +390,16 @@ static int atmci_regs_show(struct seq_file *s, void *v)
* not disabling interrupts, so IMR and SR may not be
* consistent.
*/
+ ret = clk_prepare_enable(host->mck);
+ if (ret)
+ goto out;
+
spin_lock_bh(&host->lock);
- clk_enable(host->mck);
memcpy_fromio(buf, host->regs, ATMCI_REGS_SIZE);
- clk_disable(host->mck);
spin_unlock_bh(&host->lock);
+ clk_disable_unprepare(host->mck);
+
seq_printf(s, "MR:\t0x%08x%s%s ",
buf[ATMCI_MR / 4],
buf[ATMCI_MR / 4] & ATMCI_MR_RDPROOF ? " RDPROOF" : "",
@@ -442,9 +448,10 @@ static int atmci_regs_show(struct seq_file *s, void *v)
val & ATMCI_CFG_LSYNC ? " LSYNC" : "");
}
+out:
kfree(buf);
- return 0;
+ return ret;
}
static int atmci_regs_open(struct inode *inode, struct file *file)
@@ -1262,6 +1269,7 @@ static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
struct atmel_mci_slot *slot = mmc_priv(mmc);
struct atmel_mci *host = slot->host;
unsigned int i;
+ bool unprepare_clk;
slot->sdc_reg &= ~ATMCI_SDCBUS_MASK;
switch (ios->bus_width) {
@@ -1277,9 +1285,13 @@ static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
unsigned int clock_min = ~0U;
u32 clkdiv;
+ clk_prepare(host->mck);
+ unprepare_clk = true;
+
spin_lock_bh(&host->lock);
if (!host->mode_reg) {
clk_enable(host->mck);
+ unprepare_clk = false;
atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST);
atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIEN);
if (host->caps.has_cfg_reg)
@@ -1347,6 +1359,8 @@ static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
} else {
bool any_slot_active = false;
+ unprepare_clk = false;
+
spin_lock_bh(&host->lock);
slot->clock = 0;
for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
@@ -1360,12 +1374,16 @@ static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
if (host->mode_reg) {
atmci_readl(host, ATMCI_MR);
clk_disable(host->mck);
+ unprepare_clk = true;
}
host->mode_reg = 0;
}
spin_unlock_bh(&host->lock);
}
+ if (unprepare_clk)
+ clk_unprepare(host->mck);
+
switch (ios->power_mode) {
case MMC_POWER_UP:
set_bit(ATMCI_CARD_NEED_INIT, &slot->flags);
@@ -2376,10 +2394,12 @@ static int __init atmci_probe(struct platform_device *pdev)
if (!host->regs)
goto err_ioremap;
- clk_enable(host->mck);
+ ret = clk_prepare_enable(host->mck);
+ if (ret)
+ goto err_request_irq;
atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST);
host->bus_hz = clk_get_rate(host->mck);
- clk_disable(host->mck);
+ clk_disable_unprepare(host->mck);
host->mapbase = regs->start;
@@ -2482,11 +2502,11 @@ static int __exit atmci_remove(struct platform_device *pdev)
atmci_cleanup_slot(host->slot[i], i);
}
- clk_enable(host->mck);
+ clk_prepare_enable(host->mck);
atmci_writel(host, ATMCI_IDR, ~0UL);
atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIDIS);
atmci_readl(host, ATMCI_SR);
- clk_disable(host->mck);
+ clk_disable_unprepare(host->mck);
if (host->dma.chan)
dma_release_channel(host->dma.chan);
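
The conversion separates clk_prepare(), which may sleep, from clk_enable(), which is atomic, so that only the atomic half runs under the spinlock. A sketch of the split, with illustrative names:

#include <linux/clk.h>
#include <linux/spinlock.h>

static int demo_clock_on_locked(struct clk *clk, spinlock_t *lock)
{
	int ret;

	ret = clk_prepare(clk);	/* may sleep: do it before locking */
	if (ret)
		return ret;

	spin_lock_bh(lock);
	ret = clk_enable(clk);	/* atomic: safe under the lock */
	spin_unlock_bh(lock);

	if (ret)
		clk_unprepare(clk);
	return ret;
}

Where no lock is held, clk_prepare_enable()/clk_disable_unprepare() do both halves in one call, as the probe and remove paths above now do.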
diff --git a/drivers/mmc/host/dw_mmc-exynos.c b/drivers/mmc/host/dw_mmc-exynos.c
index 866edef..6a1fa21 100644
--- a/drivers/mmc/host/dw_mmc-exynos.c
+++ b/drivers/mmc/host/dw_mmc-exynos.c
@@ -39,6 +39,7 @@ enum dw_mci_exynos_type {
DW_MCI_TYPE_EXYNOS4210,
DW_MCI_TYPE_EXYNOS4412,
DW_MCI_TYPE_EXYNOS5250,
+ DW_MCI_TYPE_EXYNOS5420,
};
/* Exynos implementation specific driver private data */
@@ -62,6 +63,9 @@ static struct dw_mci_exynos_compatible {
}, {
.compatible = "samsung,exynos5250-dw-mshc",
.ctrl_type = DW_MCI_TYPE_EXYNOS5250,
+ }, {
+ .compatible = "samsung,exynos5420-dw-mshc",
+ .ctrl_type = DW_MCI_TYPE_EXYNOS5420,
},
};
@@ -90,7 +94,8 @@ static int dw_mci_exynos_setup_clock(struct dw_mci *host)
{
struct dw_mci_exynos_priv_data *priv = host->priv;
- if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS5250)
+ if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS5250 ||
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS5420)
host->bus_hz /= (priv->ciu_div + 1);
else if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS4412)
host->bus_hz /= EXYNOS4412_FIXED_CIU_CLK_DIV;
@@ -173,6 +178,8 @@ static const struct of_device_id dw_mci_exynos_match[] = {
.data = &exynos_drv_data, },
{ .compatible = "samsung,exynos5250-dw-mshc",
.data = &exynos_drv_data, },
+ { .compatible = "samsung,exynos5420-dw-mshc",
+ .data = &exynos_drv_data, },
{},
};
MODULE_DEVICE_TABLE(of, dw_mci_exynos_match);
diff --git a/drivers/mmc/host/dw_mmc-pci.c b/drivers/mmc/host/dw_mmc-pci.c
index b456b0c..f70546a 100644
--- a/drivers/mmc/host/dw_mmc-pci.c
+++ b/drivers/mmc/host/dw_mmc-pci.c
@@ -59,7 +59,9 @@ static int dw_mci_pci_probe(struct pci_dev *pdev,
if (ret)
return ret;
- host->regs = pcim_iomap_table(pdev)[0];
+ host->regs = pcim_iomap_table(pdev)[PCI_BAR_NO];
+
+ pci_set_master(pdev);
ret = dw_mci_probe(host);
if (ret)
diff --git a/drivers/mmc/host/dw_mmc-pltfm.c b/drivers/mmc/host/dw_mmc-pltfm.c
index ee52556..2089752 100644
--- a/drivers/mmc/host/dw_mmc-pltfm.c
+++ b/drivers/mmc/host/dw_mmc-pltfm.c
@@ -23,6 +23,7 @@
#include <linux/of.h>
#include "dw_mmc.h"
+#include "dw_mmc-pltfm.h"
static void dw_mci_rockchip_prepare_command(struct dw_mci *host, u32 *cmdr)
{
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 5424073..018f365 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -1601,18 +1601,17 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
pending = mci_readl(host, MINTSTS); /* read-only mask reg */
- if (pending) {
-
- /*
- * DTO fix - version 2.10a and below, and only if internal DMA
- * is configured.
- */
- if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) {
- if (!pending &&
- ((mci_readl(host, STATUS) >> 17) & 0x1fff))
- pending |= SDMMC_INT_DATA_OVER;
- }
+ /*
+ * DTO fix - version 2.10a and below, and only if internal DMA
+ * is configured.
+ */
+ if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) {
+ if (!pending &&
+ ((mci_readl(host, STATUS) >> 17) & 0x1fff))
+ pending |= SDMMC_INT_DATA_OVER;
+ }
+ if (pending) {
if (pending & DW_MCI_CMD_ERROR_FLAGS) {
mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
host->cmd_status = pending;
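
The reshuffled hunk fixes dead code: the DTO quirk is meant to synthesise a data-over interrupt precisely when MINTSTS reads back zero but the busy bits in STATUS are set, yet the old placement inside "if (pending)" made its !pending test unreachable. A standalone illustration of the old control flow:

/* old logic, reduced to its control flow: the inner branch can
 * never be taken, because pending is known non-zero there */
static unsigned int old_dto_quirk(unsigned int pending, int quirk_set,
				  unsigned int busy_bits)
{
	if (pending) {
		if (quirk_set && !pending && busy_bits) /* unreachable */
			pending |= 0x8; /* stand-in for SDMMC_INT_DATA_OVER */
	}
	return pending;
}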
diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c
index 0308c9f..6651633 100644
--- a/drivers/mmc/host/jz4740_mmc.c
+++ b/drivers/mmc/host/jz4740_mmc.c
@@ -713,7 +713,7 @@ static int jz4740_mmc_request_gpios(struct mmc_host *mmc,
mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
if (gpio_is_valid(pdata->gpio_card_detect)) {
- ret = mmc_gpio_request_cd(mmc, pdata->gpio_card_detect);
+ ret = mmc_gpio_request_cd(mmc, pdata->gpio_card_detect, 0);
if (ret)
return ret;
}
@@ -783,9 +783,8 @@ static int jz4740_mmc_probe(struct platform_device* pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
host->base = devm_ioremap_resource(&pdev->dev, res);
- if (!host->base) {
- ret = -EBUSY;
- dev_err(&pdev->dev, "Failed to ioremap base memory\n");
+ if (IS_ERR(host->base)) {
+ ret = PTR_ERR(host->base);
goto err_free_host;
}
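
The fix here is the standard devm_ioremap_resource() idiom: unlike plain devm_ioremap(), it never returns NULL on failure but an ERR_PTR-encoded errno, so a NULL check silently passes on errors. A sketch of the correct pattern:

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))	/* never NULL: errors come as ERR_PTRs */
		return PTR_ERR(base);

	return 0;
}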
diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
index 74145d1..0a87e56 100644
--- a/drivers/mmc/host/mmc_spi.c
+++ b/drivers/mmc/host/mmc_spi.c
@@ -36,6 +36,7 @@
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h> /* for R1_SPI_* bit values */
+#include <linux/mmc/slot-gpio.h>
#include <linux/spi/spi.h>
#include <linux/spi/mmc_spi.h>
@@ -1272,33 +1273,11 @@ static void mmc_spi_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
}
}
-static int mmc_spi_get_ro(struct mmc_host *mmc)
-{
- struct mmc_spi_host *host = mmc_priv(mmc);
-
- if (host->pdata && host->pdata->get_ro)
- return !!host->pdata->get_ro(mmc->parent);
- /*
- * Board doesn't support read only detection; let the mmc core
- * decide what to do.
- */
- return -ENOSYS;
-}
-
-static int mmc_spi_get_cd(struct mmc_host *mmc)
-{
- struct mmc_spi_host *host = mmc_priv(mmc);
-
- if (host->pdata && host->pdata->get_cd)
- return !!host->pdata->get_cd(mmc->parent);
- return -ENOSYS;
-}
-
static const struct mmc_host_ops mmc_spi_ops = {
.request = mmc_spi_request,
.set_ios = mmc_spi_set_ios,
- .get_ro = mmc_spi_get_ro,
- .get_cd = mmc_spi_get_cd,
+ .get_ro = mmc_gpio_get_ro,
+ .get_cd = mmc_gpio_get_cd,
};
@@ -1324,6 +1303,7 @@ static int mmc_spi_probe(struct spi_device *spi)
struct mmc_host *mmc;
struct mmc_spi_host *host;
int status;
+ bool has_ro = false;
/* We rely on full duplex transfers, mostly to reduce
* per-transfer overheads (by making fewer transfers).
@@ -1448,18 +1428,33 @@ static int mmc_spi_probe(struct spi_device *spi)
}
/* pass platform capabilities, if any */
- if (host->pdata)
+ if (host->pdata) {
mmc->caps |= host->pdata->caps;
+ mmc->caps2 |= host->pdata->caps2;
+ }
status = mmc_add_host(mmc);
if (status != 0)
goto fail_add_host;
+ if (host->pdata && host->pdata->flags & MMC_SPI_USE_CD_GPIO) {
+ status = mmc_gpio_request_cd(mmc, host->pdata->cd_gpio,
+ host->pdata->cd_debounce);
+ if (status != 0)
+ goto fail_add_host;
+ }
+
+ if (host->pdata && host->pdata->flags & MMC_SPI_USE_RO_GPIO) {
+ has_ro = true;
+ status = mmc_gpio_request_ro(mmc, host->pdata->ro_gpio);
+ if (status != 0)
+ goto fail_add_host;
+ }
+
dev_info(&spi->dev, "SD/MMC host %s%s%s%s%s\n",
dev_name(&mmc->class_dev),
host->dma_dev ? "" : ", no DMA",
- (host->pdata && host->pdata->get_ro)
- ? "" : ", no WP",
+ has_ro ? "" : ", no WP",
(host->pdata && host->pdata->setpower)
? "" : ", no poweroff",
(mmc->caps & MMC_CAP_NEEDS_POLL)
diff --git a/drivers/mmc/host/mvsdio.c b/drivers/mmc/host/mvsdio.c
index 4ddd83f..06c5b0b 100644
--- a/drivers/mmc/host/mvsdio.c
+++ b/drivers/mmc/host/mvsdio.c
@@ -757,7 +757,8 @@ static int __init mvsd_probe(struct platform_device *pdev)
if (mvsd_data->gpio_card_detect &&
gpio_is_valid(mvsd_data->gpio_card_detect)) {
ret = mmc_gpio_request_cd(mmc,
- mvsd_data->gpio_card_detect);
+ mvsd_data->gpio_card_detect,
+ 0);
if (ret)
goto out;
} else {
diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c
index f38d75f..e1fa3ef 100644
--- a/drivers/mmc/host/mxs-mmc.c
+++ b/drivers/mmc/host/mxs-mmc.c
@@ -102,12 +102,15 @@ static int mxs_mmc_get_cd(struct mmc_host *mmc)
BM_SSP_STATUS_CARD_DETECT) ^ host->cd_inverted;
}
-static void mxs_mmc_reset(struct mxs_mmc_host *host)
+static int mxs_mmc_reset(struct mxs_mmc_host *host)
{
struct mxs_ssp *ssp = &host->ssp;
u32 ctrl0, ctrl1;
+ int ret;
- stmp_reset_block(ssp->base);
+ ret = stmp_reset_block(ssp->base);
+ if (ret)
+ return ret;
ctrl0 = BM_SSP_CTRL0_IGNORE_CRC;
ctrl1 = BF_SSP(0x3, CTRL1_SSP_MODE) |
@@ -132,6 +135,7 @@ static void mxs_mmc_reset(struct mxs_mmc_host *host)
writel(ctrl0, ssp->base + HW_SSP_CTRL0);
writel(ctrl1, ssp->base + HW_SSP_CTRL1(ssp));
+ return 0;
}
static void mxs_mmc_start_cmd(struct mxs_mmc_host *host,
@@ -618,21 +622,25 @@ static int mxs_mmc_probe(struct platform_device *pdev)
}
}
- ssp->clk = clk_get(&pdev->dev, NULL);
+ ssp->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(ssp->clk)) {
ret = PTR_ERR(ssp->clk);
goto out_mmc_free;
}
clk_prepare_enable(ssp->clk);
- mxs_mmc_reset(host);
+ ret = mxs_mmc_reset(host);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to reset mmc: %d\n", ret);
+ goto out_clk_disable;
+ }
ssp->dmach = dma_request_slave_channel(&pdev->dev, "rx-tx");
if (!ssp->dmach) {
dev_err(mmc_dev(host->mmc),
"%s: failed to request dma\n", __func__);
ret = -ENODEV;
- goto out_clk_put;
+ goto out_clk_disable;
}
/* set mmc core parameters */
@@ -685,9 +693,8 @@ static int mxs_mmc_probe(struct platform_device *pdev)
out_free_dma:
if (ssp->dmach)
dma_release_channel(ssp->dmach);
-out_clk_put:
+out_clk_disable:
clk_disable_unprepare(ssp->clk);
- clk_put(ssp->clk);
out_mmc_free:
mmc_free_host(mmc);
return ret;
@@ -705,7 +712,6 @@ static int mxs_mmc_remove(struct platform_device *pdev)
dma_release_channel(ssp->dmach);
clk_disable_unprepare(ssp->clk);
- clk_put(ssp->clk);
mmc_free_host(mmc);
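
devm_clk_get() ties the clock reference to the device's lifetime, which is why the explicit clk_put() disappears from both the error path and remove(). A minimal sketch:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
	struct clk *clk;

	/* released automatically when the device is unbound */
	clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	return clk_prepare_enable(clk);
}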
diff --git a/drivers/mmc/host/of_mmc_spi.c b/drivers/mmc/host/of_mmc_spi.c
index d720b5e..6e218fb 100644
--- a/drivers/mmc/host/of_mmc_spi.c
+++ b/drivers/mmc/host/of_mmc_spi.c
@@ -50,25 +50,6 @@ static struct of_mmc_spi *to_of_mmc_spi(struct device *dev)
return container_of(dev->platform_data, struct of_mmc_spi, pdata);
}
-static int of_mmc_spi_read_gpio(struct device *dev, int gpio_num)
-{
- struct of_mmc_spi *oms = to_of_mmc_spi(dev);
- bool active_low = oms->alow_gpios[gpio_num];
- bool value = gpio_get_value(oms->gpios[gpio_num]);
-
- return active_low ^ value;
-}
-
-static int of_mmc_spi_get_cd(struct device *dev)
-{
- return of_mmc_spi_read_gpio(dev, CD_GPIO);
-}
-
-static int of_mmc_spi_get_ro(struct device *dev)
-{
- return of_mmc_spi_read_gpio(dev, WP_GPIO);
-}
-
static int of_mmc_spi_init(struct device *dev,
irqreturn_t (*irqhandler)(int, void *), void *mmc)
{
@@ -130,20 +111,22 @@ struct mmc_spi_platform_data *mmc_spi_get_pdata(struct spi_device *spi)
if (!gpio_is_valid(oms->gpios[i]))
continue;
- ret = gpio_request(oms->gpios[i], dev_name(dev));
- if (ret < 0) {
- oms->gpios[i] = -EINVAL;
- continue;
- }
-
if (gpio_flags & OF_GPIO_ACTIVE_LOW)
oms->alow_gpios[i] = true;
}
- if (gpio_is_valid(oms->gpios[CD_GPIO]))
- oms->pdata.get_cd = of_mmc_spi_get_cd;
- if (gpio_is_valid(oms->gpios[WP_GPIO]))
- oms->pdata.get_ro = of_mmc_spi_get_ro;
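+	/* hand the GPIOs to the mmc core instead of polling them here; active-high lines are flagged via caps2 */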
+ if (gpio_is_valid(oms->gpios[CD_GPIO])) {
+ oms->pdata.cd_gpio = oms->gpios[CD_GPIO];
+ oms->pdata.flags |= MMC_SPI_USE_CD_GPIO;
+ if (!oms->alow_gpios[CD_GPIO])
+ oms->pdata.caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
+ }
+ if (gpio_is_valid(oms->gpios[WP_GPIO])) {
+ oms->pdata.ro_gpio = oms->gpios[WP_GPIO];
+ oms->pdata.flags |= MMC_SPI_USE_RO_GPIO;
+ if (!oms->alow_gpios[WP_GPIO])
+ oms->pdata.caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
+ }
oms->detect_irq = irq_of_parse_and_map(np, 0);
if (oms->detect_irq != 0) {
@@ -166,15 +149,10 @@ void mmc_spi_put_pdata(struct spi_device *spi)
struct device *dev = &spi->dev;
struct device_node *np = dev->of_node;
struct of_mmc_spi *oms = to_of_mmc_spi(dev);
- int i;
if (!dev->platform_data || !np)
return;
- for (i = 0; i < ARRAY_SIZE(oms->gpios); i++) {
- if (gpio_is_valid(oms->gpios[i]))
- gpio_free(oms->gpios[i]);
- }
kfree(oms);
dev->platform_data = NULL;
}
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 1865321..6ac63df 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -21,6 +21,7 @@
#include <linux/debugfs.h>
#include <linux/dmaengine.h>
#include <linux/seq_file.h>
+#include <linux/sizes.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
@@ -1041,6 +1042,7 @@ static void omap_hsmmc_do_irq(struct omap_hsmmc_host *host, int status)
}
}
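+	/* ack the handled status bits before completing cmd/data below, which may start a new request */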
+ OMAP_HSMMC_WRITE(host->base, STAT, status);
if (end_cmd || ((status & CC_EN) && host->cmd))
omap_hsmmc_cmd_done(host, host->cmd);
if ((end_trans || (status & TC_EN)) && host->mrq)
@@ -1060,7 +1062,6 @@ static irqreturn_t omap_hsmmc_irq(int irq, void *dev_id)
omap_hsmmc_do_irq(host, status);
/* Flush posted write */
- OMAP_HSMMC_WRITE(host->base, STAT, status);
status = OMAP_HSMMC_READ(host->base, STAT);
}
diff --git a/drivers/mmc/host/rtsx_pci_sdmmc.c b/drivers/mmc/host/rtsx_pci_sdmmc.c
index 82a35b9..375a880e 100644
--- a/drivers/mmc/host/rtsx_pci_sdmmc.c
+++ b/drivers/mmc/host/rtsx_pci_sdmmc.c
@@ -1,6 +1,6 @@
/* Realtek PCI-Express SD/MMC Card Interface driver
*
- * Copyright(c) 2009 Realtek Semiconductor Corp. All rights reserved.
+ * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -17,7 +17,6 @@
*
* Author:
* Wei WANG <wei_wang@realsil.com.cn>
- * No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
*/
#include <linux/module.h>
@@ -56,7 +55,6 @@ struct realtek_pci_sdmmc {
bool double_clk;
bool eject;
bool initial_mode;
- bool ddr_mode;
int power_state;
#define SDMMC_POWER_ON 1
#define SDMMC_POWER_OFF 0
@@ -228,6 +226,7 @@ static void sd_send_cmd_get_rsp(struct realtek_pci_sdmmc *host,
int stat_idx = 0;
u8 rsp_type;
int rsp_len = 5;
+ bool clock_toggled = false;
dev_dbg(sdmmc_dev(host), "%s: SD/MMC CMD %d, arg = 0x%08x\n",
__func__, cmd_idx, arg);
@@ -271,6 +270,8 @@ static void sd_send_cmd_get_rsp(struct realtek_pci_sdmmc *host,
0xFF, SD_CLK_TOGGLE_EN);
if (err < 0)
goto out;
+
+ clock_toggled = true;
}
rtsx_pci_init_cmd(pcr);
@@ -351,6 +352,10 @@ static void sd_send_cmd_get_rsp(struct realtek_pci_sdmmc *host,
out:
cmd->error = err;
+
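+	/* on error, stop the forced SD clock toggling enabled earlier in this function */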
+ if (err && clock_toggled)
+ rtsx_pci_write_register(pcr, SD_BUS_STAT,
+ SD_CLK_TOGGLE_EN | SD_CLK_FORCE_STOP, 0);
}
static int sd_rw_multi(struct realtek_pci_sdmmc *host, struct mmc_request *mrq)
@@ -475,18 +480,24 @@ static void sd_normal_rw(struct realtek_pci_sdmmc *host,
kfree(buf);
}
-static int sd_change_phase(struct realtek_pci_sdmmc *host, u8 sample_point)
+static int sd_change_phase(struct realtek_pci_sdmmc *host,
+ u8 sample_point, bool rx)
{
struct rtsx_pcr *pcr = host->pcr;
int err;
- dev_dbg(sdmmc_dev(host), "%s: sample_point = %d\n",
- __func__, sample_point);
+ dev_dbg(sdmmc_dev(host), "%s(%s): sample_point = %d\n",
+ __func__, rx ? "RX" : "TX", sample_point);
rtsx_pci_init_cmd(pcr);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL, CHANGE_CLK, CHANGE_CLK);
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPRX_CTL, 0x1F, sample_point);
+ if (rx)
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
+ SD_VPRX_CTL, 0x1F, sample_point);
+ else
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
+ SD_VPTX_CTL, 0x1F, sample_point);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL, PHASE_NOT_RESET, 0);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL,
PHASE_NOT_RESET, PHASE_NOT_RESET);
@@ -602,7 +613,7 @@ static int sd_tuning_rx_cmd(struct realtek_pci_sdmmc *host,
int err;
u8 cmd[5] = {0};
- err = sd_change_phase(host, sample_point);
+ err = sd_change_phase(host, sample_point, true);
if (err < 0)
return err;
@@ -664,7 +675,7 @@ static int sd_tuning_rx(struct realtek_pci_sdmmc *host, u8 opcode)
if (final_phase == 0xFF)
return -EINVAL;
- err = sd_change_phase(host, final_phase);
+ err = sd_change_phase(host, final_phase, true);
if (err < 0)
return err;
} else {
@@ -833,14 +844,11 @@ static int sd_set_power_mode(struct realtek_pci_sdmmc *host,
return err;
}
-static int sd_set_timing(struct realtek_pci_sdmmc *host,
- unsigned char timing, bool *ddr_mode)
+static int sd_set_timing(struct realtek_pci_sdmmc *host, unsigned char timing)
{
struct rtsx_pcr *pcr = host->pcr;
int err = 0;
- *ddr_mode = false;
-
rtsx_pci_init_cmd(pcr);
switch (timing) {
@@ -857,8 +865,6 @@ static int sd_set_timing(struct realtek_pci_sdmmc *host,
break;
case MMC_TIMING_UHS_DDR50:
- *ddr_mode = true;
-
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_CFG1,
0x0C | SD_ASYNC_FIFO_NOT_RST,
SD_DDR_MODE | SD_ASYNC_FIFO_NOT_RST);
@@ -926,7 +932,7 @@ static void sdmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
sd_set_bus_width(host, ios->bus_width);
sd_set_power_mode(host, ios->power_mode);
- sd_set_timing(host, ios->timing, &host->ddr_mode);
+ sd_set_timing(host, ios->timing);
host->vpclk = false;
host->double_clk = true;
@@ -1121,11 +1127,11 @@ static int sdmmc_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios)
goto out;
}
+out:
/* Stop toggle SD clock in idle */
err = rtsx_pci_write_register(pcr, SD_BUS_STAT,
SD_CLK_TOGGLE_EN | SD_CLK_FORCE_STOP, 0);
-out:
mutex_unlock(&pcr->pcr_mutex);
return err;
@@ -1148,9 +1154,35 @@ static int sdmmc_execute_tuning(struct mmc_host *mmc, u32 opcode)
rtsx_pci_start_run(pcr);
- if (!host->ddr_mode)
- err = sd_tuning_rx(host, MMC_SEND_TUNING_BLOCK);
+ /* Set initial TX phase */
+ switch (mmc->ios.timing) {
+ case MMC_TIMING_UHS_SDR104:
+ err = sd_change_phase(host, SDR104_TX_PHASE(pcr), false);
+ break;
+
+ case MMC_TIMING_UHS_SDR50:
+ err = sd_change_phase(host, SDR50_TX_PHASE(pcr), false);
+ break;
+
+ case MMC_TIMING_UHS_DDR50:
+ err = sd_change_phase(host, DDR50_TX_PHASE(pcr), false);
+ break;
+
+ default:
+ err = 0;
+ }
+ if (err)
+ goto out;
+
+	/* Tune RX phase */
+ if ((mmc->ios.timing == MMC_TIMING_UHS_SDR104) ||
+ (mmc->ios.timing == MMC_TIMING_UHS_SDR50))
+ err = sd_tuning_rx(host, opcode);
+ else if (mmc->ios.timing == MMC_TIMING_UHS_DDR50)
+ err = sd_change_phase(host, DDR50_RX_PHASE(pcr), true);
+
+out:
mutex_unlock(&pcr->pcr_mutex);
return err;
diff --git a/drivers/mmc/host/sdhci-bcm2835.c b/drivers/mmc/host/sdhci-bcm2835.c
index 0584a1c..36fa2df 100644
--- a/drivers/mmc/host/sdhci-bcm2835.c
+++ b/drivers/mmc/host/sdhci-bcm2835.c
@@ -119,7 +119,7 @@ static u8 bcm2835_sdhci_readb(struct sdhci_host *host, int reg)
return byte;
}
-unsigned int bcm2835_sdhci_get_min_clock(struct sdhci_host *host)
+static unsigned int bcm2835_sdhci_get_min_clock(struct sdhci_host *host)
{
return MIN_FREQ;
}
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index 1dd5ba8..abc8cf0 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -616,7 +616,7 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
/* card_detect */
switch (boarddata->cd_type) {
case ESDHC_CD_GPIO:
- err = mmc_gpio_request_cd(host->mmc, boarddata->cd_gpio);
+ err = mmc_gpio_request_cd(host->mmc, boarddata->cd_gpio, 0);
if (err) {
dev_err(mmc_dev(host->mmc),
"failed to request card-detect gpio!\n");
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index 15039e2..e328252 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -316,6 +316,7 @@ static int sdhci_esdhc_probe(struct platform_device *pdev)
/* call to generic mmc_of_parse to support additional capabilities */
mmc_of_parse(host->mmc);
+ mmc_of_parse_voltage(np, &host->ocr_mask);
ret = sdhci_add_host(host);
if (ret)
diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c
index bf99359..793dacd 100644
--- a/drivers/mmc/host/sdhci-pxav3.c
+++ b/drivers/mmc/host/sdhci-pxav3.c
@@ -278,7 +278,8 @@ static int sdhci_pxav3_probe(struct platform_device *pdev)
host->mmc->pm_caps |= pdata->pm_caps;
if (gpio_is_valid(pdata->ext_cd_gpio)) {
- ret = mmc_gpio_request_cd(host->mmc, pdata->ext_cd_gpio);
+ ret = mmc_gpio_request_cd(host->mmc, pdata->ext_cd_gpio,
+ 0);
if (ret) {
dev_err(mmc_dev(host->mmc),
"failed to allocate card detect gpio\n");
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
index 926aaf6..6debda9 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -296,9 +296,12 @@ static void sdhci_cmu_set_clock(struct sdhci_host *host, unsigned int clock)
unsigned long timeout;
u16 clk = 0;
- /* don't bother if the clock is going off */
- if (clock == 0)
+	/* If the clock is going off, write 0 to the clock control register */
+ if (clock == 0) {
+ sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
+ host->clock = clock;
return;
+ }
sdhci_s3c_set_clock(host, clock);
@@ -608,6 +611,7 @@ static int sdhci_s3c_probe(struct platform_device *pdev)
host->hw_name = "samsung-hsmmc";
host->ops = &sdhci_s3c_ops;
host->quirks = 0;
+ host->quirks2 = 0;
host->irq = irq;
/* Setup quirks for the controller */
diff --git a/drivers/mmc/host/sdhci-sirf.c b/drivers/mmc/host/sdhci-sirf.c
index 62a4a83..696122c 100644
--- a/drivers/mmc/host/sdhci-sirf.c
+++ b/drivers/mmc/host/sdhci-sirf.c
@@ -84,7 +84,7 @@ static int sdhci_sirf_probe(struct platform_device *pdev)
* gets setup in sdhci_add_host() and we oops.
*/
if (gpio_is_valid(priv->gpio_cd)) {
- ret = mmc_gpio_request_cd(host->mmc, priv->gpio_cd);
+ ret = mmc_gpio_request_cd(host->mmc, priv->gpio_cd, 0);
if (ret) {
dev_err(&pdev->dev, "card detect irq request failed: %d\n",
ret);
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index dd2c083..7a7fb4f 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -3119,6 +3119,9 @@ int sdhci_add_host(struct sdhci_host *host)
SDHCI_MAX_CURRENT_MULTIPLIER;
}
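+	/* an OCR mask provided by the host driver (e.g. parsed from DT) overrides the detected voltages */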
+ if (host->ocr_mask)
+ ocr_avail = host->ocr_mask;
+
mmc->ocr_avail = ocr_avail;
mmc->ocr_avail_sdio = ocr_avail;
if (host->ocr_avail_sdio)
@@ -3213,6 +3216,8 @@ int sdhci_add_host(struct sdhci_host *host)
host->tuning_timer.function = sdhci_tuning_timer;
}
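+	/* initialize the controller before request_irq(): a shared IRQ can fire as soon as it is registered */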
+ sdhci_init(host, 0);
+
ret = request_irq(host->irq, sdhci_irq, IRQF_SHARED,
mmc_hostname(mmc), host);
if (ret) {
@@ -3221,8 +3226,6 @@ int sdhci_add_host(struct sdhci_host *host)
goto untasklet;
}
- sdhci_init(host, 0);
-
#ifdef CONFIG_MMC_DEBUG
sdhci_dumpregs(host);
#endif
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index 6706b5e..36629a0 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -61,6 +61,7 @@
#include <linux/platform_device.h>
#include <linux/pm_qos.h>
#include <linux/pm_runtime.h>
+#include <linux/sh_dma.h>
#include <linux/spinlock.h>
#include <linux/module.h>
@@ -133,6 +134,8 @@
INT_BUFWEN | INT_CMD12DRE | INT_BUFRE | \
INT_DTRANE | INT_CMD12RBE | INT_CMD12CRE)
+#define INT_CCS (INT_CCSTO | INT_CCSRCV | INT_CCSDE)
+
/* CE_INT_MASK */
#define MASK_ALL 0x00000000
#define MASK_MCCSDE (1 << 29)
@@ -161,7 +164,7 @@
#define MASK_START_CMD (MASK_MCMDVIO | MASK_MBUFVIO | MASK_MWDATERR | \
MASK_MRDATERR | MASK_MRIDXERR | MASK_MRSPERR | \
- MASK_MCCSTO | MASK_MCRCSTO | MASK_MWDATTO | \
+ MASK_MCRCSTO | MASK_MWDATTO | \
MASK_MRDATTO | MASK_MRBSYTO | MASK_MRSPTO)
#define MASK_CLEAN (INT_ERR_STS | MASK_MRBSYE | MASK_MCRSPE | \
@@ -243,6 +246,8 @@ struct sh_mmcif_host {
int sg_blkidx;
bool power;
bool card_present;
+ bool ccs_enable; /* Command Completion Signal support */
+ bool clk_ctrl2_enable;
struct mutex thread_lock;
/* DMA support */
@@ -386,25 +391,29 @@ static void sh_mmcif_request_dma(struct sh_mmcif_host *host,
host->dma_active = false;
- if (!pdata)
- return;
-
- if (pdata->slave_id_tx <= 0 || pdata->slave_id_rx <= 0)
+ if (pdata) {
+ if (pdata->slave_id_tx <= 0 || pdata->slave_id_rx <= 0)
+ return;
+ } else if (!host->pd->dev.of_node) {
return;
+ }
/* We can only either use DMA for both Tx and Rx or not use it at all */
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
- host->chan_tx = dma_request_channel(mask, shdma_chan_filter,
- (void *)pdata->slave_id_tx);
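+	/* prefer a DT-described "tx" slave channel; fall back to the filter/param match */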
+ host->chan_tx = dma_request_slave_channel_compat(mask, shdma_chan_filter,
+ pdata ? (void *)pdata->slave_id_tx : NULL,
+ &host->pd->dev, "tx");
dev_dbg(&host->pd->dev, "%s: TX: got channel %p\n", __func__,
host->chan_tx);
if (!host->chan_tx)
return;
- cfg.slave_id = pdata->slave_id_tx;
+ /* In the OF case the driver will get the slave ID from the DT */
+ if (pdata)
+ cfg.slave_id = pdata->slave_id_tx;
cfg.direction = DMA_MEM_TO_DEV;
cfg.dst_addr = res->start + MMCIF_CE_DATA;
cfg.src_addr = 0;
@@ -412,15 +421,17 @@ static void sh_mmcif_request_dma(struct sh_mmcif_host *host,
if (ret < 0)
goto ecfgtx;
- host->chan_rx = dma_request_channel(mask, shdma_chan_filter,
- (void *)pdata->slave_id_rx);
+ host->chan_rx = dma_request_slave_channel_compat(mask, shdma_chan_filter,
+ pdata ? (void *)pdata->slave_id_rx : NULL,
+ &host->pd->dev, "rx");
dev_dbg(&host->pd->dev, "%s: RX: got channel %p\n", __func__,
host->chan_rx);
if (!host->chan_rx)
goto erqrx;
- cfg.slave_id = pdata->slave_id_rx;
+ if (pdata)
+ cfg.slave_id = pdata->slave_id_rx;
cfg.direction = DMA_DEV_TO_MEM;
cfg.dst_addr = 0;
cfg.src_addr = res->start + MMCIF_CE_DATA;
@@ -485,8 +496,12 @@ static void sh_mmcif_sync_reset(struct sh_mmcif_host *host)
sh_mmcif_writel(host->addr, MMCIF_CE_VERSION, SOFT_RST_ON);
sh_mmcif_writel(host->addr, MMCIF_CE_VERSION, SOFT_RST_OFF);
+ if (host->ccs_enable)
+ tmp |= SCCSTO_29;
+ if (host->clk_ctrl2_enable)
+ sh_mmcif_writel(host->addr, MMCIF_CE_CLK_CTRL2, 0x0F0F0000);
sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, tmp |
- SRSPTO_256 | SRBSYTO_29 | SRWDTO_29 | SCCSTO_29);
+ SRSPTO_256 | SRBSYTO_29 | SRWDTO_29);
/* byte swap on */
sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_ATYP);
}
@@ -866,6 +881,9 @@ static void sh_mmcif_start_cmd(struct sh_mmcif_host *host,
break;
}
+ if (host->ccs_enable)
+ mask |= MASK_MCCSTO;
+
if (mrq->data) {
sh_mmcif_writel(host->addr, MMCIF_CE_BLOCK_SET, 0);
sh_mmcif_writel(host->addr, MMCIF_CE_BLOCK_SET,
@@ -873,7 +891,10 @@ static void sh_mmcif_start_cmd(struct sh_mmcif_host *host,
}
opc = sh_mmcif_set_cmd(host, mrq);
- sh_mmcif_writel(host->addr, MMCIF_CE_INT, 0xD80430C0);
+ if (host->ccs_enable)
+ sh_mmcif_writel(host->addr, MMCIF_CE_INT, 0xD80430C0);
+ else
+ sh_mmcif_writel(host->addr, MMCIF_CE_INT, 0xD80430C0 | INT_CCS);
sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, mask);
/* set arg */
sh_mmcif_writel(host->addr, MMCIF_CE_ARG, cmd->arg);
@@ -956,11 +977,8 @@ static int sh_mmcif_clk_update(struct sh_mmcif_host *host)
static void sh_mmcif_set_power(struct sh_mmcif_host *host, struct mmc_ios *ios)
{
- struct sh_mmcif_plat_data *pd = host->pd->dev.platform_data;
struct mmc_host *mmc = host->mmc;
- if (pd && pd->set_pwr)
- pd->set_pwr(host->pd, ios->power_mode != MMC_POWER_OFF);
if (!IS_ERR(mmc->supply.vmmc))
/* Errors ignored... */
mmc_regulator_set_ocr(mmc, mmc->supply.vmmc,
@@ -1241,11 +1259,14 @@ static irqreturn_t sh_mmcif_irqt(int irq, void *dev_id)
static irqreturn_t sh_mmcif_intr(int irq, void *dev_id)
{
struct sh_mmcif_host *host = dev_id;
- u32 state;
+ u32 state, mask;
state = sh_mmcif_readl(host->addr, MMCIF_CE_INT);
- sh_mmcif_writel(host->addr, MMCIF_CE_INT,
- ~(state & sh_mmcif_readl(host->addr, MMCIF_CE_INT_MASK)));
+ mask = sh_mmcif_readl(host->addr, MMCIF_CE_INT_MASK);
+ if (host->ccs_enable)
+ sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~(state & mask));
+ else
+ sh_mmcif_writel(host->addr, MMCIF_CE_INT, INT_CCS | ~(state & mask));
sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state & MASK_CLEAN);
if (state & ~MASK_CLEAN)
@@ -1379,6 +1400,8 @@ static int sh_mmcif_probe(struct platform_device *pdev)
host->mmc = mmc;
host->addr = reg;
host->timeout = msecs_to_jiffies(1000);
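+	/* CCS is assumed supported unless platform data explicitly opts out */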
+ host->ccs_enable = !pd || !pd->ccs_unsupported;
+ host->clk_ctrl2_enable = pd && pd->clk_ctrl2_present;
host->pd = pdev;
@@ -1436,7 +1459,7 @@ static int sh_mmcif_probe(struct platform_device *pdev)
}
if (pd && pd->use_cd_gpio) {
- ret = mmc_gpio_request_cd(mmc, pd->cd_gpio);
+ ret = mmc_gpio_request_cd(mmc, pd->cd_gpio, 0);
if (ret < 0)
goto erqcd;
}
diff --git a/drivers/mmc/host/sh_mobile_sdhi.c b/drivers/mmc/host/sh_mobile_sdhi.c
index ebea749..87ed3fb 100644
--- a/drivers/mmc/host/sh_mobile_sdhi.c
+++ b/drivers/mmc/host/sh_mobile_sdhi.c
@@ -70,20 +70,6 @@ static void sh_mobile_sdhi_clk_disable(struct platform_device *pdev)
clk_disable(priv->clk);
}
-static void sh_mobile_sdhi_set_pwr(struct platform_device *pdev, int state)
-{
- struct sh_mobile_sdhi_info *p = pdev->dev.platform_data;
-
- p->set_pwr(pdev, state);
-}
-
-static int sh_mobile_sdhi_get_cd(struct platform_device *pdev)
-{
- struct sh_mobile_sdhi_info *p = pdev->dev.platform_data;
-
- return p->get_cd(pdev);
-}
-
static int sh_mobile_sdhi_wait_idle(struct tmio_mmc_host *host)
{
int timeout = 1000;
@@ -129,7 +115,12 @@ static const struct sh_mobile_sdhi_ops sdhi_ops = {
static const struct of_device_id sh_mobile_sdhi_of_match[] = {
{ .compatible = "renesas,shmobile-sdhi" },
{ .compatible = "renesas,sh7372-sdhi" },
+ { .compatible = "renesas,sh73a0-sdhi", .data = &sh_mobile_sdhi_of_cfg[0], },
+ { .compatible = "renesas,r8a73a4-sdhi", .data = &sh_mobile_sdhi_of_cfg[0], },
{ .compatible = "renesas,r8a7740-sdhi", .data = &sh_mobile_sdhi_of_cfg[0], },
+ { .compatible = "renesas,r8a7778-sdhi", .data = &sh_mobile_sdhi_of_cfg[0], },
+ { .compatible = "renesas,r8a7779-sdhi", .data = &sh_mobile_sdhi_of_cfg[0], },
+ { .compatible = "renesas,r8a7790-sdhi", .data = &sh_mobile_sdhi_of_cfg[0], },
{},
};
MODULE_DEVICE_TABLE(of, sh_mobile_sdhi_of_match);
@@ -180,10 +171,6 @@ static int sh_mobile_sdhi_probe(struct platform_device *pdev)
mmc_data->capabilities |= p->tmio_caps;
mmc_data->capabilities2 |= p->tmio_caps2;
mmc_data->cd_gpio = p->cd_gpio;
- if (p->set_pwr)
- mmc_data->set_pwr = sh_mobile_sdhi_set_pwr;
- if (p->get_cd)
- mmc_data->get_cd = sh_mobile_sdhi_get_cd;
if (p->dma_slave_tx > 0 && p->dma_slave_rx > 0) {
/*
diff --git a/drivers/mmc/host/tmio_mmc_dma.c b/drivers/mmc/host/tmio_mmc_dma.c
index 47bdb8f..65edb4a 100644
--- a/drivers/mmc/host/tmio_mmc_dma.c
+++ b/drivers/mmc/host/tmio_mmc_dma.c
@@ -104,6 +104,7 @@ static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
pio:
if (!desc) {
/* DMA failed, fall back to PIO */
+ tmio_mmc_enable_dma(host, false);
if (ret >= 0)
ret = -EIO;
host->chan_rx = NULL;
@@ -116,7 +117,6 @@ pio:
}
dev_warn(&host->pdev->dev,
"DMA failed: %d, falling back to PIO\n", ret);
- tmio_mmc_enable_dma(host, false);
}
dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__,
@@ -185,6 +185,7 @@ static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
pio:
if (!desc) {
/* DMA failed, fall back to PIO */
+ tmio_mmc_enable_dma(host, false);
if (ret >= 0)
ret = -EIO;
host->chan_tx = NULL;
@@ -197,7 +198,6 @@ pio:
}
dev_warn(&host->pdev->dev,
"DMA failed: %d, falling back to PIO\n", ret);
- tmio_mmc_enable_dma(host, false);
}
dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d\n", __func__,
diff --git a/drivers/mmc/host/tmio_mmc_pio.c b/drivers/mmc/host/tmio_mmc_pio.c
index b72edb7..b380225 100644
--- a/drivers/mmc/host/tmio_mmc_pio.c
+++ b/drivers/mmc/host/tmio_mmc_pio.c
@@ -795,9 +795,13 @@ static void tmio_mmc_power_on(struct tmio_mmc_host *host, unsigned short vdd)
* omap_hsmmc.c driver does.
*/
if (!IS_ERR(mmc->supply.vqmmc) && !ret) {
- regulator_enable(mmc->supply.vqmmc);
+ ret = regulator_enable(mmc->supply.vqmmc);
udelay(200);
}
+
+ if (ret < 0)
+ dev_dbg(&host->pdev->dev, "Regulators failed to power up: %d\n",
+ ret);
}
static void tmio_mmc_power_off(struct tmio_mmc_host *host)
@@ -932,25 +936,11 @@ static int tmio_mmc_get_ro(struct mmc_host *mmc)
(sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT));
}
-static int tmio_mmc_get_cd(struct mmc_host *mmc)
-{
- struct tmio_mmc_host *host = mmc_priv(mmc);
- struct tmio_mmc_data *pdata = host->pdata;
- int ret = mmc_gpio_get_cd(mmc);
- if (ret >= 0)
- return ret;
-
- if (!pdata->get_cd)
- return -ENOSYS;
- else
- return pdata->get_cd(host->pdev);
-}
-
static const struct mmc_host_ops tmio_mmc_ops = {
.request = tmio_mmc_request,
.set_ios = tmio_mmc_set_ios,
.get_ro = tmio_mmc_get_ro,
- .get_cd = tmio_mmc_get_cd,
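+	/* mmc_gpio_get_cd() matches the .get_cd prototype, so no wrapper is needed */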
+ .get_cd = mmc_gpio_get_cd,
.enable_sdio_irq = tmio_mmc_enable_sdio_irq,
};
@@ -1106,7 +1096,7 @@ int tmio_mmc_host_probe(struct tmio_mmc_host **host,
dev_pm_qos_expose_latency_limit(&pdev->dev, 100);
if (pdata->flags & TMIO_MMC_USE_GPIO_CD) {
- ret = mmc_gpio_request_cd(mmc, pdata->cd_gpio);
+ ret = mmc_gpio_request_cd(mmc, pdata->cd_gpio, 0);
if (ret < 0) {
tmio_mmc_host_remove(_host);
return ret;
diff --git a/drivers/mmc/host/vub300.c b/drivers/mmc/host/vub300.c
index cb9f361..e9028ad 100644
--- a/drivers/mmc/host/vub300.c
+++ b/drivers/mmc/host/vub300.c
@@ -2079,7 +2079,7 @@ static void vub300_enable_sdio_irq(struct mmc_host *mmc, int enable)
kref_put(&vub300->kref, vub300_delete);
}
-void vub300_init_card(struct mmc_host *mmc, struct mmc_card *card)
+static void vub300_init_card(struct mmc_host *mmc, struct mmc_card *card)
{ /* NOT irq */
struct vub300_mmc_host *vub300 = mmc_priv(mmc);
dev_info(&vub300->udev->dev, "NO host QUIRKS for this card\n");
diff --git a/drivers/mtd/bcm63xxpart.c b/drivers/mtd/bcm63xxpart.c
index 6eeb84c..5c81390 100644
--- a/drivers/mtd/bcm63xxpart.c
+++ b/drivers/mtd/bcm63xxpart.c
@@ -4,7 +4,7 @@
* Copyright © 2006-2008 Florian Fainelli <florian@openwrt.org>
* Mike Albon <malbon@openwrt.org>
* Copyright © 2009-2010 Daniel Dickinson <openwrt@cshore.neomailbox.net>
- * Copyright © 2011-2012 Jonas Gorski <jonas.gorski@gmail.com>
+ * Copyright © 2011-2013 Jonas Gorski <jonas.gorski@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -27,17 +27,19 @@
#include <linux/crc32.h>
#include <linux/module.h>
#include <linux/kernel.h>
+#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
+#include <asm/mach-bcm63xx/bcm63xx_nvram.h>
#include <asm/mach-bcm63xx/bcm963xx_tag.h>
#include <asm/mach-bcm63xx/board_bcm963xx.h>
#define BCM63XX_EXTENDED_SIZE 0xBFC00000 /* Extended flash address */
-#define BCM63XX_CFE_BLOCK_SIZE 0x10000 /* always at least 64KiB */
+#define BCM63XX_CFE_BLOCK_SIZE SZ_64K /* always at least 64KiB */
#define BCM63XX_CFE_MAGIC_OFFSET 0x4e0
@@ -90,7 +92,8 @@ static int bcm63xx_parse_cfe_partitions(struct mtd_info *master,
BCM63XX_CFE_BLOCK_SIZE);
cfelen = cfe_erasesize;
- nvramlen = cfe_erasesize;
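+	/* the PSI size from nvram is in KiB; round up to a whole erase block */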
+ nvramlen = bcm63xx_nvram_get_psi_size() * SZ_1K;
+ nvramlen = roundup(nvramlen, cfe_erasesize);
/* Allocate memory for buffer */
buf = vmalloc(sizeof(struct bcm_tag));
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index fff665d..89b9d68 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -1571,8 +1571,8 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
xip_enable(map, chip, adr);
/* FIXME - should have reset delay before continuing */
- printk(KERN_WARNING "MTD %s(): software timeout\n",
- __func__ );
+ printk(KERN_WARNING "MTD %s(): software timeout, address:0x%.8lx.\n",
+ __func__, adr);
ret = -EIO;
op_done:
diff --git a/drivers/mtd/chips/gen_probe.c b/drivers/mtd/chips/gen_probe.c
index 74dbb6b..ffb36ba 100644
--- a/drivers/mtd/chips/gen_probe.c
+++ b/drivers/mtd/chips/gen_probe.c
@@ -211,9 +211,7 @@ static inline struct mtd_info *cfi_cmdset_unknown(struct map_info *map,
probe_function = __symbol_get(probename);
if (!probe_function) {
- char modname[sizeof("cfi_cmdset_%4.4X")];
- sprintf(modname, "cfi_cmdset_%4.4X", type);
- request_module(modname);
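+		/* request_module() takes a printf-style format, so no temporary buffer is needed */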
+ request_module("cfi_cmdset_%4.4X", type);
probe_function = __symbol_get(probename);
}
diff --git a/drivers/mtd/chips/jedec_probe.c b/drivers/mtd/chips/jedec_probe.c
index c443f52..7c0b27d 100644
--- a/drivers/mtd/chips/jedec_probe.c
+++ b/drivers/mtd/chips/jedec_probe.c
@@ -120,7 +120,7 @@
#define PM49FL008 0x006A
/* Sharp */
-#define LH28F640BF 0x00b0
+#define LH28F640BF 0x00B0
/* ST - www.st.com */
#define M29F800AB 0x0058
@@ -1299,13 +1299,14 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = CFI_MFR_SHARP,
.dev_id = LH28F640BF,
.name = "LH28F640BF",
- .devtypes = CFI_DEVICETYPE_X8,
+ .devtypes = CFI_DEVICETYPE_X16,
.uaddr = MTD_UADDR_UNNECESSARY,
- .dev_size = SIZE_4MiB,
- .cmd_set = P_ID_INTEL_STD,
- .nr_regions = 1,
+ .dev_size = SIZE_8MiB,
+ .cmd_set = P_ID_INTEL_EXT,
+ .nr_regions = 2,
.regions = {
- ERASEINFO(0x40000,16),
+ ERASEINFO(0x10000, 127),
+ ERASEINFO(0x02000, 8),
}
}, {
.mfr_id = CFI_MFR_SST,
diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig
index 2a4d55e..74ab4b7 100644
--- a/drivers/mtd/devices/Kconfig
+++ b/drivers/mtd/devices/Kconfig
@@ -224,59 +224,4 @@ config BCH_CONST_T
default 4
endif
-config MTD_DOCPROBE
- tristate
- select MTD_DOCECC
-
-config MTD_DOCECC
- tristate
-
-config MTD_DOCPROBE_ADVANCED
- bool "Advanced detection options for DiskOnChip"
- depends on MTD_DOCPROBE
- help
- This option allows you to specify nonstandard address at which to
- probe for a DiskOnChip, or to change the detection options. You
- are unlikely to need any of this unless you are using LinuxBIOS.
- Say 'N'.
-
-config MTD_DOCPROBE_ADDRESS
- hex "Physical address of DiskOnChip" if MTD_DOCPROBE_ADVANCED
- depends on MTD_DOCPROBE
- default "0x0"
- ---help---
- By default, the probe for DiskOnChip devices will look for a
- DiskOnChip at every multiple of 0x2000 between 0xC8000 and 0xEE000.
- This option allows you to specify a single address at which to probe
- for the device, which is useful if you have other devices in that
- range which get upset when they are probed.
-
- (Note that on PowerPC, the normal probe will only check at
- 0xE4000000.)
-
- Normally, you should leave this set to zero, to allow the probe at
- the normal addresses.
-
-config MTD_DOCPROBE_HIGH
- bool "Probe high addresses"
- depends on MTD_DOCPROBE_ADVANCED
- help
- By default, the probe for DiskOnChip devices will look for a
- DiskOnChip at every multiple of 0x2000 between 0xC8000 and 0xEE000.
- This option changes to make it probe between 0xFFFC8000 and
- 0xFFFEE000. Unless you are using LinuxBIOS, this is unlikely to be
- useful to you. Say 'N'.
-
-config MTD_DOCPROBE_55AA
- bool "Probe for 0x55 0xAA BIOS Extension Signature"
- depends on MTD_DOCPROBE_ADVANCED
- help
- Check for the 0x55 0xAA signature of a DiskOnChip, and do not
- continue with probing if it is absent. The signature will always be
- present for a DiskOnChip 2000 or a normal DiskOnChip Millennium.
- Only if you have overwritten the first block of a DiskOnChip
- Millennium will it be absent. Enable this option if you are using
- LinuxBIOS or if you need to recover a DiskOnChip Millennium on which
- you have managed to wipe the first block.
-
endmenu
diff --git a/drivers/mtd/devices/bcm47xxsflash.c b/drivers/mtd/devices/bcm47xxsflash.c
index 18e7761..77de29b 100644
--- a/drivers/mtd/devices/bcm47xxsflash.c
+++ b/drivers/mtd/devices/bcm47xxsflash.c
@@ -1,6 +1,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/delay.h>
#include <linux/mtd/mtd.h>
#include <linux/platform_device.h>
#include <linux/bcma/bcma.h>
@@ -12,6 +13,93 @@ MODULE_DESCRIPTION("Serial flash driver for BCMA bus");
static const char * const probes[] = { "bcm47xxpart", NULL };
+/**************************************************
+ * Various helpers
+ **************************************************/
+
+static void bcm47xxsflash_cmd(struct bcm47xxsflash *b47s, u32 opcode)
+{
+ int i;
+
+ b47s->cc_write(b47s, BCMA_CC_FLASHCTL, BCMA_CC_FLASHCTL_START | opcode);
+ for (i = 0; i < 1000; i++) {
+ if (!(b47s->cc_read(b47s, BCMA_CC_FLASHCTL) &
+ BCMA_CC_FLASHCTL_BUSY))
+ return;
+ cpu_relax();
+ }
+ pr_err("Control command failed (timeout)!\n");
+}
+
+static int bcm47xxsflash_poll(struct bcm47xxsflash *b47s, int timeout)
+{
+ unsigned long deadline = jiffies + timeout;
+
+ do {
+ switch (b47s->type) {
+ case BCM47XXSFLASH_TYPE_ST:
+ bcm47xxsflash_cmd(b47s, OPCODE_ST_RDSR);
+ if (!(b47s->cc_read(b47s, BCMA_CC_FLASHDATA) &
+ SR_ST_WIP))
+ return 0;
+ break;
+ case BCM47XXSFLASH_TYPE_ATMEL:
+ bcm47xxsflash_cmd(b47s, OPCODE_AT_STATUS);
+ if (b47s->cc_read(b47s, BCMA_CC_FLASHDATA) &
+ SR_AT_READY)
+ return 0;
+ break;
+ }
+
+ cpu_relax();
+ udelay(1);
+ } while (!time_after_eq(jiffies, deadline));
+
+ pr_err("Timeout waiting for flash to be ready!\n");
+
+ return -EBUSY;
+}
+
+/**************************************************
+ * MTD ops
+ **************************************************/
+
+static int bcm47xxsflash_erase(struct mtd_info *mtd, struct erase_info *erase)
+{
+ struct bcm47xxsflash *b47s = mtd->priv;
+ int err;
+
+ switch (b47s->type) {
+ case BCM47XXSFLASH_TYPE_ST:
+ bcm47xxsflash_cmd(b47s, OPCODE_ST_WREN);
+ b47s->cc_write(b47s, BCMA_CC_FLASHADDR, erase->addr);
+ /* Newer flashes have "sub-sectors" which can be erased
+ * independently with a new command: ST_SSE. The ST_SE command
+ * erases 64KB just as before.
+ */
+ if (b47s->blocksize < (64 * 1024))
+ bcm47xxsflash_cmd(b47s, OPCODE_ST_SSE);
+ else
+ bcm47xxsflash_cmd(b47s, OPCODE_ST_SE);
+ break;
+ case BCM47XXSFLASH_TYPE_ATMEL:
+ b47s->cc_write(b47s, BCMA_CC_FLASHADDR, erase->addr << 1);
+ bcm47xxsflash_cmd(b47s, OPCODE_AT_PAGE_ERASE);
+ break;
+ }
+
+ err = bcm47xxsflash_poll(b47s, HZ);
+ if (err)
+ erase->state = MTD_ERASE_FAILED;
+ else
+ erase->state = MTD_ERASE_DONE;
+
+ if (erase->callback)
+ erase->callback(erase);
+
+ return err;
+}
+
static int bcm47xxsflash_read(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, u_char *buf)
{
@@ -28,6 +116,127 @@ static int bcm47xxsflash_read(struct mtd_info *mtd, loff_t from, size_t len,
return len;
}
+static int bcm47xxsflash_write_st(struct mtd_info *mtd, u32 offset, size_t len,
+ const u_char *buf)
+{
+ struct bcm47xxsflash *b47s = mtd->priv;
+ int written = 0;
+
+ /* Enable writes */
+ bcm47xxsflash_cmd(b47s, OPCODE_ST_WREN);
+
+ /* Write first byte */
+ b47s->cc_write(b47s, BCMA_CC_FLASHADDR, offset);
+ b47s->cc_write(b47s, BCMA_CC_FLASHDATA, *buf++);
+
+ /* Program page */
+ if (b47s->bcma_cc->core->id.rev < 20) {
+ bcm47xxsflash_cmd(b47s, OPCODE_ST_PP);
+ return 1; /* 1B written */
+ }
+
+ /* Program page and set CSA (on newer chips we can continue writing) */
+ bcm47xxsflash_cmd(b47s, OPCODE_ST_CSA | OPCODE_ST_PP);
+ offset++;
+ len--;
+ written++;
+
+ while (len > 0) {
+ /* Page boundary, another function call is needed */
+ if ((offset & 0xFF) == 0)
+ break;
+
+ bcm47xxsflash_cmd(b47s, OPCODE_ST_CSA | *buf++);
+ offset++;
+ len--;
+ written++;
+ }
+
+ /* All done, drop CSA & poll */
+ b47s->cc_write(b47s, BCMA_CC_FLASHCTL, 0);
+ udelay(1);
+ if (bcm47xxsflash_poll(b47s, HZ / 10))
+ pr_err("Flash rejected dropping CSA\n");
+
+ return written;
+}
+
+static int bcm47xxsflash_write_at(struct mtd_info *mtd, u32 offset, size_t len,
+ const u_char *buf)
+{
+ struct bcm47xxsflash *b47s = mtd->priv;
+ u32 mask = b47s->blocksize - 1;
+ u32 page = (offset & ~mask) << 1;
+ u32 byte = offset & mask;
+ int written = 0;
+
+	/* If we don't overwrite the whole page, read it into the buffer first */
+ if (byte || (len < b47s->blocksize)) {
+ int err;
+
+ b47s->cc_write(b47s, BCMA_CC_FLASHADDR, page);
+ bcm47xxsflash_cmd(b47s, OPCODE_AT_BUF1_LOAD);
+ /* 250 us for AT45DB321B */
+ err = bcm47xxsflash_poll(b47s, HZ / 1000);
+ if (err) {
+ pr_err("Timeout reading page 0x%X info buffer\n", page);
+ return err;
+ }
+ }
+
+ /* Change buffer content with our data */
+ while (len > 0) {
+ /* Page boundary, another function call is needed */
+ if (byte == b47s->blocksize)
+ break;
+
+ b47s->cc_write(b47s, BCMA_CC_FLASHADDR, byte++);
+ b47s->cc_write(b47s, BCMA_CC_FLASHDATA, *buf++);
+ bcm47xxsflash_cmd(b47s, OPCODE_AT_BUF1_WRITE);
+ len--;
+ written++;
+ }
+
+ /* Program page with the buffer content */
+ b47s->cc_write(b47s, BCMA_CC_FLASHADDR, page);
+ bcm47xxsflash_cmd(b47s, OPCODE_AT_BUF1_PROGRAM);
+
+ return written;
+}
+
+static int bcm47xxsflash_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
+{
+ struct bcm47xxsflash *b47s = mtd->priv;
+ int written;
+
+ /* Writing functions can return without writing all passed data, for
+	 * example when the hardware is too old or when we hit a page boundary.
+ */
+ while (len > 0) {
+ switch (b47s->type) {
+ case BCM47XXSFLASH_TYPE_ST:
+ written = bcm47xxsflash_write_st(mtd, to, len, buf);
+ break;
+ case BCM47XXSFLASH_TYPE_ATMEL:
+ written = bcm47xxsflash_write_at(mtd, to, len, buf);
+ break;
+ default:
+ BUG_ON(1);
+ }
+ if (written < 0) {
+ pr_err("Error writing at offset 0x%llX\n", to);
+ return written;
+ }
+ to += (loff_t)written;
+ len -= written;
+ *retlen += written;
+ buf += written;
+ }
+
+ return 0;
+}
+
static void bcm47xxsflash_fill_mtd(struct bcm47xxsflash *b47s)
{
struct mtd_info *mtd = &b47s->mtd;
@@ -35,33 +244,48 @@ static void bcm47xxsflash_fill_mtd(struct bcm47xxsflash *b47s)
mtd->priv = b47s;
mtd->name = "bcm47xxsflash";
mtd->owner = THIS_MODULE;
- mtd->type = MTD_ROM;
+
+ mtd->type = MTD_NORFLASH;
+ mtd->flags = MTD_CAP_NORFLASH;
mtd->size = b47s->size;
- mtd->_read = bcm47xxsflash_read;
+ mtd->erasesize = b47s->blocksize;
+ mtd->writesize = 1;
+ mtd->writebufsize = 1;
- /* TODO: implement writing support and verify/change following code */
- mtd->flags = MTD_CAP_ROM;
- mtd->writebufsize = mtd->writesize = 1;
+ mtd->_erase = bcm47xxsflash_erase;
+ mtd->_read = bcm47xxsflash_read;
+ mtd->_write = bcm47xxsflash_write;
}
/**************************************************
* BCMA
**************************************************/
+static int bcm47xxsflash_bcma_cc_read(struct bcm47xxsflash *b47s, u16 offset)
+{
+ return bcma_cc_read32(b47s->bcma_cc, offset);
+}
+
+static void bcm47xxsflash_bcma_cc_write(struct bcm47xxsflash *b47s, u16 offset,
+ u32 value)
+{
+ bcma_cc_write32(b47s->bcma_cc, offset, value);
+}
+
static int bcm47xxsflash_bcma_probe(struct platform_device *pdev)
{
struct bcma_sflash *sflash = dev_get_platdata(&pdev->dev);
struct bcm47xxsflash *b47s;
int err;
- b47s = kzalloc(sizeof(*b47s), GFP_KERNEL);
- if (!b47s) {
- err = -ENOMEM;
- goto out;
- }
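+	/* devm allocation is freed automatically on detach, so the kfree() calls go away */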
+ b47s = devm_kzalloc(&pdev->dev, sizeof(*b47s), GFP_KERNEL);
+ if (!b47s)
+ return -ENOMEM;
sflash->priv = b47s;
b47s->bcma_cc = container_of(sflash, struct bcma_drv_cc, sflash);
+ b47s->cc_read = bcm47xxsflash_bcma_cc_read;
+ b47s->cc_write = bcm47xxsflash_bcma_cc_write;
switch (b47s->bcma_cc->capabilities & BCMA_CC_CAP_FLASHT) {
case BCMA_CC_FLASHT_STSER:
@@ -81,15 +305,13 @@ static int bcm47xxsflash_bcma_probe(struct platform_device *pdev)
err = mtd_device_parse_register(&b47s->mtd, probes, NULL, NULL, 0);
if (err) {
pr_err("Failed to register MTD device: %d\n", err);
- goto err_dev_reg;
+ return err;
}
- return 0;
+ if (bcm47xxsflash_poll(b47s, HZ / 10))
+ pr_warn("Serial flash busy\n");
-err_dev_reg:
- kfree(&b47s->mtd);
-out:
- return err;
+ return 0;
}
static int bcm47xxsflash_bcma_remove(struct platform_device *pdev)
@@ -98,7 +320,6 @@ static int bcm47xxsflash_bcma_remove(struct platform_device *pdev)
struct bcm47xxsflash *b47s = sflash->priv;
mtd_device_unregister(&b47s->mtd);
- kfree(b47s);
return 0;
}
@@ -116,22 +337,4 @@ static struct platform_driver bcma_sflash_driver = {
* Init
**************************************************/
-static int __init bcm47xxsflash_init(void)
-{
- int err;
-
- err = platform_driver_register(&bcma_sflash_driver);
- if (err)
- pr_err("Failed to register BCMA serial flash driver: %d\n",
- err);
-
- return err;
-}
-
-static void __exit bcm47xxsflash_exit(void)
-{
- platform_driver_unregister(&bcma_sflash_driver);
-}
-
-module_init(bcm47xxsflash_init);
-module_exit(bcm47xxsflash_exit);
+module_platform_driver(bcma_sflash_driver);
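+/* module_platform_driver() expands to the register/unregister boilerplate removed above */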
diff --git a/drivers/mtd/devices/bcm47xxsflash.h b/drivers/mtd/devices/bcm47xxsflash.h
index f22f8c4..fe93daf 100644
--- a/drivers/mtd/devices/bcm47xxsflash.h
+++ b/drivers/mtd/devices/bcm47xxsflash.h
@@ -60,6 +60,8 @@ enum bcm47xxsflash_type {
struct bcm47xxsflash {
struct bcma_drv_cc *bcma_cc;
+ int (*cc_read)(struct bcm47xxsflash *b47s, u16 offset);
+ void (*cc_write)(struct bcm47xxsflash *b47s, u16 offset, u32 value);
enum bcm47xxsflash_type type;
diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c
index e081bfe..5cb4c04 100644
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -6,6 +6,9 @@
*
* Licence: GPL
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
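+/* pr_err()/pr_info() below now prefix every message with "block2mtd: " */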
+
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
@@ -18,10 +21,6 @@
#include <linux/mount.h>
#include <linux/slab.h>
-#define ERROR(fmt, args...) printk(KERN_ERR "block2mtd: " fmt "\n" , ## args)
-#define INFO(fmt, args...) printk(KERN_INFO "block2mtd: " fmt "\n" , ## args)
-
-
/* Info for the block device */
struct block2mtd_dev {
struct list_head list;
@@ -84,7 +83,7 @@ static int block2mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
err = _block2mtd_erase(dev, from, len);
mutex_unlock(&dev->write_mutex);
if (err) {
- ERROR("erase failed err = %d", err);
+ pr_err("erase failed err = %d\n", err);
instr->state = MTD_ERASE_FAILED;
} else
instr->state = MTD_ERASE_DONE;
@@ -239,13 +238,13 @@ static struct block2mtd_dev *add_device(char *devname, int erase_size)
#endif
if (IS_ERR(bdev)) {
- ERROR("error: cannot open device %s", devname);
+ pr_err("error: cannot open device %s\n", devname);
goto devinit_err;
}
dev->blkdev = bdev;
if (MAJOR(bdev->bd_dev) == MTD_BLOCK_MAJOR) {
- ERROR("attempting to use an MTD device as a block device");
+ pr_err("attempting to use an MTD device as a block device\n");
goto devinit_err;
}
@@ -277,9 +276,10 @@ static struct block2mtd_dev *add_device(char *devname, int erase_size)
goto devinit_err;
}
list_add(&dev->list, &blkmtd_device_list);
- INFO("mtd%d: [%s] erase_size = %dKiB [%d]", dev->mtd.index,
- dev->mtd.name + strlen("block2mtd: "),
- dev->mtd.erasesize >> 10, dev->mtd.erasesize);
+ pr_info("mtd%d: [%s] erase_size = %dKiB [%d]\n",
+ dev->mtd.index,
+ dev->mtd.name + strlen("block2mtd: "),
+ dev->mtd.erasesize >> 10, dev->mtd.erasesize);
return dev;
devinit_err:
@@ -339,17 +339,11 @@ static inline void kill_final_newline(char *str)
}
-#define parse_err(fmt, args...) do { \
- ERROR(fmt, ## args); \
- return 0; \
-} while (0)
-
#ifndef MODULE
static int block2mtd_init_called = 0;
static char block2mtd_paramline[80 + 12]; /* 80 for device, 12 for erase size */
#endif
-
static int block2mtd_setup2(const char *val)
{
char buf[80 + 12]; /* 80 for device, 12 for erase size */
@@ -359,8 +353,10 @@ static int block2mtd_setup2(const char *val)
size_t erase_size = PAGE_SIZE;
int i, ret;
- if (strnlen(val, sizeof(buf)) >= sizeof(buf))
- parse_err("parameter too long");
+ if (strnlen(val, sizeof(buf)) >= sizeof(buf)) {
+ pr_err("parameter too long\n");
+ return 0;
+ }
strcpy(str, val);
kill_final_newline(str);
@@ -368,20 +364,27 @@ static int block2mtd_setup2(const char *val)
for (i = 0; i < 2; i++)
token[i] = strsep(&str, ",");
- if (str)
- parse_err("too many arguments");
+ if (str) {
+ pr_err("too many arguments\n");
+ return 0;
+ }
- if (!token[0])
- parse_err("no argument");
+ if (!token[0]) {
+ pr_err("no argument\n");
+ return 0;
+ }
name = token[0];
- if (strlen(name) + 1 > 80)
- parse_err("device name too long");
+ if (strlen(name) + 1 > 80) {
+ pr_err("device name too long\n");
+ return 0;
+ }
if (token[1]) {
ret = parse_num(&erase_size, token[1]);
if (ret) {
- parse_err("illegal erase size");
+ pr_err("illegal erase size\n");
+ return 0;
}
}
@@ -444,8 +447,9 @@ static void block2mtd_exit(void)
struct block2mtd_dev *dev = list_entry(pos, typeof(*dev), list);
block2mtd_sync(&dev->mtd);
mtd_device_unregister(&dev->mtd);
- INFO("mtd%d: [%s] removed", dev->mtd.index,
- dev->mtd.name + strlen("block2mtd: "));
+ pr_info("mtd%d: [%s] removed\n",
+ dev->mtd.index,
+ dev->mtd.name + strlen("block2mtd: "));
list_del(&dev->list);
block2mtd_free_device(dev);
}
diff --git a/drivers/mtd/devices/elm.c b/drivers/mtd/devices/elm.c
index dccef9f..d1dd6a3 100644
--- a/drivers/mtd/devices/elm.c
+++ b/drivers/mtd/devices/elm.c
@@ -20,14 +20,21 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of.h>
+#include <linux/sched.h>
#include <linux/pm_runtime.h>
#include <linux/platform_data/elm.h>
+#define ELM_SYSCONFIG 0x010
#define ELM_IRQSTATUS 0x018
#define ELM_IRQENABLE 0x01c
#define ELM_LOCATION_CONFIG 0x020
#define ELM_PAGE_CTRL 0x080
#define ELM_SYNDROME_FRAGMENT_0 0x400
+#define ELM_SYNDROME_FRAGMENT_1 0x404
+#define ELM_SYNDROME_FRAGMENT_2 0x408
+#define ELM_SYNDROME_FRAGMENT_3 0x40c
+#define ELM_SYNDROME_FRAGMENT_4 0x410
+#define ELM_SYNDROME_FRAGMENT_5 0x414
#define ELM_SYNDROME_FRAGMENT_6 0x418
#define ELM_LOCATION_STATUS 0x800
#define ELM_ERROR_LOCATION_0 0x880
@@ -56,12 +63,27 @@
#define SYNDROME_FRAGMENT_REG_SIZE 0x40
#define ERROR_LOCATION_SIZE 0x100
+struct elm_registers {
+ u32 elm_irqenable;
+ u32 elm_sysconfig;
+ u32 elm_location_config;
+ u32 elm_page_ctrl;
+ u32 elm_syndrome_fragment_6[ERROR_VECTOR_MAX];
+ u32 elm_syndrome_fragment_5[ERROR_VECTOR_MAX];
+ u32 elm_syndrome_fragment_4[ERROR_VECTOR_MAX];
+ u32 elm_syndrome_fragment_3[ERROR_VECTOR_MAX];
+ u32 elm_syndrome_fragment_2[ERROR_VECTOR_MAX];
+ u32 elm_syndrome_fragment_1[ERROR_VECTOR_MAX];
+ u32 elm_syndrome_fragment_0[ERROR_VECTOR_MAX];
+};
+
struct elm_info {
struct device *dev;
void __iomem *elm_base;
struct completion elm_completion;
struct list_head list;
enum bch_ecc bch_type;
+ struct elm_registers elm_regs;
};
static LIST_HEAD(elm_devices);
@@ -346,14 +368,9 @@ static int elm_probe(struct platform_device *pdev)
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "no memory resource defined\n");
- return -ENODEV;
- }
-
- info->elm_base = devm_request_and_ioremap(&pdev->dev, res);
- if (!info->elm_base)
- return -EADDRNOTAVAIL;
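+	/* devm_ioremap_resource() validates 'res' and logs its own error message */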
+ info->elm_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(info->elm_base))
+ return PTR_ERR(info->elm_base);
ret = devm_request_irq(&pdev->dev, irq->start, elm_isr, 0,
pdev->name, info);
@@ -381,10 +398,103 @@ static int elm_remove(struct platform_device *pdev)
{
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
- platform_set_drvdata(pdev, NULL);
return 0;
}
+/**
+ * elm_context_save
+ * saves the ELM configuration so it can be restored after the hardware is powered down
+ */
+static int elm_context_save(struct elm_info *info)
+{
+ struct elm_registers *regs = &info->elm_regs;
+ enum bch_ecc bch_type = info->bch_type;
+ u32 offset = 0, i;
+
+ regs->elm_irqenable = elm_read_reg(info, ELM_IRQENABLE);
+ regs->elm_sysconfig = elm_read_reg(info, ELM_SYSCONFIG);
+ regs->elm_location_config = elm_read_reg(info, ELM_LOCATION_CONFIG);
+ regs->elm_page_ctrl = elm_read_reg(info, ELM_PAGE_CTRL);
+ for (i = 0; i < ERROR_VECTOR_MAX; i++) {
+ offset = i * SYNDROME_FRAGMENT_REG_SIZE;
+ switch (bch_type) {
+ case BCH8_ECC:
+ regs->elm_syndrome_fragment_3[i] = elm_read_reg(info,
+ ELM_SYNDROME_FRAGMENT_3 + offset);
+ regs->elm_syndrome_fragment_2[i] = elm_read_reg(info,
+ ELM_SYNDROME_FRAGMENT_2 + offset);
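+			/* BCH8 also saves the BCH4 fragments: fall through */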
+ case BCH4_ECC:
+ regs->elm_syndrome_fragment_1[i] = elm_read_reg(info,
+ ELM_SYNDROME_FRAGMENT_1 + offset);
+ regs->elm_syndrome_fragment_0[i] = elm_read_reg(info,
+						ELM_SYNDROME_FRAGMENT_0 + offset);
+			break;
+		default:
+ return -EINVAL;
+ }
+ /* ELM SYNDROME_VALID bit in SYNDROME_FRAGMENT_6[] needs
+		 * to be saved for all BCH schemes */
+ regs->elm_syndrome_fragment_6[i] = elm_read_reg(info,
+ ELM_SYNDROME_FRAGMENT_6 + offset);
+ }
+ return 0;
+}
+
+/**
+ * elm_context_restore
+ * writes the configuration saved during power-down back into the ELM registers
+ */
+static int elm_context_restore(struct elm_info *info)
+{
+ struct elm_registers *regs = &info->elm_regs;
+ enum bch_ecc bch_type = info->bch_type;
+ u32 offset = 0, i;
+
+ elm_write_reg(info, ELM_IRQENABLE, regs->elm_irqenable);
+ elm_write_reg(info, ELM_SYSCONFIG, regs->elm_sysconfig);
+ elm_write_reg(info, ELM_LOCATION_CONFIG, regs->elm_location_config);
+ elm_write_reg(info, ELM_PAGE_CTRL, regs->elm_page_ctrl);
+ for (i = 0; i < ERROR_VECTOR_MAX; i++) {
+ offset = i * SYNDROME_FRAGMENT_REG_SIZE;
+ switch (bch_type) {
+ case BCH8_ECC:
+ elm_write_reg(info, ELM_SYNDROME_FRAGMENT_3 + offset,
+ regs->elm_syndrome_fragment_3[i]);
+ elm_write_reg(info, ELM_SYNDROME_FRAGMENT_2 + offset,
+ regs->elm_syndrome_fragment_2[i]);
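+			/* BCH8 also restores the BCH4 fragments: fall through */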
+ case BCH4_ECC:
+ elm_write_reg(info, ELM_SYNDROME_FRAGMENT_1 + offset,
+ regs->elm_syndrome_fragment_1[i]);
+ elm_write_reg(info, ELM_SYNDROME_FRAGMENT_0 + offset,
+					regs->elm_syndrome_fragment_0[i]);
+			break;
+		default:
+ return -EINVAL;
+ }
+ /* ELM_SYNDROME_VALID bit to be set in last to trigger FSM */
+ elm_write_reg(info, ELM_SYNDROME_FRAGMENT_6 + offset,
+ regs->elm_syndrome_fragment_6[i] &
+ ELM_SYNDROME_VALID);
+ }
+ return 0;
+}
+
+static int elm_suspend(struct device *dev)
+{
+ struct elm_info *info = dev_get_drvdata(dev);
+ elm_context_save(info);
+ pm_runtime_put_sync(dev);
+ return 0;
+}
+
+static int elm_resume(struct device *dev)
+{
+ struct elm_info *info = dev_get_drvdata(dev);
+ pm_runtime_get_sync(dev);
+ elm_context_restore(info);
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(elm_pm_ops, elm_suspend, elm_resume);
+
#ifdef CONFIG_OF
static const struct of_device_id elm_of_match[] = {
{ .compatible = "ti,am3352-elm" },
@@ -398,6 +508,7 @@ static struct platform_driver elm_driver = {
.name = "elm",
.owner = THIS_MODULE,
.of_match_table = of_match_ptr(elm_of_match),
+ .pm = &elm_pm_ops,
},
.probe = elm_probe,
.remove = elm_remove,
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index 2f3d2a5..26b14f9 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -43,17 +43,24 @@
#define OPCODE_FAST_READ 0x0b /* Read data bytes (high frequency) */
#define OPCODE_PP 0x02 /* Page program (up to 256 bytes) */
#define OPCODE_BE_4K 0x20 /* Erase 4KiB block */
+#define OPCODE_BE_4K_PMC 0xd7 /* Erase 4KiB block on PMC chips */
#define OPCODE_BE_32K 0x52 /* Erase 32KiB block */
#define OPCODE_CHIP_ERASE 0xc7 /* Erase whole flash chip */
#define OPCODE_SE 0xd8 /* Sector erase (usually 64KiB) */
#define OPCODE_RDID 0x9f /* Read JEDEC ID */
+/* 4-byte address opcodes - used on Spansion and some Macronix flashes. */
+#define OPCODE_NORM_READ_4B 0x13 /* Read data bytes (low frequency) */
+#define OPCODE_FAST_READ_4B 0x0c /* Read data bytes (high frequency) */
+#define OPCODE_PP_4B 0x12 /* Page program (up to 256 bytes) */
+#define OPCODE_SE_4B 0xdc /* Sector erase (usually 64KiB) */
+
/* Used for SST flashes only. */
#define OPCODE_BP 0x02 /* Byte program */
#define OPCODE_WRDI 0x04 /* Write disable */
#define OPCODE_AAI_WP 0xad /* Auto address increment word program */
-/* Used for Macronix flashes only. */
+/* Used for Macronix and Winbond flashes. */
#define OPCODE_EN4B 0xb7 /* Enter 4-byte mode */
#define OPCODE_EX4B 0xe9 /* Exit 4-byte mode */
@@ -84,6 +91,8 @@ struct m25p {
u16 page_size;
u16 addr_width;
u8 erase_opcode;
+ u8 read_opcode;
+ u8 program_opcode;
u8 *command;
bool fast_read;
};
@@ -161,6 +170,7 @@ static inline int set_4byte(struct m25p *flash, u32 jedec_id, int enable)
{
switch (JEDEC_MFR(jedec_id)) {
case CFI_MFR_MACRONIX:
+ case CFI_MFR_ST: /* Micron, actually */
case 0xEF /* winbond */:
flash->command[0] = enable ? OPCODE_EN4B : OPCODE_EX4B;
return spi_write(flash->spi, flash->command, 1);
@@ -371,7 +381,7 @@ static int m25p80_read(struct mtd_info *mtd, loff_t from, size_t len,
*/
/* Set up the write data buffer. */
- opcode = flash->fast_read ? OPCODE_FAST_READ : OPCODE_NORM_READ;
+ opcode = flash->read_opcode;
flash->command[0] = opcode;
m25p_addr2cmd(flash, from, flash->command);
@@ -422,7 +432,7 @@ static int m25p80_write(struct mtd_info *mtd, loff_t to, size_t len,
write_enable(flash);
/* Set up the opcode in the write buffer. */
- flash->command[0] = OPCODE_PP;
+ flash->command[0] = flash->program_opcode;
m25p_addr2cmd(flash, to, flash->command);
page_offset = to & (flash->page_size - 1);
@@ -682,6 +692,8 @@ struct flash_info {
#define SECT_4K 0x01 /* OPCODE_BE_4K works uniformly */
#define M25P_NO_ERASE 0x02 /* No erase command needed */
#define SST_WRITE 0x04 /* use SST byte programming */
+#define M25P_NO_FR 0x08 /* Can't do fastread */
+#define SECT_4K_PMC 0x10 /* OPCODE_BE_4K_PMC works uniformly */
};
#define INFO(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags) \
@@ -694,13 +706,13 @@ struct flash_info {
.flags = (_flags), \
})
-#define CAT25_INFO(_sector_size, _n_sectors, _page_size, _addr_width) \
+#define CAT25_INFO(_sector_size, _n_sectors, _page_size, _addr_width, _flags) \
((kernel_ulong_t)&(struct flash_info) { \
.sector_size = (_sector_size), \
.n_sectors = (_n_sectors), \
.page_size = (_page_size), \
.addr_width = (_addr_width), \
- .flags = M25P_NO_ERASE, \
+ .flags = (_flags), \
})
/* NOTE: double check command sets and memory organization when you add
@@ -732,7 +744,8 @@ static const struct spi_device_id m25p_ids[] = {
{ "en25qh256", INFO(0x1c7019, 0, 64 * 1024, 512, 0) },
/* Everspin */
- { "mr25h256", CAT25_INFO( 32 * 1024, 1, 256, 2) },
+ { "mr25h256", CAT25_INFO( 32 * 1024, 1, 256, 2, M25P_NO_ERASE | M25P_NO_FR) },
+ { "mr25h10", CAT25_INFO(128 * 1024, 1, 256, 3, M25P_NO_ERASE | M25P_NO_FR) },
/* GigaDevice */
{ "gd25q32", INFO(0xc84016, 0, 64 * 1024, 64, SECT_4K) },
@@ -762,6 +775,11 @@ static const struct spi_device_id m25p_ids[] = {
{ "n25q128a13", INFO(0x20ba18, 0, 64 * 1024, 256, 0) },
{ "n25q256a", INFO(0x20ba19, 0, 64 * 1024, 512, SECT_4K) },
+ /* PMC */
+ { "pm25lv512", INFO(0, 0, 32 * 1024, 2, SECT_4K_PMC) },
+ { "pm25lv010", INFO(0, 0, 32 * 1024, 4, SECT_4K_PMC) },
+ { "pm25lq032", INFO(0x7f9d46, 0, 64 * 1024, 64, SECT_4K) },
+
/* Spansion -- single (large) sector size only, at least
* for the chips listed here (without boot sectors).
*/
@@ -840,17 +858,18 @@ static const struct spi_device_id m25p_ids[] = {
{ "w25q32dw", INFO(0xef6016, 0, 64 * 1024, 64, SECT_4K) },
{ "w25x64", INFO(0xef3017, 0, 64 * 1024, 128, SECT_4K) },
{ "w25q64", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
+ { "w25q128", INFO(0xef4018, 0, 64 * 1024, 256, SECT_4K) },
{ "w25q80", INFO(0xef5014, 0, 64 * 1024, 16, SECT_4K) },
{ "w25q80bl", INFO(0xef4014, 0, 64 * 1024, 16, SECT_4K) },
{ "w25q128", INFO(0xef4018, 0, 64 * 1024, 256, SECT_4K) },
{ "w25q256", INFO(0xef4019, 0, 64 * 1024, 512, SECT_4K) },
/* Catalyst / On Semiconductor -- non-JEDEC */
- { "cat25c11", CAT25_INFO( 16, 8, 16, 1) },
- { "cat25c03", CAT25_INFO( 32, 8, 16, 2) },
- { "cat25c09", CAT25_INFO( 128, 8, 32, 2) },
- { "cat25c17", CAT25_INFO( 256, 8, 32, 2) },
- { "cat25128", CAT25_INFO(2048, 8, 64, 2) },
+ { "cat25c11", CAT25_INFO( 16, 8, 16, 1, M25P_NO_ERASE | M25P_NO_FR) },
+ { "cat25c03", CAT25_INFO( 32, 8, 16, 2, M25P_NO_ERASE | M25P_NO_FR) },
+ { "cat25c09", CAT25_INFO( 128, 8, 32, 2, M25P_NO_ERASE | M25P_NO_FR) },
+ { "cat25c17", CAT25_INFO( 256, 8, 32, 2, M25P_NO_ERASE | M25P_NO_FR) },
+ { "cat25128", CAT25_INFO(2048, 8, 64, 2, M25P_NO_ERASE | M25P_NO_FR) },
{ },
};
MODULE_DEVICE_TABLE(spi, m25p_ids);
@@ -920,7 +939,7 @@ static int m25p_probe(struct spi_device *spi)
* a chip ID, try the JEDEC id commands; they'll work for most
* newer chips, even if we don't recognize the particular chip.
*/
- data = spi->dev.platform_data;
+ data = dev_get_platdata(&spi->dev);
if (data && data->type) {
const struct spi_device_id *plat_id;
@@ -972,7 +991,7 @@ static int m25p_probe(struct spi_device *spi)
flash->spi = spi;
mutex_init(&flash->lock);
- dev_set_drvdata(&spi->dev, flash);
+ spi_set_drvdata(spi, flash);
/*
* Atmel, SST and Intel/Numonyx serial flash tend to power
@@ -1014,6 +1033,9 @@ static int m25p_probe(struct spi_device *spi)
if (info->flags & SECT_4K) {
flash->erase_opcode = OPCODE_BE_4K;
flash->mtd.erasesize = 4096;
+ } else if (info->flags & SECT_4K_PMC) {
+ flash->erase_opcode = OPCODE_BE_4K_PMC;
+ flash->mtd.erasesize = 4096;
} else {
flash->erase_opcode = OPCODE_SE;
flash->mtd.erasesize = info->sector_size;
@@ -1028,24 +1050,41 @@ static int m25p_probe(struct spi_device *spi)
flash->mtd.writebufsize = flash->page_size;
flash->fast_read = false;
-#ifdef CONFIG_OF
if (np && of_property_read_bool(np, "m25p,fast-read"))
flash->fast_read = true;
-#endif
#ifdef CONFIG_M25PXX_USE_FAST_READ
flash->fast_read = true;
#endif
+ if (info->flags & M25P_NO_FR)
+ flash->fast_read = false;
+
+ /* Default commands */
+ if (flash->fast_read)
+ flash->read_opcode = OPCODE_FAST_READ;
+ else
+ flash->read_opcode = OPCODE_NORM_READ;
+
+ flash->program_opcode = OPCODE_PP;
if (info->addr_width)
flash->addr_width = info->addr_width;
- else {
+ else if (flash->mtd.size > 0x1000000) {
/* enable 4-byte addressing if the device exceeds 16MiB */
- if (flash->mtd.size > 0x1000000) {
- flash->addr_width = 4;
- set_4byte(flash, info->jedec_id, 1);
+ flash->addr_width = 4;
+ if (JEDEC_MFR(info->jedec_id) == CFI_MFR_AMD) {
+ /* Dedicated 4-byte command set */
+ flash->read_opcode = flash->fast_read ?
+ OPCODE_FAST_READ_4B :
+ OPCODE_NORM_READ_4B;
+ flash->program_opcode = OPCODE_PP_4B;
+ /* No small sector erase for 4-byte command set */
+ flash->erase_opcode = OPCODE_SE_4B;
+ flash->mtd.erasesize = info->sector_size;
} else
- flash->addr_width = 3;
+ set_4byte(flash, info->jedec_id, 1);
+ } else {
+ flash->addr_width = 3;
}
dev_info(&spi->dev, "%s (%lld Kbytes)\n", id->name,
@@ -1080,7 +1119,7 @@ static int m25p_probe(struct spi_device *spi)
static int m25p_remove(struct spi_device *spi)
{
- struct m25p *flash = dev_get_drvdata(&spi->dev);
+ struct m25p *flash = spi_get_drvdata(spi);
int status;
/* Clean up MTD stuff. */
diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c
index 28779b6..0e8cbfe 100644
--- a/drivers/mtd/devices/mtd_dataflash.c
+++ b/drivers/mtd/devices/mtd_dataflash.c
@@ -622,7 +622,7 @@ static int add_dataflash_otp(struct spi_device *spi, char *name, int nr_pages,
struct dataflash *priv;
struct mtd_info *device;
struct mtd_part_parser_data ppdata;
- struct flash_platform_data *pdata = spi->dev.platform_data;
+ struct flash_platform_data *pdata = dev_get_platdata(&spi->dev);
char *otp_tag = "";
int err = 0;
@@ -661,7 +661,7 @@ static int add_dataflash_otp(struct spi_device *spi, char *name, int nr_pages,
dev_info(&spi->dev, "%s (%lld KBytes) pagesize %d bytes%s\n",
name, (long long)((device->size + 1023) >> 10),
pagesize, otp_tag);
- dev_set_drvdata(&spi->dev, priv);
+ spi_set_drvdata(spi, priv);
ppdata.of_node = spi->dev.of_node;
err = mtd_device_parse_register(device, NULL, &ppdata,
@@ -671,7 +671,7 @@ static int add_dataflash_otp(struct spi_device *spi, char *name, int nr_pages,
if (!err)
return 0;
- dev_set_drvdata(&spi->dev, NULL);
+ spi_set_drvdata(spi, NULL);
kfree(priv);
return err;
}
@@ -895,14 +895,14 @@ static int dataflash_probe(struct spi_device *spi)
static int dataflash_remove(struct spi_device *spi)
{
- struct dataflash *flash = dev_get_drvdata(&spi->dev);
+ struct dataflash *flash = spi_get_drvdata(spi);
int status;
pr_debug("%s: remove\n", dev_name(&spi->dev));
status = mtd_device_unregister(&flash->mtd);
if (status == 0) {
- dev_set_drvdata(&spi->dev, NULL);
+ spi_set_drvdata(spi, NULL);
kfree(flash);
}
return status;
diff --git a/drivers/mtd/devices/spear_smi.c b/drivers/mtd/devices/spear_smi.c
index 8a82b8b..4238214 100644
--- a/drivers/mtd/devices/spear_smi.c
+++ b/drivers/mtd/devices/spear_smi.c
@@ -550,7 +550,7 @@ static int spear_mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
{
struct spear_snor_flash *flash = get_flash_data(mtd);
struct spear_smi *dev = mtd->priv;
- void *src;
+ void __iomem *src;
u32 ctrlreg1, val;
int ret;
@@ -583,7 +583,7 @@ static int spear_mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
writel(val, dev->io_base + SMI_CR1);
- memcpy_fromio(buf, (u8 *)src, len);
+ memcpy_fromio(buf, src, len);
/* restore ctrl reg1 */
writel(ctrlreg1, dev->io_base + SMI_CR1);
@@ -596,7 +596,7 @@ static int spear_mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
}
static inline int spear_smi_cpy_toio(struct spear_smi *dev, u32 bank,
- void *dest, const void *src, size_t len)
+ void __iomem *dest, const void *src, size_t len)
{
int ret;
u32 ctrlreg1;
@@ -643,7 +643,7 @@ static int spear_mtd_write(struct mtd_info *mtd, loff_t to, size_t len,
{
struct spear_snor_flash *flash = get_flash_data(mtd);
struct spear_smi *dev = mtd->priv;
- void *dest;
+ void __iomem *dest;
u32 page_offset, page_size;
int ret;
@@ -995,14 +995,12 @@ static int spear_smi_probe(struct platform_device *pdev)
ret = spear_smi_setup_banks(pdev, i, pdata->np[i]);
if (ret) {
dev_err(&dev->pdev->dev, "bank setup failed\n");
- goto err_bank_setup;
+ goto err_irq;
}
}
return 0;
-err_bank_setup:
- platform_set_drvdata(pdev, NULL);
err_irq:
clk_disable_unprepare(dev->clk);
err:
@@ -1040,12 +1038,11 @@ static int spear_smi_remove(struct platform_device *pdev)
}
clk_disable_unprepare(dev->clk);
- platform_set_drvdata(pdev, NULL);
return 0;
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int spear_smi_suspend(struct device *dev)
{
struct spear_smi *sdev = dev_get_drvdata(dev);
@@ -1068,9 +1065,9 @@ static int spear_smi_resume(struct device *dev)
spear_smi_hw_init(sdev);
return ret;
}
+#endif
static SIMPLE_DEV_PM_OPS(spear_smi_pm_ops, spear_smi_suspend, spear_smi_resume);
-#endif
#ifdef CONFIG_OF
static const struct of_device_id spear_smi_id_table[] = {
@@ -1086,9 +1083,7 @@ static struct platform_driver spear_smi_driver = {
.bus = &platform_bus_type,
.owner = THIS_MODULE,
.of_match_table = of_match_ptr(spear_smi_id_table),
-#ifdef CONFIG_PM
.pm = &spear_smi_pm_ops,
-#endif
},
.probe = spear_smi_probe,
.remove = spear_smi_remove,
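Two related cleanups land in this driver: the guard moves from CONFIG_PM to CONFIG_PM_SLEEP, since the hooks only implement system sleep, and both SIMPLE_DEV_PM_OPS and the .pm assignment lose their #ifdefs. That is safe because the macro expands to an empty dev_pm_ops when sleep support is compiled out. The resulting pattern, as a hypothetical foo driver:

#include <linux/clk.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm.h>

struct foo {
	struct clk *clk;
};

#ifdef CONFIG_PM_SLEEP
static int foo_suspend(struct device *dev)
{
	struct foo *priv = dev_get_drvdata(dev);

	clk_disable_unprepare(priv->clk);	/* quiesce the hardware */
	return 0;
}

static int foo_resume(struct device *dev)
{
	struct foo *priv = dev_get_drvdata(dev);

	return clk_prepare_enable(priv->clk);
}
#endif

/* empty when CONFIG_PM_SLEEP is off, so no #ifdef is needed below */
static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

static struct platform_driver foo_driver = {
	.driver = {
		.name	= "foo",
		.owner	= THIS_MODULE,
		.pm	= &foo_pm_ops,
	},
};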
diff --git a/drivers/mtd/devices/sst25l.c b/drivers/mtd/devices/sst25l.c
index 8091b01..a42f1f0 100644
--- a/drivers/mtd/devices/sst25l.c
+++ b/drivers/mtd/devices/sst25l.c
@@ -370,9 +370,9 @@ static int sst25l_probe(struct spi_device *spi)
flash->spi = spi;
mutex_init(&flash->lock);
- dev_set_drvdata(&spi->dev, flash);
+ spi_set_drvdata(spi, flash);
- data = spi->dev.platform_data;
+ data = dev_get_platdata(&spi->dev);
if (data && data->name)
flash->mtd.name = data->name;
else
@@ -404,7 +404,7 @@ static int sst25l_probe(struct spi_device *spi)
data ? data->nr_parts : 0);
if (ret) {
kfree(flash);
- dev_set_drvdata(&spi->dev, NULL);
+ spi_set_drvdata(spi, NULL);
return -ENODEV;
}
@@ -413,7 +413,7 @@ static int sst25l_probe(struct spi_device *spi)
static int sst25l_remove(struct spi_device *spi)
{
- struct sst25l_flash *flash = dev_get_drvdata(&spi->dev);
+ struct sst25l_flash *flash = spi_get_drvdata(spi);
int ret;
ret = mtd_device_unregister(&flash->mtd);
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index 8b27ca0..310dc7c 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -157,24 +157,6 @@ config MTD_PXA2XX
help
This provides a driver for the NOR flash attached to a PXA2xx chip.
-config MTD_OCTAGON
- tristate "JEDEC Flash device mapped on Octagon 5066 SBC"
- depends on X86 && MTD_JEDEC && MTD_COMPLEX_MAPPINGS
- help
- This provides a 'mapping' driver which supports the way in which
- the flash chips are connected in the Octagon-5066 Single Board
- Computer. More information on the board is available at
- <http://www.octagonsystems.com/products/5066.aspx>.
-
-config MTD_VMAX
- tristate "JEDEC Flash device mapped on Tempustech VMAX SBC301"
- depends on X86 && MTD_JEDEC && MTD_COMPLEX_MAPPINGS
- help
- This provides a 'mapping' driver which supports the way in which
- the flash chips are connected in the Tempustech VMAX SBC301 Single
- Board Computer. More information on the board is available at
- <http://www.tempustech.com/>.
-
config MTD_SCx200_DOCFLASH
tristate "Flash device mapped with DOCCS on NatSemi SCx200"
depends on SCx200 && MTD_CFI
diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile
index 9fdbd4b..141c91a 100644
--- a/drivers/mtd/maps/Makefile
+++ b/drivers/mtd/maps/Makefile
@@ -16,7 +16,6 @@ obj-$(CONFIG_MTD_ICHXROM) += ichxrom.o
obj-$(CONFIG_MTD_CK804XROM) += ck804xrom.o
obj-$(CONFIG_MTD_TSUNAMI) += tsunami_flash.o
obj-$(CONFIG_MTD_PXA2XX) += pxa2xx-flash.o
-obj-$(CONFIG_MTD_OCTAGON) += octagon-5066.o
obj-$(CONFIG_MTD_PHYSMAP) += physmap.o
obj-$(CONFIG_MTD_PHYSMAP_OF) += physmap_of.o
obj-$(CONFIG_MTD_PISMO) += pismo.o
@@ -28,7 +27,6 @@ obj-$(CONFIG_MTD_SC520CDP) += sc520cdp.o
obj-$(CONFIG_MTD_NETSC520) += netsc520.o
obj-$(CONFIG_MTD_TS5500) += ts5500_flash.o
obj-$(CONFIG_MTD_SUN_UFLASH) += sun_uflash.o
-obj-$(CONFIG_MTD_VMAX) += vmax301.o
obj-$(CONFIG_MTD_SCx200_DOCFLASH)+= scx200_docflash.o
obj-$(CONFIG_MTD_SOLUTIONENGINE)+= solutionengine.o
obj-$(CONFIG_MTD_PCI) += pci.o
diff --git a/drivers/mtd/maps/bfin-async-flash.c b/drivers/mtd/maps/bfin-async-flash.c
index 319b04a..5434d8d 100644
--- a/drivers/mtd/maps/bfin-async-flash.c
+++ b/drivers/mtd/maps/bfin-async-flash.c
@@ -128,7 +128,7 @@ static const char * const part_probe_types[] = {
static int bfin_flash_probe(struct platform_device *pdev)
{
int ret;
- struct physmap_flash_data *pdata = pdev->dev.platform_data;
+ struct physmap_flash_data *pdata = dev_get_platdata(&pdev->dev);
struct resource *memory = platform_get_resource(pdev, IORESOURCE_MEM, 0);
struct resource *flash_ambctl = platform_get_resource(pdev, IORESOURCE_MEM, 1);
struct async_state *state;
diff --git a/drivers/mtd/maps/cfi_flagadm.c b/drivers/mtd/maps/cfi_flagadm.c
index d16fc9d..d504b3d 100644
--- a/drivers/mtd/maps/cfi_flagadm.c
+++ b/drivers/mtd/maps/cfi_flagadm.c
@@ -55,13 +55,13 @@
#define FLASH_PARTITION3_SIZE 0x001C0000
-struct map_info flagadm_map = {
+static struct map_info flagadm_map = {
.name = "FlagaDM flash device",
.size = FLASH_SIZE,
.bankwidth = 2,
};
-struct mtd_partition flagadm_parts[] = {
+static struct mtd_partition flagadm_parts[] = {
{
.name = "Bootloader",
.offset = FLASH_PARTITION0_ADDR,
@@ -112,7 +112,7 @@ static int __init init_flagadm(void)
return 0;
}
- iounmap((void *)flagadm_map.virt);
+ iounmap((void __iomem *)flagadm_map.virt);
return -ENXIO;
}
@@ -123,8 +123,8 @@ static void __exit cleanup_flagadm(void)
map_destroy(mymtd);
}
if (flagadm_map.virt) {
- iounmap((void *)flagadm_map.virt);
- flagadm_map.virt = 0;
+ iounmap((void __iomem *)flagadm_map.virt);
+ flagadm_map.virt = NULL;
}
}
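The void * to void __iomem * changes here and in impa7 below are sparse annotations: __iomem costs nothing at run time, but it lets static checkers flag code that mixes CPU pointers with I/O-mapped ones, which is also why casts in front of memcpy_fromio() can go. A minimal sketch of the annotated idiom, with hypothetical foo_* names:

#include <linux/io.h>

static void __iomem *foo_base;

static int foo_map(unsigned long phys, size_t len)
{
	foo_base = ioremap(phys, len);
	if (!foo_base)
		return -ENOMEM;
	return 0;
}

static void foo_read(void *dst, unsigned long off, size_t len)
{
	/* no cast needed: the source already carries __iomem */
	memcpy_fromio(dst, foo_base + off, len);
}

static void foo_unmap(void)
{
	iounmap(foo_base);
	foo_base = NULL;	/* a pointer, so NULL rather than 0 */
}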
diff --git a/drivers/mtd/maps/gpio-addr-flash.c b/drivers/mtd/maps/gpio-addr-flash.c
index 5ede282..1adba86 100644
--- a/drivers/mtd/maps/gpio-addr-flash.c
+++ b/drivers/mtd/maps/gpio-addr-flash.c
@@ -196,7 +196,7 @@ static int gpio_flash_probe(struct platform_device *pdev)
struct resource *gpios;
struct async_state *state;
- pdata = pdev->dev.platform_data;
+ pdata = dev_get_platdata(&pdev->dev);
memory = platform_get_resource(pdev, IORESOURCE_MEM, 0);
gpios = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
diff --git a/drivers/mtd/maps/impa7.c b/drivers/mtd/maps/impa7.c
index 4968674..15bbda0 100644
--- a/drivers/mtd/maps/impa7.c
+++ b/drivers/mtd/maps/impa7.c
@@ -79,7 +79,7 @@ static int __init init_impa7(void)
}
simple_map_init(&impa7_map[i]);
- impa7_mtd[i] = 0;
+ impa7_mtd[i] = NULL;
type = rom_probe_types;
for(; !impa7_mtd[i] && *type; type++) {
impa7_mtd[i] = do_map_probe(*type, &impa7_map[i]);
@@ -91,9 +91,9 @@ static int __init init_impa7(void)
mtd_device_parse_register(impa7_mtd[i], NULL, NULL,
partitions,
ARRAY_SIZE(partitions));
+ } else {
+ iounmap((void __iomem *)impa7_map[i].virt);
}
- else
- iounmap((void *)impa7_map[i].virt);
}
return devicesfound == 0 ? -ENXIO : 0;
}
@@ -105,8 +105,8 @@ static void __exit cleanup_impa7(void)
if (impa7_mtd[i]) {
mtd_device_unregister(impa7_mtd[i]);
map_destroy(impa7_mtd[i]);
- iounmap((void *)impa7_map[i].virt);
- impa7_map[i].virt = 0;
+ iounmap((void __iomem *)impa7_map[i].virt);
+ impa7_map[i].virt = NULL;
}
}
}
diff --git a/drivers/mtd/maps/ixp4xx.c b/drivers/mtd/maps/ixp4xx.c
index 52b3410..10debfe 100644
--- a/drivers/mtd/maps/ixp4xx.c
+++ b/drivers/mtd/maps/ixp4xx.c
@@ -152,11 +152,9 @@ static const char * const probes[] = { "RedBoot", "cmdlinepart", NULL };
static int ixp4xx_flash_remove(struct platform_device *dev)
{
- struct flash_platform_data *plat = dev->dev.platform_data;
+ struct flash_platform_data *plat = dev_get_platdata(&dev->dev);
struct ixp4xx_flash_info *info = platform_get_drvdata(dev);
- platform_set_drvdata(dev, NULL);
-
if(!info)
return 0;
@@ -180,7 +178,7 @@ static int ixp4xx_flash_remove(struct platform_device *dev)
static int ixp4xx_flash_probe(struct platform_device *dev)
{
- struct flash_platform_data *plat = dev->dev.platform_data;
+ struct flash_platform_data *plat = dev_get_platdata(&dev->dev);
struct ixp4xx_flash_info *info;
struct mtd_part_parser_data ppdata = {
.origin = dev->resource->start,
diff --git a/drivers/mtd/maps/latch-addr-flash.c b/drivers/mtd/maps/latch-addr-flash.c
index ab0fead..98bb5d5 100644
--- a/drivers/mtd/maps/latch-addr-flash.c
+++ b/drivers/mtd/maps/latch-addr-flash.c
@@ -102,9 +102,8 @@ static int latch_addr_flash_remove(struct platform_device *dev)
info = platform_get_drvdata(dev);
if (info == NULL)
return 0;
- platform_set_drvdata(dev, NULL);
- latch_addr_data = dev->dev.platform_data;
+ latch_addr_data = dev_get_platdata(&dev->dev);
if (info->mtd != NULL) {
mtd_device_unregister(info->mtd);
@@ -135,7 +134,7 @@ static int latch_addr_flash_probe(struct platform_device *dev)
int chipsel;
int err;
- latch_addr_data = dev->dev.platform_data;
+ latch_addr_data = dev_get_platdata(&dev->dev);
if (latch_addr_data == NULL)
return -ENODEV;
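The dropped platform_set_drvdata(dev, NULL) calls in this and the surrounding map drivers rely on the driver core, which since around v3.10 clears drvdata itself once a device is unbound or probe fails, so a remove() routine only has to undo what it actually set up. A hypothetical remove() after this cleanup:

#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/platform_device.h>

struct foo_flash_info {
	struct mtd_info *mtd;
};

static int foo_flash_remove(struct platform_device *pdev)
{
	struct foo_flash_info *info = platform_get_drvdata(pdev);

	if (!info)
		return 0;

	/* no platform_set_drvdata(pdev, NULL): the core clears drvdata */
	mtd_device_unregister(info->mtd);
	map_destroy(info->mtd);
	return 0;
}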
diff --git a/drivers/mtd/maps/octagon-5066.c b/drivers/mtd/maps/octagon-5066.c
deleted file mode 100644
index 807ac2a..0000000
--- a/drivers/mtd/maps/octagon-5066.c
+++ /dev/null
@@ -1,246 +0,0 @@
-/* ######################################################################
-
- Octagon 5066 MTD Driver.
-
- The Octagon 5066 is a SBC based on AMD's 586-WB running at 133 MHZ. It
- comes with a builtin AMD 29F016 flash chip and a socketed EEPROM that
- is replacable by flash. Both units are mapped through a multiplexer
- into a 32k memory window at 0xe8000. The control register for the
- multiplexing unit is located at IO 0x208 with a bit map of
- 0-5 Page Selection in 32k increments
- 6-7 Device selection:
- 00 SSD off
- 01 SSD 0 (Socket)
- 10 SSD 1 (Flash chip)
- 11 undefined
-
- On each SSD, the first 128k is reserved for use by the bios
- (actually it IS the bios..) This only matters if you are booting off the
- flash, you must not put a file system starting there.
-
- The driver tries to do a detection algorithm to guess what sort of devices
- are plugged into the sockets.
-
- ##################################################################### */
-
-#include <linux/module.h>
-#include <linux/ioport.h>
-#include <linux/init.h>
-#include <asm/io.h>
-
-#include <linux/mtd/map.h>
-#include <linux/mtd/mtd.h>
-
-#define WINDOW_START 0xe8000
-#define WINDOW_LENGTH 0x8000
-#define WINDOW_SHIFT 27
-#define WINDOW_MASK 0x7FFF
-#define PAGE_IO 0x208
-
-static volatile char page_n_dev = 0;
-static unsigned long iomapadr;
-static DEFINE_SPINLOCK(oct5066_spin);
-
-/*
- * We use map_priv_1 to identify which device we are.
- */
-
-static void __oct5066_page(struct map_info *map, __u8 byte)
-{
- outb(byte,PAGE_IO);
- page_n_dev = byte;
-}
-
-static inline void oct5066_page(struct map_info *map, unsigned long ofs)
-{
- __u8 byte = map->map_priv_1 | (ofs >> WINDOW_SHIFT);
-
- if (page_n_dev != byte)
- __oct5066_page(map, byte);
-}
-
-
-static map_word oct5066_read8(struct map_info *map, unsigned long ofs)
-{
- map_word ret;
- spin_lock(&oct5066_spin);
- oct5066_page(map, ofs);
- ret.x[0] = readb(iomapadr + (ofs & WINDOW_MASK));
- spin_unlock(&oct5066_spin);
- return ret;
-}
-
-static void oct5066_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
-{
- while(len) {
- unsigned long thislen = len;
- if (len > (WINDOW_LENGTH - (from & WINDOW_MASK)))
- thislen = WINDOW_LENGTH-(from & WINDOW_MASK);
-
- spin_lock(&oct5066_spin);
- oct5066_page(map, from);
- memcpy_fromio(to, iomapadr + from, thislen);
- spin_unlock(&oct5066_spin);
- to += thislen;
- from += thislen;
- len -= thislen;
- }
-}
-
-static void oct5066_write8(struct map_info *map, map_word d, unsigned long adr)
-{
- spin_lock(&oct5066_spin);
- oct5066_page(map, adr);
- writeb(d.x[0], iomapadr + (adr & WINDOW_MASK));
- spin_unlock(&oct5066_spin);
-}
-
-static void oct5066_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len)
-{
- while(len) {
- unsigned long thislen = len;
- if (len > (WINDOW_LENGTH - (to & WINDOW_MASK)))
- thislen = WINDOW_LENGTH-(to & WINDOW_MASK);
-
- spin_lock(&oct5066_spin);
- oct5066_page(map, to);
- memcpy_toio(iomapadr + to, from, thislen);
- spin_unlock(&oct5066_spin);
- to += thislen;
- from += thislen;
- len -= thislen;
- }
-}
-
-static struct map_info oct5066_map[2] = {
- {
- .name = "Octagon 5066 Socket",
- .phys = NO_XIP,
- .size = 512 * 1024,
- .bankwidth = 1,
- .read = oct5066_read8,
- .copy_from = oct5066_copy_from,
- .write = oct5066_write8,
- .copy_to = oct5066_copy_to,
- .map_priv_1 = 1<<6
- },
- {
- .name = "Octagon 5066 Internal Flash",
- .phys = NO_XIP,
- .size = 2 * 1024 * 1024,
- .bankwidth = 1,
- .read = oct5066_read8,
- .copy_from = oct5066_copy_from,
- .write = oct5066_write8,
- .copy_to = oct5066_copy_to,
- .map_priv_1 = 2<<6
- }
-};
-
-static struct mtd_info *oct5066_mtd[2] = {NULL, NULL};
-
-// OctProbe - Sense if this is an octagon card
-// ---------------------------------------------------------------------
-/* Perform a simple validity test, we map the window select SSD0 and
- change pages while monitoring the window. A change in the window,
- controlled by the PAGE_IO port is a functioning 5066 board. This will
- fail if the thing in the socket is set to a uniform value. */
-static int __init OctProbe(void)
-{
- unsigned int Base = (1 << 6);
- unsigned long I;
- unsigned long Values[10];
- for (I = 0; I != 20; I++)
- {
- outb(Base + (I%10),PAGE_IO);
- if (I < 10)
- {
- // Record the value and check for uniqueness
- Values[I%10] = readl(iomapadr);
- if (I > 0 && Values[I%10] == Values[0])
- return -EAGAIN;
- }
- else
- {
- // Make sure we get the same values on the second pass
- if (Values[I%10] != readl(iomapadr))
- return -EAGAIN;
- }
- }
- return 0;
-}
-
-void cleanup_oct5066(void)
-{
- int i;
- for (i=0; i<2; i++) {
- if (oct5066_mtd[i]) {
- mtd_device_unregister(oct5066_mtd[i]);
- map_destroy(oct5066_mtd[i]);
- }
- }
- iounmap((void *)iomapadr);
- release_region(PAGE_IO, 1);
-}
-
-static int __init init_oct5066(void)
-{
- int i;
- int ret = 0;
-
- // Do an autoprobe sequence
- if (!request_region(PAGE_IO,1,"Octagon SSD")) {
- printk(KERN_NOTICE "5066: Page Register in Use\n");
- return -EAGAIN;
- }
- iomapadr = (unsigned long)ioremap(WINDOW_START, WINDOW_LENGTH);
- if (!iomapadr) {
- printk(KERN_NOTICE "Failed to ioremap memory region\n");
- ret = -EIO;
- goto out_rel;
- }
- if (OctProbe() != 0) {
- printk(KERN_NOTICE "5066: Octagon Probe Failed, is this an Octagon 5066 SBC?\n");
- iounmap((void *)iomapadr);
- ret = -EAGAIN;
- goto out_unmap;
- }
-
- // Print out our little header..
- printk("Octagon 5066 SSD IO:0x%x MEM:0x%x-0x%x\n",PAGE_IO,WINDOW_START,
- WINDOW_START+WINDOW_LENGTH);
-
- for (i=0; i<2; i++) {
- oct5066_mtd[i] = do_map_probe("cfi_probe", &oct5066_map[i]);
- if (!oct5066_mtd[i])
- oct5066_mtd[i] = do_map_probe("jedec", &oct5066_map[i]);
- if (!oct5066_mtd[i])
- oct5066_mtd[i] = do_map_probe("map_ram", &oct5066_map[i]);
- if (!oct5066_mtd[i])
- oct5066_mtd[i] = do_map_probe("map_rom", &oct5066_map[i]);
- if (oct5066_mtd[i]) {
- oct5066_mtd[i]->owner = THIS_MODULE;
- mtd_device_register(oct5066_mtd[i], NULL, 0);
- }
- }
-
- if (!oct5066_mtd[0] && !oct5066_mtd[1]) {
- cleanup_oct5066();
- return -ENXIO;
- }
-
- return 0;
-
- out_unmap:
- iounmap((void *)iomapadr);
- out_rel:
- release_region(PAGE_IO, 1);
- return ret;
-}
-
-module_init(init_oct5066);
-module_exit(cleanup_oct5066);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Jason Gunthorpe <jgg@deltatee.com>, David Woodhouse <dwmw2@infradead.org>");
-MODULE_DESCRIPTION("MTD map driver for Octagon 5066 Single Board Computer");
diff --git a/drivers/mtd/maps/physmap.c b/drivers/mtd/maps/physmap.c
index e7a592c..f73cd46 100644
--- a/drivers/mtd/maps/physmap.c
+++ b/drivers/mtd/maps/physmap.c
@@ -40,9 +40,8 @@ static int physmap_flash_remove(struct platform_device *dev)
info = platform_get_drvdata(dev);
if (info == NULL)
return 0;
- platform_set_drvdata(dev, NULL);
- physmap_data = dev->dev.platform_data;
+ physmap_data = dev_get_platdata(&dev->dev);
if (info->cmtd) {
mtd_device_unregister(info->cmtd);
@@ -69,7 +68,7 @@ static void physmap_set_vpp(struct map_info *map, int state)
unsigned long flags;
pdev = (struct platform_device *)map->map_priv_1;
- physmap_data = pdev->dev.platform_data;
+ physmap_data = dev_get_platdata(&pdev->dev);
if (!physmap_data->set_vpp)
return;
@@ -103,7 +102,7 @@ static int physmap_flash_probe(struct platform_device *dev)
int i;
int devices_found = 0;
- physmap_data = dev->dev.platform_data;
+ physmap_data = dev_get_platdata(&dev->dev);
if (physmap_data == NULL)
return -ENODEV;
diff --git a/drivers/mtd/maps/plat-ram.c b/drivers/mtd/maps/plat-ram.c
index 71fdda2..6762716 100644
--- a/drivers/mtd/maps/plat-ram.c
+++ b/drivers/mtd/maps/plat-ram.c
@@ -84,8 +84,6 @@ static int platram_remove(struct platform_device *pdev)
{
struct platram_info *info = to_platram_info(pdev);
- platform_set_drvdata(pdev, NULL);
-
dev_dbg(&pdev->dev, "removing device\n");
if (info == NULL)
@@ -130,13 +128,13 @@ static int platram_probe(struct platform_device *pdev)
dev_dbg(&pdev->dev, "probe entered\n");
- if (pdev->dev.platform_data == NULL) {
+ if (dev_get_platdata(&pdev->dev) == NULL) {
dev_err(&pdev->dev, "no platform data supplied\n");
err = -ENOENT;
goto exit_error;
}
- pdata = pdev->dev.platform_data;
+ pdata = dev_get_platdata(&pdev->dev);
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (info == NULL) {
diff --git a/drivers/mtd/maps/pxa2xx-flash.c b/drivers/mtd/maps/pxa2xx-flash.c
index acb1dbc..d210d13 100644
--- a/drivers/mtd/maps/pxa2xx-flash.c
+++ b/drivers/mtd/maps/pxa2xx-flash.c
@@ -49,7 +49,7 @@ static const char * const probes[] = { "RedBoot", "cmdlinepart", NULL };
static int pxa2xx_flash_probe(struct platform_device *pdev)
{
- struct flash_platform_data *flash = pdev->dev.platform_data;
+ struct flash_platform_data *flash = dev_get_platdata(&pdev->dev);
struct pxa2xx_flash_info *info;
struct resource *res;
@@ -107,8 +107,6 @@ static int pxa2xx_flash_remove(struct platform_device *dev)
{
struct pxa2xx_flash_info *info = platform_get_drvdata(dev);
- platform_set_drvdata(dev, NULL);
-
mtd_device_unregister(info->mtd);
map_destroy(info->mtd);
diff --git a/drivers/mtd/maps/rbtx4939-flash.c b/drivers/mtd/maps/rbtx4939-flash.c
index ac02fbf..9352512 100644
--- a/drivers/mtd/maps/rbtx4939-flash.c
+++ b/drivers/mtd/maps/rbtx4939-flash.c
@@ -34,10 +34,9 @@ static int rbtx4939_flash_remove(struct platform_device *dev)
info = platform_get_drvdata(dev);
if (!info)
return 0;
- platform_set_drvdata(dev, NULL);
if (info->mtd) {
- struct rbtx4939_flash_data *pdata = dev->dev.platform_data;
+ struct rbtx4939_flash_data *pdata = dev_get_platdata(&dev->dev);
mtd_device_unregister(info->mtd);
map_destroy(info->mtd);
@@ -57,7 +56,7 @@ static int rbtx4939_flash_probe(struct platform_device *dev)
int err = 0;
unsigned long size;
- pdata = dev->dev.platform_data;
+ pdata = dev_get_platdata(&dev->dev);
if (!pdata)
return -ENODEV;
diff --git a/drivers/mtd/maps/sa1100-flash.c b/drivers/mtd/maps/sa1100-flash.c
index 29e3dca..8fc06bf 100644
--- a/drivers/mtd/maps/sa1100-flash.c
+++ b/drivers/mtd/maps/sa1100-flash.c
@@ -248,7 +248,7 @@ static const char * const part_probes[] = { "cmdlinepart", "RedBoot", NULL };
static int sa1100_mtd_probe(struct platform_device *pdev)
{
- struct flash_platform_data *plat = pdev->dev.platform_data;
+ struct flash_platform_data *plat = dev_get_platdata(&pdev->dev);
struct sa_info *info;
int err;
@@ -277,9 +277,8 @@ static int sa1100_mtd_probe(struct platform_device *pdev)
static int __exit sa1100_mtd_remove(struct platform_device *pdev)
{
struct sa_info *info = platform_get_drvdata(pdev);
- struct flash_platform_data *plat = pdev->dev.platform_data;
+ struct flash_platform_data *plat = dev_get_platdata(&pdev->dev);
- platform_set_drvdata(pdev, NULL);
sa1100_destroy(info, plat);
return 0;
diff --git a/drivers/mtd/maps/vmax301.c b/drivers/mtd/maps/vmax301.c
deleted file mode 100644
index 5e68de7..0000000
--- a/drivers/mtd/maps/vmax301.c
+++ /dev/null
@@ -1,196 +0,0 @@
-/* ######################################################################
-
- Tempustech VMAX SBC301 MTD Driver.
-
- The VMAx 301 is a SBC based on . It
- comes with three builtin AMD 29F016B flash chips and a socket for SRAM or
- more flash. Each unit has it's own 8k mapping into a settable region
- (0xD8000). There are two 8k mappings for each MTD, the first is always set
- to the lower 8k of the device the second is paged. Writing a 16 bit page
- value to anywhere in the first 8k will cause the second 8k to page around.
-
- To boot the device a bios extension must be installed into the first 8k
- of flash that is smart enough to copy itself down, page in the rest of
- itself and begin executing.
-
- ##################################################################### */
-
-#include <linux/module.h>
-#include <linux/ioport.h>
-#include <linux/init.h>
-#include <linux/spinlock.h>
-#include <asm/io.h>
-
-#include <linux/mtd/map.h>
-#include <linux/mtd/mtd.h>
-
-
-#define WINDOW_START 0xd8000
-#define WINDOW_LENGTH 0x2000
-#define WINDOW_SHIFT 25
-#define WINDOW_MASK 0x1FFF
-
-/* Actually we could use two spinlocks, but we'd have to have
- more private space in the struct map_info. We lose a little
- performance like this, but we'd probably lose more by having
- the extra indirection from having one of the map->map_priv
- fields pointing to yet another private struct.
-*/
-static DEFINE_SPINLOCK(vmax301_spin);
-
-static void __vmax301_page(struct map_info *map, unsigned long page)
-{
- writew(page, map->map_priv_2 - WINDOW_LENGTH);
- map->map_priv_1 = page;
-}
-
-static inline void vmax301_page(struct map_info *map,
- unsigned long ofs)
-{
- unsigned long page = (ofs >> WINDOW_SHIFT);
- if (map->map_priv_1 != page)
- __vmax301_page(map, page);
-}
-
-static map_word vmax301_read8(struct map_info *map, unsigned long ofs)
-{
- map_word ret;
- spin_lock(&vmax301_spin);
- vmax301_page(map, ofs);
- ret.x[0] = readb(map->map_priv_2 + (ofs & WINDOW_MASK));
- spin_unlock(&vmax301_spin);
- return ret;
-}
-
-static void vmax301_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
-{
- while(len) {
- unsigned long thislen = len;
- if (len > (WINDOW_LENGTH - (from & WINDOW_MASK)))
- thislen = WINDOW_LENGTH-(from & WINDOW_MASK);
- spin_lock(&vmax301_spin);
- vmax301_page(map, from);
- memcpy_fromio(to, map->map_priv_2 + from, thislen);
- spin_unlock(&vmax301_spin);
- to += thislen;
- from += thislen;
- len -= thislen;
- }
-}
-
-static void vmax301_write8(struct map_info *map, map_word d, unsigned long adr)
-{
- spin_lock(&vmax301_spin);
- vmax301_page(map, adr);
- writeb(d.x[0], map->map_priv_2 + (adr & WINDOW_MASK));
- spin_unlock(&vmax301_spin);
-}
-
-static void vmax301_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len)
-{
- while(len) {
- unsigned long thislen = len;
- if (len > (WINDOW_LENGTH - (to & WINDOW_MASK)))
- thislen = WINDOW_LENGTH-(to & WINDOW_MASK);
-
- spin_lock(&vmax301_spin);
- vmax301_page(map, to);
- memcpy_toio(map->map_priv_2 + to, from, thislen);
- spin_unlock(&vmax301_spin);
- to += thislen;
- from += thislen;
- len -= thislen;
- }
-}
-
-static struct map_info vmax_map[2] = {
- {
- .name = "VMAX301 Internal Flash",
- .phys = NO_XIP,
- .size = 3*2*1024*1024,
- .bankwidth = 1,
- .read = vmax301_read8,
- .copy_from = vmax301_copy_from,
- .write = vmax301_write8,
- .copy_to = vmax301_copy_to,
- .map_priv_1 = WINDOW_START + WINDOW_LENGTH,
- .map_priv_2 = 0xFFFFFFFF
- },
- {
- .name = "VMAX301 Socket",
- .phys = NO_XIP,
- .size = 0,
- .bankwidth = 1,
- .read = vmax301_read8,
- .copy_from = vmax301_copy_from,
- .write = vmax301_write8,
- .copy_to = vmax301_copy_to,
- .map_priv_1 = WINDOW_START + (3*WINDOW_LENGTH),
- .map_priv_2 = 0xFFFFFFFF
- }
-};
-
-static struct mtd_info *vmax_mtd[2] = {NULL, NULL};
-
-static void __exit cleanup_vmax301(void)
-{
- int i;
-
- for (i=0; i<2; i++) {
- if (vmax_mtd[i]) {
- mtd_device_unregister(vmax_mtd[i]);
- map_destroy(vmax_mtd[i]);
- }
- }
- iounmap((void *)vmax_map[0].map_priv_1 - WINDOW_START);
-}
-
-static int __init init_vmax301(void)
-{
- int i;
- unsigned long iomapadr;
- // Print out our little header..
- printk("Tempustech VMAX 301 MEM:0x%x-0x%x\n",WINDOW_START,
- WINDOW_START+4*WINDOW_LENGTH);
-
- iomapadr = (unsigned long)ioremap(WINDOW_START, WINDOW_LENGTH*4);
- if (!iomapadr) {
- printk("Failed to ioremap memory region\n");
- return -EIO;
- }
- /* Put the address in the map's private data area.
- We store the actual MTD IO address rather than the
- address of the first half, because it's used more
- often.
- */
- vmax_map[0].map_priv_2 = iomapadr + WINDOW_START;
- vmax_map[1].map_priv_2 = iomapadr + (3*WINDOW_START);
-
- for (i=0; i<2; i++) {
- vmax_mtd[i] = do_map_probe("cfi_probe", &vmax_map[i]);
- if (!vmax_mtd[i])
- vmax_mtd[i] = do_map_probe("jedec", &vmax_map[i]);
- if (!vmax_mtd[i])
- vmax_mtd[i] = do_map_probe("map_ram", &vmax_map[i]);
- if (!vmax_mtd[i])
- vmax_mtd[i] = do_map_probe("map_rom", &vmax_map[i]);
- if (vmax_mtd[i]) {
- vmax_mtd[i]->owner = THIS_MODULE;
- mtd_device_register(vmax_mtd[i], NULL, 0);
- }
- }
-
- if (!vmax_mtd[0] && !vmax_mtd[1]) {
- iounmap((void *)iomapadr);
- return -ENXIO;
- }
-
- return 0;
-}
-
-module_init(init_vmax301);
-module_exit(cleanup_vmax301);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
-MODULE_DESCRIPTION("MTD map driver for Tempustech VMAX SBC301 board");
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index 048c823..5e14d54 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -285,6 +285,16 @@ static DEVICE_ATTR(bitflip_threshold, S_IRUGO | S_IWUSR,
mtd_bitflip_threshold_show,
mtd_bitflip_threshold_store);
+static ssize_t mtd_ecc_step_size_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mtd_info *mtd = dev_get_drvdata(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%u\n", mtd->ecc_step_size);
+
+}
+static DEVICE_ATTR(ecc_step_size, S_IRUGO, mtd_ecc_step_size_show, NULL);
+
static struct attribute *mtd_attrs[] = {
&dev_attr_type.attr,
&dev_attr_flags.attr,
@@ -296,6 +306,7 @@ static struct attribute *mtd_attrs[] = {
&dev_attr_numeraseregions.attr,
&dev_attr_name.attr,
&dev_attr_ecc_strength.attr,
+ &dev_attr_ecc_step_size.attr,
&dev_attr_bitflip_threshold.attr,
NULL,
};
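The new ecc_step_size attribute follows the standard read-only sysfs pattern: a show() callback, no store() callback, and S_IRUGO permissions, with the attribute then listed in the device's attribute array as above. As a standalone sketch with hypothetical names:

#include <linux/device.h>
#include <linux/sysfs.h>

struct foo {
	unsigned int value;
};

static ssize_t foo_value_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct foo *p = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%u\n", p->value);
}
/* S_IRUGO plus a NULL store hook makes the file world-readable, read-only */
static DEVICE_ATTR(value, S_IRUGO, foo_value_show, NULL);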
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index 3014933..6e732c3 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -516,6 +516,7 @@ static struct mtd_part *allocate_partition(struct mtd_info *master,
}
slave->mtd.ecclayout = master->ecclayout;
+ slave->mtd.ecc_step_size = master->ecc_step_size;
slave->mtd.ecc_strength = master->ecc_strength;
slave->mtd.bitflip_threshold = master->bitflip_threshold;
diff --git a/drivers/mtd/mtdswap.c b/drivers/mtd/mtdswap.c
index c92f0f6..8b33b26 100644
--- a/drivers/mtd/mtdswap.c
+++ b/drivers/mtd/mtdswap.c
@@ -1425,7 +1425,7 @@ static void mtdswap_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
return;
while ((this_opt = strsep(&parts, ",")) != NULL) {
- if (strict_strtoul(this_opt, 0, &part) < 0)
+ if (kstrtoul(this_opt, 0, &part) < 0)
return;
if (mtd->index == part)
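strict_strtoul() was a long-deprecated alias; kstrtoul() takes the same three arguments but returns a proper errno and rejects trailing junk, so the conversion above is drop-in. A minimal usage sketch:

#include <linux/kernel.h>

/* base 0 accepts decimal, 0x-prefixed hex and 0-prefixed octal */
static int parse_part_number(const char *opt, unsigned long *part)
{
	return kstrtoul(opt, 0, part);	/* 0, -EINVAL or -ERANGE */
}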
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 50543f1..d885298 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -43,6 +43,7 @@ config MTD_SM_COMMON
config MTD_NAND_DENALI
tristate "Support Denali NAND controller"
+ depends on HAS_DMA
help
Enable support for the Denali NAND controller. This should be
combined with either the PCI or platform drivers to provide device
@@ -75,7 +76,7 @@ config MTD_NAND_DENALI_SCRATCH_REG_ADDR
config MTD_NAND_GPIO
tristate "GPIO NAND Flash driver"
- depends on GPIOLIB && ARM
+ depends on GPIOLIB
help
This enables a GPIO based NAND flash driver.
@@ -354,7 +355,7 @@ config MTD_NAND_ATMEL
config MTD_NAND_PXA3xx
tristate "Support for NAND flash devices on PXA3xx"
- depends on PXA3xx || ARCH_MMP
+ depends on PXA3xx || ARCH_MMP || PLAT_ORION
help
This enables the driver for the NAND flash device found on
PXA3xx processors
@@ -432,13 +433,6 @@ config MTD_NAND_PLATFORM
devices. You will need to provide platform-specific functions
via platform_data.
-config MTD_ALAUDA
- tristate "MTD driver for Olympus MAUSB-10 and Fujifilm DPC-R1"
- depends on USB
- help
- These two (and possibly other) Alauda-based cardreaders for
- SmartMedia and xD allow raw flash access.
-
config MTD_NAND_ORION
tristate "NAND Flash support for Marvell Orion SoC"
depends on PLAT_ORION
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index bb81891..542b568 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -31,7 +31,6 @@ obj-$(CONFIG_MTD_NAND_CM_X270) += cmx270_nand.o
obj-$(CONFIG_MTD_NAND_PXA3xx) += pxa3xx_nand.o
obj-$(CONFIG_MTD_NAND_TMIO) += tmio_nand.o
obj-$(CONFIG_MTD_NAND_PLATFORM) += plat_nand.o
-obj-$(CONFIG_MTD_ALAUDA) += alauda.o
obj-$(CONFIG_MTD_NAND_PASEMI) += pasemi_nand.o
obj-$(CONFIG_MTD_NAND_ORION) += orion_nand.o
obj-$(CONFIG_MTD_NAND_FSL_ELBC) += fsl_elbc_nand.o
diff --git a/drivers/mtd/nand/alauda.c b/drivers/mtd/nand/alauda.c
deleted file mode 100644
index 60a0dfd..0000000
--- a/drivers/mtd/nand/alauda.c
+++ /dev/null
@@ -1,723 +0,0 @@
-/*
- * MTD driver for Alauda chips
- *
- * Copyright (C) 2007 Joern Engel <joern@logfs.org>
- *
- * Based on drivers/usb/usb-skeleton.c which is:
- * Copyright (C) 2001-2004 Greg Kroah-Hartman (greg@kroah.com)
- * and on drivers/usb/storage/alauda.c, which is:
- * (c) 2005 Daniel Drake <dsd@gentoo.org>
- *
- * Idea and initial work by Arnd Bergmann <arnd@arndb.de>
- */
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/kref.h>
-#include <linux/usb.h>
-#include <linux/mutex.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/nand_ecc.h>
-
-/* Control commands */
-#define ALAUDA_GET_XD_MEDIA_STATUS 0x08
-#define ALAUDA_ACK_XD_MEDIA_CHANGE 0x0a
-#define ALAUDA_GET_XD_MEDIA_SIG 0x86
-
-/* Common prefix */
-#define ALAUDA_BULK_CMD 0x40
-
-/* The two ports */
-#define ALAUDA_PORT_XD 0x00
-#define ALAUDA_PORT_SM 0x01
-
-/* Bulk commands */
-#define ALAUDA_BULK_READ_PAGE 0x84
-#define ALAUDA_BULK_READ_OOB 0x85 /* don't use, there's a chip bug */
-#define ALAUDA_BULK_READ_BLOCK 0x94
-#define ALAUDA_BULK_ERASE_BLOCK 0xa3
-#define ALAUDA_BULK_WRITE_PAGE 0xa4
-#define ALAUDA_BULK_WRITE_BLOCK 0xb4
-#define ALAUDA_BULK_RESET_MEDIA 0xe0
-
-/* Address shifting */
-#define PBA_LO(pba) ((pba & 0xF) << 5)
-#define PBA_HI(pba) (pba >> 3)
-#define PBA_ZONE(pba) (pba >> 11)
-
-#define TIMEOUT HZ
-
-static const struct usb_device_id alauda_table[] = {
- { USB_DEVICE(0x0584, 0x0008) }, /* Fujifilm DPC-R1 */
- { USB_DEVICE(0x07b4, 0x010a) }, /* Olympus MAUSB-10 */
- { }
-};
-MODULE_DEVICE_TABLE(usb, alauda_table);
-
-struct alauda_card {
- u8 id; /* id byte */
- u8 chipshift; /* 1<<chipshift total size */
- u8 pageshift; /* 1<<pageshift page size */
- u8 blockshift; /* 1<<blockshift block size */
-};
-
-struct alauda {
- struct usb_device *dev;
- struct usb_interface *interface;
- struct mtd_info *mtd;
- struct alauda_card *card;
- struct mutex card_mutex;
- u32 pagemask;
- u32 bytemask;
- u32 blockmask;
- unsigned int write_out;
- unsigned int bulk_in;
- unsigned int bulk_out;
- u8 port;
- struct kref kref;
-};
-
-static struct alauda_card alauda_card_ids[] = {
- /* NAND flash */
- { 0x6e, 20, 8, 12}, /* 1 MB */
- { 0xe8, 20, 8, 12}, /* 1 MB */
- { 0xec, 20, 8, 12}, /* 1 MB */
- { 0x64, 21, 8, 12}, /* 2 MB */
- { 0xea, 21, 8, 12}, /* 2 MB */
- { 0x6b, 22, 9, 13}, /* 4 MB */
- { 0xe3, 22, 9, 13}, /* 4 MB */
- { 0xe5, 22, 9, 13}, /* 4 MB */
- { 0xe6, 23, 9, 13}, /* 8 MB */
- { 0x73, 24, 9, 14}, /* 16 MB */
- { 0x75, 25, 9, 14}, /* 32 MB */
- { 0x76, 26, 9, 14}, /* 64 MB */
- { 0x79, 27, 9, 14}, /* 128 MB */
- { 0x71, 28, 9, 14}, /* 256 MB */
-
- /* MASK ROM */
- { 0x5d, 21, 9, 13}, /* 2 MB */
- { 0xd5, 22, 9, 13}, /* 4 MB */
- { 0xd6, 23, 9, 13}, /* 8 MB */
- { 0x57, 24, 9, 13}, /* 16 MB */
- { 0x58, 25, 9, 13}, /* 32 MB */
- { }
-};
-
-static struct alauda_card *get_card(u8 id)
-{
- struct alauda_card *card;
-
- for (card = alauda_card_ids; card->id; card++)
- if (card->id == id)
- return card;
- return NULL;
-}
-
-static void alauda_delete(struct kref *kref)
-{
- struct alauda *al = container_of(kref, struct alauda, kref);
-
- if (al->mtd) {
- mtd_device_unregister(al->mtd);
- kfree(al->mtd);
- }
- usb_put_dev(al->dev);
- kfree(al);
-}
-
-static int alauda_get_media_status(struct alauda *al, void *buf)
-{
- int ret;
-
- mutex_lock(&al->card_mutex);
- ret = usb_control_msg(al->dev, usb_rcvctrlpipe(al->dev, 0),
- ALAUDA_GET_XD_MEDIA_STATUS, 0xc0, 0, 1, buf, 2, HZ);
- mutex_unlock(&al->card_mutex);
- return ret;
-}
-
-static int alauda_ack_media(struct alauda *al)
-{
- int ret;
-
- mutex_lock(&al->card_mutex);
- ret = usb_control_msg(al->dev, usb_sndctrlpipe(al->dev, 0),
- ALAUDA_ACK_XD_MEDIA_CHANGE, 0x40, 0, 1, NULL, 0, HZ);
- mutex_unlock(&al->card_mutex);
- return ret;
-}
-
-static int alauda_get_media_signatures(struct alauda *al, void *buf)
-{
- int ret;
-
- mutex_lock(&al->card_mutex);
- ret = usb_control_msg(al->dev, usb_rcvctrlpipe(al->dev, 0),
- ALAUDA_GET_XD_MEDIA_SIG, 0xc0, 0, 0, buf, 4, HZ);
- mutex_unlock(&al->card_mutex);
- return ret;
-}
-
-static void alauda_reset(struct alauda *al)
-{
- u8 command[] = {
- ALAUDA_BULK_CMD, ALAUDA_BULK_RESET_MEDIA, 0, 0,
- 0, 0, 0, 0, al->port
- };
- mutex_lock(&al->card_mutex);
- usb_bulk_msg(al->dev, al->bulk_out, command, 9, NULL, HZ);
- mutex_unlock(&al->card_mutex);
-}
-
-static void correct_data(void *buf, void *read_ecc,
- int *corrected, int *uncorrected)
-{
- u8 calc_ecc[3];
- int err;
-
- nand_calculate_ecc(NULL, buf, calc_ecc);
- err = nand_correct_data(NULL, buf, read_ecc, calc_ecc);
- if (err) {
- if (err > 0)
- (*corrected)++;
- else
- (*uncorrected)++;
- }
-}
-
-struct alauda_sg_request {
- struct urb *urb[3];
- struct completion comp;
-};
-
-static void alauda_complete(struct urb *urb)
-{
- struct completion *comp = urb->context;
-
- if (comp)
- complete(comp);
-}
-
-static int __alauda_read_page(struct mtd_info *mtd, loff_t from, void *buf,
- void *oob)
-{
- struct alauda_sg_request sg;
- struct alauda *al = mtd->priv;
- u32 pba = from >> al->card->blockshift;
- u32 page = (from >> al->card->pageshift) & al->pagemask;
- u8 command[] = {
- ALAUDA_BULK_CMD, ALAUDA_BULK_READ_PAGE, PBA_HI(pba),
- PBA_ZONE(pba), 0, PBA_LO(pba) + page, 1, 0, al->port
- };
- int i, err;
-
- for (i=0; i<3; i++)
- sg.urb[i] = NULL;
-
- err = -ENOMEM;
- for (i=0; i<3; i++) {
- sg.urb[i] = usb_alloc_urb(0, GFP_NOIO);
- if (!sg.urb[i])
- goto out;
- }
- init_completion(&sg.comp);
- usb_fill_bulk_urb(sg.urb[0], al->dev, al->bulk_out, command, 9,
- alauda_complete, NULL);
- usb_fill_bulk_urb(sg.urb[1], al->dev, al->bulk_in, buf, mtd->writesize,
- alauda_complete, NULL);
- usb_fill_bulk_urb(sg.urb[2], al->dev, al->bulk_in, oob, 16,
- alauda_complete, &sg.comp);
-
- mutex_lock(&al->card_mutex);
- for (i=0; i<3; i++) {
- err = usb_submit_urb(sg.urb[i], GFP_NOIO);
- if (err)
- goto cancel;
- }
- if (!wait_for_completion_timeout(&sg.comp, TIMEOUT)) {
- err = -ETIMEDOUT;
-cancel:
- for (i=0; i<3; i++) {
- usb_kill_urb(sg.urb[i]);
- }
- }
- mutex_unlock(&al->card_mutex);
-
-out:
- usb_free_urb(sg.urb[0]);
- usb_free_urb(sg.urb[1]);
- usb_free_urb(sg.urb[2]);
- return err;
-}
-
-static int alauda_read_page(struct mtd_info *mtd, loff_t from,
- void *buf, u8 *oob, int *corrected, int *uncorrected)
-{
- int err;
-
- err = __alauda_read_page(mtd, from, buf, oob);
- if (err)
- return err;
- correct_data(buf, oob+13, corrected, uncorrected);
- correct_data(buf+256, oob+8, corrected, uncorrected);
- return 0;
-}
-
-static int alauda_write_page(struct mtd_info *mtd, loff_t to, void *buf,
- void *oob)
-{
- struct alauda_sg_request sg;
- struct alauda *al = mtd->priv;
- u32 pba = to >> al->card->blockshift;
- u32 page = (to >> al->card->pageshift) & al->pagemask;
- u8 command[] = {
- ALAUDA_BULK_CMD, ALAUDA_BULK_WRITE_PAGE, PBA_HI(pba),
- PBA_ZONE(pba), 0, PBA_LO(pba) + page, 32, 0, al->port
- };
- int i, err;
-
- for (i=0; i<3; i++)
- sg.urb[i] = NULL;
-
- err = -ENOMEM;
- for (i=0; i<3; i++) {
- sg.urb[i] = usb_alloc_urb(0, GFP_NOIO);
- if (!sg.urb[i])
- goto out;
- }
- init_completion(&sg.comp);
- usb_fill_bulk_urb(sg.urb[0], al->dev, al->bulk_out, command, 9,
- alauda_complete, NULL);
- usb_fill_bulk_urb(sg.urb[1], al->dev, al->write_out, buf,mtd->writesize,
- alauda_complete, NULL);
- usb_fill_bulk_urb(sg.urb[2], al->dev, al->write_out, oob, 16,
- alauda_complete, &sg.comp);
-
- mutex_lock(&al->card_mutex);
- for (i=0; i<3; i++) {
- err = usb_submit_urb(sg.urb[i], GFP_NOIO);
- if (err)
- goto cancel;
- }
- if (!wait_for_completion_timeout(&sg.comp, TIMEOUT)) {
- err = -ETIMEDOUT;
-cancel:
- for (i=0; i<3; i++) {
- usb_kill_urb(sg.urb[i]);
- }
- }
- mutex_unlock(&al->card_mutex);
-
-out:
- usb_free_urb(sg.urb[0]);
- usb_free_urb(sg.urb[1]);
- usb_free_urb(sg.urb[2]);
- return err;
-}
-
-static int alauda_erase_block(struct mtd_info *mtd, loff_t ofs)
-{
- struct alauda_sg_request sg;
- struct alauda *al = mtd->priv;
- u32 pba = ofs >> al->card->blockshift;
- u8 command[] = {
- ALAUDA_BULK_CMD, ALAUDA_BULK_ERASE_BLOCK, PBA_HI(pba),
- PBA_ZONE(pba), 0, PBA_LO(pba), 0x02, 0, al->port
- };
- u8 buf[2];
- int i, err;
-
- for (i=0; i<2; i++)
- sg.urb[i] = NULL;
-
- err = -ENOMEM;
- for (i=0; i<2; i++) {
- sg.urb[i] = usb_alloc_urb(0, GFP_NOIO);
- if (!sg.urb[i])
- goto out;
- }
- init_completion(&sg.comp);
- usb_fill_bulk_urb(sg.urb[0], al->dev, al->bulk_out, command, 9,
- alauda_complete, NULL);
- usb_fill_bulk_urb(sg.urb[1], al->dev, al->bulk_in, buf, 2,
- alauda_complete, &sg.comp);
-
- mutex_lock(&al->card_mutex);
- for (i=0; i<2; i++) {
- err = usb_submit_urb(sg.urb[i], GFP_NOIO);
- if (err)
- goto cancel;
- }
- if (!wait_for_completion_timeout(&sg.comp, TIMEOUT)) {
- err = -ETIMEDOUT;
-cancel:
- for (i=0; i<2; i++) {
- usb_kill_urb(sg.urb[i]);
- }
- }
- mutex_unlock(&al->card_mutex);
-
-out:
- usb_free_urb(sg.urb[0]);
- usb_free_urb(sg.urb[1]);
- return err;
-}
-
-static int alauda_read_oob(struct mtd_info *mtd, loff_t from, void *oob)
-{
- static u8 ignore_buf[512]; /* write only */
-
- return __alauda_read_page(mtd, from, ignore_buf, oob);
-}
-
-static int alauda_isbad(struct mtd_info *mtd, loff_t ofs)
-{
- u8 oob[16];
- int err;
-
- err = alauda_read_oob(mtd, ofs, oob);
- if (err)
- return err;
-
- /* A block is marked bad if two or more bits are zero */
- return hweight8(oob[5]) >= 7 ? 0 : 1;
-}
-
-static int alauda_bounce_read(struct mtd_info *mtd, loff_t from, size_t len,
- size_t *retlen, u_char *buf)
-{
- struct alauda *al = mtd->priv;
- void *bounce_buf;
- int err, corrected=0, uncorrected=0;
-
- bounce_buf = kmalloc(mtd->writesize, GFP_KERNEL);
- if (!bounce_buf)
- return -ENOMEM;
-
- *retlen = len;
- while (len) {
- u8 oob[16];
- size_t byte = from & al->bytemask;
- size_t cplen = min(len, mtd->writesize - byte);
-
- err = alauda_read_page(mtd, from, bounce_buf, oob,
- &corrected, &uncorrected);
- if (err)
- goto out;
-
- memcpy(buf, bounce_buf + byte, cplen);
- buf += cplen;
- from += cplen;
- len -= cplen;
- }
- err = 0;
- if (corrected)
- err = 1; /* return max_bitflips per ecc step */
- if (uncorrected)
- err = -EBADMSG;
-out:
- kfree(bounce_buf);
- return err;
-}
-
-static int alauda_read(struct mtd_info *mtd, loff_t from, size_t len,
- size_t *retlen, u_char *buf)
-{
- struct alauda *al = mtd->priv;
- int err, corrected=0, uncorrected=0;
-
- if ((from & al->bytemask) || (len & al->bytemask))
- return alauda_bounce_read(mtd, from, len, retlen, buf);
-
- *retlen = len;
- while (len) {
- u8 oob[16];
-
- err = alauda_read_page(mtd, from, buf, oob,
- &corrected, &uncorrected);
- if (err)
- return err;
-
- buf += mtd->writesize;
- from += mtd->writesize;
- len -= mtd->writesize;
- }
- err = 0;
- if (corrected)
- err = 1; /* return max_bitflips per ecc step */
- if (uncorrected)
- err = -EBADMSG;
- return err;
-}
-
-static int alauda_write(struct mtd_info *mtd, loff_t to, size_t len,
- size_t *retlen, const u_char *buf)
-{
- struct alauda *al = mtd->priv;
- int err;
-
- if ((to & al->bytemask) || (len & al->bytemask))
- return -EINVAL;
-
- *retlen = len;
- while (len) {
- u32 page = (to >> al->card->pageshift) & al->pagemask;
- u8 oob[16] = { 'h', 'e', 'l', 'l', 'o', 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
-
- /* don't write to bad blocks */
- if (page == 0) {
- err = alauda_isbad(mtd, to);
- if (err) {
- return -EIO;
- }
- }
- nand_calculate_ecc(mtd, buf, &oob[13]);
- nand_calculate_ecc(mtd, buf+256, &oob[8]);
-
- err = alauda_write_page(mtd, to, (void*)buf, oob);
- if (err)
- return err;
-
- buf += mtd->writesize;
- to += mtd->writesize;
- len -= mtd->writesize;
- }
- return 0;
-}
-
-static int __alauda_erase(struct mtd_info *mtd, struct erase_info *instr)
-{
- struct alauda *al = mtd->priv;
- u32 ofs = instr->addr;
- u32 len = instr->len;
- int err;
-
- if ((ofs & al->blockmask) || (len & al->blockmask))
- return -EINVAL;
-
- while (len) {
- /* don't erase bad blocks */
- err = alauda_isbad(mtd, ofs);
- if (err > 0)
- err = -EIO;
- if (err < 0)
- return err;
-
- err = alauda_erase_block(mtd, ofs);
- if (err < 0)
- return err;
-
- ofs += mtd->erasesize;
- len -= mtd->erasesize;
- }
- return 0;
-}
-
-static int alauda_erase(struct mtd_info *mtd, struct erase_info *instr)
-{
- int err;
-
- err = __alauda_erase(mtd, instr);
- instr->state = err ? MTD_ERASE_FAILED : MTD_ERASE_DONE;
- mtd_erase_callback(instr);
- return err;
-}
-
-static int alauda_init_media(struct alauda *al)
-{
- u8 buf[4], *b0=buf, *b1=buf+1;
- struct alauda_card *card;
- struct mtd_info *mtd;
- int err;
-
- mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
- if (!mtd)
- return -ENOMEM;
-
- for (;;) {
- err = alauda_get_media_status(al, buf);
- if (err < 0)
- goto error;
- if (*b0 & 0x10)
- break;
- msleep(20);
- }
-
- err = alauda_ack_media(al);
- if (err)
- goto error;
-
- msleep(10);
-
- err = alauda_get_media_status(al, buf);
- if (err < 0)
- goto error;
-
- if (*b0 != 0x14) {
- /* media not ready */
- err = -EIO;
- goto error;
- }
- err = alauda_get_media_signatures(al, buf);
- if (err < 0)
- goto error;
-
- card = get_card(*b1);
- if (!card) {
- printk(KERN_ERR"Alauda: unknown card id %02x\n", *b1);
- err = -EIO;
- goto error;
- }
- printk(KERN_INFO"pagesize=%x\nerasesize=%x\nsize=%xMiB\n",
- 1<<card->pageshift, 1<<card->blockshift,
- 1<<(card->chipshift-20));
- al->card = card;
- al->pagemask = (1 << (card->blockshift - card->pageshift)) - 1;
- al->bytemask = (1 << card->pageshift) - 1;
- al->blockmask = (1 << card->blockshift) - 1;
-
- mtd->name = "alauda";
- mtd->size = 1<<card->chipshift;
- mtd->erasesize = 1<<card->blockshift;
- mtd->writesize = 1<<card->pageshift;
- mtd->type = MTD_NANDFLASH;
- mtd->flags = MTD_CAP_NANDFLASH;
- mtd->_read = alauda_read;
- mtd->_write = alauda_write;
- mtd->_erase = alauda_erase;
- mtd->_block_isbad = alauda_isbad;
- mtd->priv = al;
- mtd->owner = THIS_MODULE;
- mtd->ecc_strength = 1;
-
- err = mtd_device_register(mtd, NULL, 0);
- if (err) {
- err = -ENFILE;
- goto error;
- }
-
- al->mtd = mtd;
- alauda_reset(al); /* no clue whether this is necessary */
- return 0;
-error:
- kfree(mtd);
- return err;
-}
-
-static int alauda_check_media(struct alauda *al)
-{
- u8 buf[2], *b0 = buf, *b1 = buf+1;
- int err;
-
- err = alauda_get_media_status(al, buf);
- if (err < 0)
- return err;
-
- if ((*b1 & 0x01) == 0) {
- /* door open */
- return -EIO;
- }
- if ((*b0 & 0x80) || ((*b0 & 0x1F) == 0x10)) {
- /* no media ? */
- return -EIO;
- }
- if (*b0 & 0x08) {
- /* media change ? */
- return alauda_init_media(al);
- }
- return 0;
-}
-
-static int alauda_probe(struct usb_interface *interface,
- const struct usb_device_id *id)
-{
- struct alauda *al;
- struct usb_host_interface *iface;
- struct usb_endpoint_descriptor *ep,
- *ep_in=NULL, *ep_out=NULL, *ep_wr=NULL;
- int i, err = -ENOMEM;
-
- al = kzalloc(2*sizeof(*al), GFP_KERNEL);
- if (!al)
- goto error;
-
- kref_init(&al->kref);
- usb_set_intfdata(interface, al);
-
- al->dev = usb_get_dev(interface_to_usbdev(interface));
- al->interface = interface;
-
- iface = interface->cur_altsetting;
- for (i = 0; i < iface->desc.bNumEndpoints; ++i) {
- ep = &iface->endpoint[i].desc;
-
- if (usb_endpoint_is_bulk_in(ep)) {
- ep_in = ep;
- } else if (usb_endpoint_is_bulk_out(ep)) {
- if (i==0)
- ep_wr = ep;
- else
- ep_out = ep;
- }
- }
- err = -EIO;
- if (!ep_wr || !ep_in || !ep_out)
- goto error;
-
- al->write_out = usb_sndbulkpipe(al->dev,
- usb_endpoint_num(ep_wr));
- al->bulk_in = usb_rcvbulkpipe(al->dev,
- usb_endpoint_num(ep_in));
- al->bulk_out = usb_sndbulkpipe(al->dev,
- usb_endpoint_num(ep_out));
-
- /* second device is identical up to now */
- memcpy(al+1, al, sizeof(*al));
-
- mutex_init(&al[0].card_mutex);
- mutex_init(&al[1].card_mutex);
-
- al[0].port = ALAUDA_PORT_XD;
- al[1].port = ALAUDA_PORT_SM;
-
- dev_info(&interface->dev, "alauda probed\n");
- alauda_check_media(al);
- alauda_check_media(al+1);
-
- return 0;
-
-error:
- if (al)
- kref_put(&al->kref, alauda_delete);
- return err;
-}
-
-static void alauda_disconnect(struct usb_interface *interface)
-{
- struct alauda *al;
-
- al = usb_get_intfdata(interface);
- usb_set_intfdata(interface, NULL);
-
- /* FIXME: prevent more I/O from starting */
-
- /* decrement our usage count */
- if (al)
- kref_put(&al->kref, alauda_delete);
-
- dev_info(&interface->dev, "alauda gone");
-}
-
-static struct usb_driver alauda_driver = {
- .name = "alauda",
- .probe = alauda_probe,
- .disconnect = alauda_disconnect,
- .id_table = alauda_table,
-};
-
-module_usb_driver(alauda_driver);
-
-MODULE_LICENSE("GPL");
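Most of the I/O helpers in the driver removed above were variations on one idiom: allocate a few bulk URBs, chain a completion to the last one, submit them all, and kill every URB if the completion times out. A condensed, hypothetical version of that idiom (names and the two-URB shape are illustrative):

#include <linux/completion.h>
#include <linux/usb.h>

static void batch_complete(struct urb *urb)
{
	struct completion *done = urb->context;

	if (done)
		complete(done);
}

/* send one command URB, then read one reply URB; kill both on timeout */
static int send_cmd_get_reply(struct usb_device *udev,
			      unsigned int out_pipe, unsigned int in_pipe,
			      void *cmd, int cmd_len,
			      void *reply, int reply_len)
{
	struct urb *urb[2] = { NULL, NULL };
	struct completion done;
	int i, err = -ENOMEM;

	for (i = 0; i < 2; i++) {
		urb[i] = usb_alloc_urb(0, GFP_NOIO);
		if (!urb[i])
			goto out;
	}

	init_completion(&done);
	/* only the final URB carries the completion as its context */
	usb_fill_bulk_urb(urb[0], udev, out_pipe, cmd, cmd_len,
			  batch_complete, NULL);
	usb_fill_bulk_urb(urb[1], udev, in_pipe, reply, reply_len,
			  batch_complete, &done);

	for (i = 0; i < 2; i++) {
		err = usb_submit_urb(urb[i], GFP_NOIO);
		if (err)
			goto kill;
	}

	if (!wait_for_completion_timeout(&done, HZ)) {
		err = -ETIMEDOUT;
kill:
		for (i = 0; i < 2; i++)
			usb_kill_urb(urb[i]);
	}
out:
	usb_free_urb(urb[0]);	/* NULL-safe */
	usb_free_urb(urb[1]);
	return err;
}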
diff --git a/drivers/mtd/nand/ams-delta.c b/drivers/mtd/nand/ams-delta.c
index f1d71cd..8611eb4 100644
--- a/drivers/mtd/nand/ams-delta.c
+++ b/drivers/mtd/nand/ams-delta.c
@@ -258,7 +258,6 @@ static int ams_delta_init(struct platform_device *pdev)
out_mtd:
gpio_free_array(_mandatory_gpio, ARRAY_SIZE(_mandatory_gpio));
out_gpio:
- platform_set_drvdata(pdev, NULL);
gpio_free(AMS_DELTA_GPIO_PIN_NAND_RB);
iounmap(io_base);
out_free:
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c
index 2d23d29..060feea 100644
--- a/drivers/mtd/nand/atmel_nand.c
+++ b/drivers/mtd/nand/atmel_nand.c
@@ -18,6 +18,9 @@
* Add Programmable Multibit ECC support for various AT91 SoC
* © Copyright 2012 ATMEL, Hong Xu
*
+ * Add Nand Flash Controller support for SAMA5 SoC
+ * © Copyright 2013 ATMEL, Josh Wu (josh.wu@atmel.com)
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
@@ -37,13 +40,12 @@
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
+#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/gpio.h>
+#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/platform_data/atmel.h>
-#include <linux/pinctrl/consumer.h>
-
-#include <mach/cpu.h>
static int use_dma = 1;
module_param(use_dma, int, 0);
@@ -58,6 +60,7 @@ module_param(on_flash_bbt, int, 0);
__raw_writel((value), add + ATMEL_ECC_##reg)
#include "atmel_nand_ecc.h" /* Hardware ECC registers */
+#include "atmel_nand_nfc.h" /* Nand Flash Controller definition */
/* oob layout for large page size
* bad block info is on bytes 0 and 1
@@ -85,6 +88,23 @@ static struct nand_ecclayout atmel_oobinfo_small = {
},
};
+struct atmel_nfc {
+ void __iomem *base_cmd_regs;
+ void __iomem *hsmc_regs;
+ void __iomem *sram_bank0;
+ dma_addr_t sram_bank0_phys;
+ bool use_nfc_sram;
+ bool write_by_sram;
+
+ bool is_initialized;
+ struct completion comp_nfc;
+
+	/* Points to the sram bank holding data just read in via the NFC */
+ void __iomem *data_in_sram;
+ bool will_write_sram;
+};
+static struct atmel_nfc nand_nfc;
+
struct atmel_nand_host {
struct nand_chip nand_chip;
struct mtd_info mtd;
@@ -97,6 +117,8 @@ struct atmel_nand_host {
struct completion comp;
struct dma_chan *dma_chan;
+ struct atmel_nfc *nfc;
+
bool has_pmecc;
u8 pmecc_corr_cap;
u16 pmecc_sector_size;
@@ -128,11 +150,6 @@ struct atmel_nand_host {
static struct nand_ecclayout atmel_pmecc_oobinfo;
-static int cpu_has_dma(void)
-{
- return cpu_is_at91sam9rl() || cpu_is_at91sam9g45();
-}
-
/*
* Enable NAND.
*/
@@ -186,21 +203,103 @@ static int atmel_nand_device_ready(struct mtd_info *mtd)
!!host->board.rdy_pin_active_low;
}
+/* Set up for hardware ready pin and enable pin. */
+static int atmel_nand_set_enable_ready_pins(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct atmel_nand_host *host = chip->priv;
+ int res = 0;
+
+ if (gpio_is_valid(host->board.rdy_pin)) {
+ res = devm_gpio_request(host->dev,
+ host->board.rdy_pin, "nand_rdy");
+ if (res < 0) {
+ dev_err(host->dev,
+ "can't request rdy gpio %d\n",
+ host->board.rdy_pin);
+ return res;
+ }
+
+ res = gpio_direction_input(host->board.rdy_pin);
+ if (res < 0) {
+ dev_err(host->dev,
+ "can't request input direction rdy gpio %d\n",
+ host->board.rdy_pin);
+ return res;
+ }
+
+ chip->dev_ready = atmel_nand_device_ready;
+ }
+
+ if (gpio_is_valid(host->board.enable_pin)) {
+ res = devm_gpio_request(host->dev,
+ host->board.enable_pin, "nand_enable");
+ if (res < 0) {
+ dev_err(host->dev,
+ "can't request enable gpio %d\n",
+ host->board.enable_pin);
+ return res;
+ }
+
+ res = gpio_direction_output(host->board.enable_pin, 1);
+ if (res < 0) {
+ dev_err(host->dev,
+ "can't request output direction enable gpio %d\n",
+ host->board.enable_pin);
+ return res;
+ }
+ }
+
+ return res;
+}
+
+static void memcpy32_fromio(void *trg, const void __iomem *src, size_t size)
+{
+ int i;
+ u32 *t = trg;
+ const __iomem u32 *s = src;
+
+ for (i = 0; i < (size >> 2); i++)
+ *t++ = readl_relaxed(s++);
+}
+
+static void memcpy32_toio(void __iomem *trg, const void *src, int size)
+{
+ int i;
+ u32 __iomem *t = trg;
+ const u32 *s = src;
+
+ for (i = 0; i < (size >> 2); i++)
+ writel_relaxed(*s++, t++);
+}
+
/*
* Minimal-overhead PIO for data access.
*/
static void atmel_read_buf8(struct mtd_info *mtd, u8 *buf, int len)
{
struct nand_chip *nand_chip = mtd->priv;
+ struct atmel_nand_host *host = nand_chip->priv;
- __raw_readsb(nand_chip->IO_ADDR_R, buf, len);
+ if (host->nfc && host->nfc->use_nfc_sram && host->nfc->data_in_sram) {
+ memcpy32_fromio(buf, host->nfc->data_in_sram, len);
+ host->nfc->data_in_sram += len;
+ } else {
+ __raw_readsb(nand_chip->IO_ADDR_R, buf, len);
+ }
}
static void atmel_read_buf16(struct mtd_info *mtd, u8 *buf, int len)
{
struct nand_chip *nand_chip = mtd->priv;
+ struct atmel_nand_host *host = nand_chip->priv;
- __raw_readsw(nand_chip->IO_ADDR_R, buf, len / 2);
+ if (host->nfc && host->nfc->use_nfc_sram && host->nfc->data_in_sram) {
+ memcpy32_fromio(buf, host->nfc->data_in_sram, len);
+ host->nfc->data_in_sram += len;
+ } else {
+ __raw_readsw(nand_chip->IO_ADDR_R, buf, len / 2);
+ }
}
static void atmel_write_buf8(struct mtd_info *mtd, const u8 *buf, int len)
@@ -222,6 +321,40 @@ static void dma_complete_func(void *completion)
complete(completion);
}
+static int nfc_set_sram_bank(struct atmel_nand_host *host, unsigned int bank)
+{
+ /* NFC only has two banks. Must be 0 or 1 */
+ if (bank > 1)
+ return -EINVAL;
+
+ if (bank) {
+		/* Only flashes with 2k pages or smaller can use the second bank */
+ if (host->mtd.writesize > 2048)
+ return -EINVAL;
+ nfc_writel(host->nfc->hsmc_regs, BANK, ATMEL_HSMC_NFC_BANK1);
+ } else {
+ nfc_writel(host->nfc->hsmc_regs, BANK, ATMEL_HSMC_NFC_BANK0);
+ }
+
+ return 0;
+}
+
+static uint nfc_get_sram_off(struct atmel_nand_host *host)
+{
+ if (nfc_readl(host->nfc->hsmc_regs, BANK) & ATMEL_HSMC_NFC_BANK1)
+ return NFC_SRAM_BANK1_OFFSET;
+ else
+ return 0;
+}
+
+static dma_addr_t nfc_sram_phys(struct atmel_nand_host *host)
+{
+ if (nfc_readl(host->nfc->hsmc_regs, BANK) & ATMEL_HSMC_NFC_BANK1)
+ return host->nfc->sram_bank0_phys + NFC_SRAM_BANK1_OFFSET;
+ else
+ return host->nfc->sram_bank0_phys;
+}
+
static int atmel_nand_dma_op(struct mtd_info *mtd, void *buf, int len,
int is_read)
{
@@ -235,6 +368,7 @@ static int atmel_nand_dma_op(struct mtd_info *mtd, void *buf, int len,
void *p = buf;
int err = -EIO;
enum dma_data_direction dir = is_read ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+ struct atmel_nfc *nfc = host->nfc;
if (buf >= high_memory)
goto err_buf;
@@ -251,11 +385,20 @@ static int atmel_nand_dma_op(struct mtd_info *mtd, void *buf, int len,
}
if (is_read) {
- dma_src_addr = host->io_phys;
+ if (nfc && nfc->data_in_sram)
+ dma_src_addr = nfc_sram_phys(host) + (nfc->data_in_sram
+ - (nfc->sram_bank0 + nfc_get_sram_off(host)));
+ else
+ dma_src_addr = host->io_phys;
+
dma_dst_addr = phys_addr;
} else {
dma_src_addr = phys_addr;
- dma_dst_addr = host->io_phys;
+
+ if (nfc && nfc->write_by_sram)
+ dma_dst_addr = nfc_sram_phys(host);
+ else
+ dma_dst_addr = host->io_phys;
}
tx = dma_dev->device_prep_dma_memcpy(host->dma_chan, dma_dst_addr,
@@ -278,6 +421,10 @@ static int atmel_nand_dma_op(struct mtd_info *mtd, void *buf, int len,
dma_async_issue_pending(host->dma_chan);
wait_for_completion(&host->comp);
+ if (is_read && nfc && nfc->data_in_sram)
+		/* After reading from the NFC SRAM, advance the read position */
+ nfc->data_in_sram += len;
+
err = 0;
err_dma:
@@ -366,43 +513,34 @@ static void __iomem *pmecc_get_alpha_to(struct atmel_nand_host *host)
table_size * sizeof(int16_t);
}
-static void pmecc_data_free(struct atmel_nand_host *host)
-{
- kfree(host->pmecc_partial_syn);
- kfree(host->pmecc_si);
- kfree(host->pmecc_lmu);
- kfree(host->pmecc_smu);
- kfree(host->pmecc_mu);
- kfree(host->pmecc_dmu);
- kfree(host->pmecc_delta);
-}
-
static int pmecc_data_alloc(struct atmel_nand_host *host)
{
const int cap = host->pmecc_corr_cap;
+ int size;
+
+ size = (2 * cap + 1) * sizeof(int16_t);
+ host->pmecc_partial_syn = devm_kzalloc(host->dev, size, GFP_KERNEL);
+ host->pmecc_si = devm_kzalloc(host->dev, size, GFP_KERNEL);
+ host->pmecc_lmu = devm_kzalloc(host->dev,
+ (cap + 1) * sizeof(int16_t), GFP_KERNEL);
+ host->pmecc_smu = devm_kzalloc(host->dev,
+ (cap + 2) * size, GFP_KERNEL);
+
+ size = (cap + 1) * sizeof(int);
+ host->pmecc_mu = devm_kzalloc(host->dev, size, GFP_KERNEL);
+ host->pmecc_dmu = devm_kzalloc(host->dev, size, GFP_KERNEL);
+ host->pmecc_delta = devm_kzalloc(host->dev, size, GFP_KERNEL);
+
+ if (!host->pmecc_partial_syn ||
+ !host->pmecc_si ||
+ !host->pmecc_lmu ||
+ !host->pmecc_smu ||
+ !host->pmecc_mu ||
+ !host->pmecc_dmu ||
+ !host->pmecc_delta)
+ return -ENOMEM;
- host->pmecc_partial_syn = kzalloc((2 * cap + 1) * sizeof(int16_t),
- GFP_KERNEL);
- host->pmecc_si = kzalloc((2 * cap + 1) * sizeof(int16_t), GFP_KERNEL);
- host->pmecc_lmu = kzalloc((cap + 1) * sizeof(int16_t), GFP_KERNEL);
- host->pmecc_smu = kzalloc((cap + 2) * (2 * cap + 1) * sizeof(int16_t),
- GFP_KERNEL);
- host->pmecc_mu = kzalloc((cap + 1) * sizeof(int), GFP_KERNEL);
- host->pmecc_dmu = kzalloc((cap + 1) * sizeof(int), GFP_KERNEL);
- host->pmecc_delta = kzalloc((cap + 1) * sizeof(int), GFP_KERNEL);
-
- if (host->pmecc_partial_syn &&
- host->pmecc_si &&
- host->pmecc_lmu &&
- host->pmecc_smu &&
- host->pmecc_mu &&
- host->pmecc_dmu &&
- host->pmecc_delta)
- return 0;
-
- /* error happened */
- pmecc_data_free(host);
- return -ENOMEM;
+ return 0;
}
static void pmecc_gen_syndrome(struct mtd_info *mtd, int sector)
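The kzalloc() to devm_kzalloc() conversion above is what lets pmecc_data_free() disappear: managed allocations are released automatically when probe fails or the device is unbound, so error paths no longer need manual unwinding. A minimal hypothetical probe showing the pattern:

#include <linux/platform_device.h>
#include <linux/slab.h>

struct foo {
	u8 *buf;
};

static int foo_probe(struct platform_device *pdev)
{
	struct foo *priv;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;		/* nothing to unwind */

	priv->buf = devm_kzalloc(&pdev->dev, 64, GFP_KERNEL);
	if (!priv->buf)
		return -ENOMEM;		/* priv itself is released by devres */

	platform_set_drvdata(pdev, priv);
	return 0;
}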
@@ -763,6 +901,30 @@ normal_check:
return total_err;
}
+static void pmecc_enable(struct atmel_nand_host *host, int ecc_op)
+{
+ u32 val;
+
+ if (ecc_op != NAND_ECC_READ && ecc_op != NAND_ECC_WRITE) {
+ dev_err(host->dev, "atmel_nand: wrong pmecc operation type!");
+ return;
+ }
+
+ pmecc_writel(host->ecc, CTRL, PMECC_CTRL_RST);
+ pmecc_writel(host->ecc, CTRL, PMECC_CTRL_DISABLE);
+ val = pmecc_readl_relaxed(host->ecc, CFG);
+
+ if (ecc_op == NAND_ECC_READ)
+ pmecc_writel(host->ecc, CFG, (val & ~PMECC_CFG_WRITE_OP)
+ | PMECC_CFG_AUTO_ENABLE);
+ else
+ pmecc_writel(host->ecc, CFG, (val | PMECC_CFG_WRITE_OP)
+ & ~PMECC_CFG_AUTO_ENABLE);
+
+ pmecc_writel(host->ecc, CTRL, PMECC_CTRL_ENABLE);
+ pmecc_writel(host->ecc, CTRL, PMECC_CTRL_DATA);
+}
+
static int atmel_nand_pmecc_read_page(struct mtd_info *mtd,
struct nand_chip *chip, uint8_t *buf, int oob_required, int page)
{
@@ -774,13 +936,8 @@ static int atmel_nand_pmecc_read_page(struct mtd_info *mtd,
unsigned long end_time;
int bitflips = 0;
- pmecc_writel(host->ecc, CTRL, PMECC_CTRL_RST);
- pmecc_writel(host->ecc, CTRL, PMECC_CTRL_DISABLE);
- pmecc_writel(host->ecc, CFG, (pmecc_readl_relaxed(host->ecc, CFG)
- & ~PMECC_CFG_WRITE_OP) | PMECC_CFG_AUTO_ENABLE);
-
- pmecc_writel(host->ecc, CTRL, PMECC_CTRL_ENABLE);
- pmecc_writel(host->ecc, CTRL, PMECC_CTRL_DATA);
+ if (!host->nfc || !host->nfc->use_nfc_sram)
+ pmecc_enable(host, NAND_ECC_READ);
chip->read_buf(mtd, buf, eccsize);
chip->read_buf(mtd, oob, mtd->oobsize);
@@ -813,16 +970,10 @@ static int atmel_nand_pmecc_write_page(struct mtd_info *mtd,
int i, j;
unsigned long end_time;
- pmecc_writel(host->ecc, CTRL, PMECC_CTRL_RST);
- pmecc_writel(host->ecc, CTRL, PMECC_CTRL_DISABLE);
-
- pmecc_writel(host->ecc, CFG, (pmecc_readl_relaxed(host->ecc, CFG) |
- PMECC_CFG_WRITE_OP) & ~PMECC_CFG_AUTO_ENABLE);
-
- pmecc_writel(host->ecc, CTRL, PMECC_CTRL_ENABLE);
- pmecc_writel(host->ecc, CTRL, PMECC_CTRL_DATA);
-
- chip->write_buf(mtd, (u8 *)buf, mtd->writesize);
+ if (!host->nfc || !host->nfc->write_by_sram) {
+ pmecc_enable(host, NAND_ECC_WRITE);
+ chip->write_buf(mtd, (u8 *)buf, mtd->writesize);
+ }
end_time = jiffies + msecs_to_jiffies(PMECC_MAX_TIMEOUT_MS);
while ((pmecc_readl_relaxed(host->ecc, SR) & PMECC_SR_BUSY)) {
@@ -967,11 +1118,11 @@ static int pmecc_choose_ecc(struct atmel_nand_host *host,
host->pmecc_corr_cap = 2;
else if (*cap <= 4)
host->pmecc_corr_cap = 4;
- else if (*cap < 8)
+ else if (*cap <= 8)
host->pmecc_corr_cap = 8;
- else if (*cap < 12)
+ else if (*cap <= 12)
host->pmecc_corr_cap = 12;
- else if (*cap < 24)
+ else if (*cap <= 24)
host->pmecc_corr_cap = 24;
else
return -EINVAL;
@@ -1002,7 +1153,7 @@ static int __init atmel_pmecc_nand_init_params(struct platform_device *pdev,
return err_no;
}
- if (cap != host->pmecc_corr_cap ||
+ if (cap > host->pmecc_corr_cap ||
sector_size != host->pmecc_sector_size)
dev_info(host->dev, "WARNING: Be Caution! Using different PMECC parameters from Nand ONFI ECC reqirement.\n");
@@ -1023,27 +1174,28 @@ static int __init atmel_pmecc_nand_init_params(struct platform_device *pdev,
return 0;
}
- host->ecc = ioremap(regs->start, resource_size(regs));
- if (host->ecc == NULL) {
+ host->ecc = devm_ioremap_resource(&pdev->dev, regs);
+ if (IS_ERR(host->ecc)) {
dev_err(host->dev, "ioremap failed\n");
- err_no = -EIO;
- goto err_pmecc_ioremap;
+ err_no = PTR_ERR(host->ecc);
+ goto err;
}
regs_pmerr = platform_get_resource(pdev, IORESOURCE_MEM, 2);
- regs_rom = platform_get_resource(pdev, IORESOURCE_MEM, 3);
- if (regs_pmerr && regs_rom) {
- host->pmerrloc_base = ioremap(regs_pmerr->start,
- resource_size(regs_pmerr));
- host->pmecc_rom_base = ioremap(regs_rom->start,
- resource_size(regs_rom));
+ host->pmerrloc_base = devm_ioremap_resource(&pdev->dev, regs_pmerr);
+ if (IS_ERR(host->pmerrloc_base)) {
+ dev_err(host->dev,
+ "Can not get I/O resource for PMECC ERRLOC controller!\n");
+ err_no = PTR_ERR(host->pmerrloc_base);
+ goto err;
}
- if (!host->pmerrloc_base || !host->pmecc_rom_base) {
- dev_err(host->dev,
- "Can not get I/O resource for PMECC ERRLOC controller or ROM!\n");
- err_no = -EIO;
- goto err_pmloc_ioremap;
+ regs_rom = platform_get_resource(pdev, IORESOURCE_MEM, 3);
+ host->pmecc_rom_base = devm_ioremap_resource(&pdev->dev, regs_rom);
+ if (IS_ERR(host->pmecc_rom_base)) {
+ dev_err(host->dev, "Can not get I/O resource for ROM!\n");
+ err_no = PTR_ERR(host->pmecc_rom_base);
+ goto err;
}
/* ECC is calculated for the whole page (1 step) */
@@ -1052,7 +1204,8 @@ static int __init atmel_pmecc_nand_init_params(struct platform_device *pdev,
/* set ECC page size and oob layout */
switch (mtd->writesize) {
case 2048:
- host->pmecc_degree = PMECC_GF_DIMENSION_13;
+ host->pmecc_degree = (sector_size == 512) ?
+ PMECC_GF_DIMENSION_13 : PMECC_GF_DIMENSION_14;
host->pmecc_cw_len = (1 << host->pmecc_degree) - 1;
host->pmecc_sector_number = mtd->writesize / sector_size;
host->pmecc_bytes_per_sector = pmecc_get_ecc_bytes(
@@ -1068,7 +1221,7 @@ static int __init atmel_pmecc_nand_init_params(struct platform_device *pdev,
if (nand_chip->ecc.bytes > mtd->oobsize - 2) {
dev_err(host->dev, "No room for ECC bytes\n");
err_no = -EINVAL;
- goto err_no_ecc_room;
+ goto err;
}
pmecc_config_ecc_layout(&atmel_pmecc_oobinfo,
mtd->oobsize,
@@ -1093,7 +1246,7 @@ static int __init atmel_pmecc_nand_init_params(struct platform_device *pdev,
if (err_no) {
dev_err(host->dev,
"Cannot allocate memory for PMECC computation!\n");
- goto err_pmecc_data_alloc;
+ goto err;
}
nand_chip->ecc.read_page = atmel_nand_pmecc_read_page;
@@ -1103,15 +1256,7 @@ static int __init atmel_pmecc_nand_init_params(struct platform_device *pdev,
return 0;
-err_pmecc_data_alloc:
-err_no_ecc_room:
-err_pmloc_ioremap:
- iounmap(host->ecc);
- if (host->pmerrloc_base)
- iounmap(host->pmerrloc_base);
- if (host->pmecc_rom_base)
- iounmap(host->pmecc_rom_base);
-err_pmecc_ioremap:
+err:
return err_no;
}
@@ -1174,10 +1319,9 @@ static int atmel_nand_read_page(struct mtd_info *mtd, struct nand_chip *chip,
* Workaround: Reset the parity registers before reading the
* actual data.
*/
- if (cpu_is_at32ap7000()) {
- struct atmel_nand_host *host = chip->priv;
+ struct atmel_nand_host *host = chip->priv;
+ if (host->board.need_reset_workaround)
ecc_writel(host->ecc, CR, ATMEL_ECC_RST);
- }
/* read the page */
chip->read_buf(mtd, p, eccsize);
@@ -1298,11 +1442,11 @@ static int atmel_nand_correct(struct mtd_info *mtd, u_char *dat,
*/
static void atmel_nand_hwctl(struct mtd_info *mtd, int mode)
{
- if (cpu_is_at32ap7000()) {
- struct nand_chip *nand_chip = mtd->priv;
- struct atmel_nand_host *host = nand_chip->priv;
+ struct nand_chip *nand_chip = mtd->priv;
+ struct atmel_nand_host *host = nand_chip->priv;
+
+ if (host->board.need_reset_workaround)
ecc_writel(host->ecc, CR, ATMEL_ECC_RST);
- }
}
#if defined(CONFIG_OF)
@@ -1337,6 +1481,8 @@ static int atmel_of_init_port(struct atmel_nand_host *host,
board->on_flash_bbt = of_get_nand_on_flash_bbt(np);
+ board->has_dma = of_property_read_bool(np, "atmel,nand-has-dma");
+
if (of_get_nand_bus_width(np) == 16)
board->bus_width_16 = 1;
@@ -1348,6 +1494,9 @@ static int atmel_of_init_port(struct atmel_nand_host *host,
host->has_pmecc = of_property_read_bool(np, "atmel,has-pmecc");
+	/* load the NFC driver if present */
+ of_platform_populate(np, NULL, NULL, host->dev);
+
if (!(board->ecc_mode == NAND_ECC_HW) || !host->has_pmecc)
return 0; /* Not using PMECC */
@@ -1414,10 +1563,10 @@ static int __init atmel_hw_nand_init_params(struct platform_device *pdev,
return 0;
}
- host->ecc = ioremap(regs->start, resource_size(regs));
- if (host->ecc == NULL) {
+ host->ecc = devm_ioremap_resource(&pdev->dev, regs);
+ if (IS_ERR(host->ecc)) {
dev_err(host->dev, "ioremap failed\n");
- return -EIO;
+ return PTR_ERR(host->ecc);
}
/* ECC is calculated for the whole page (1 step) */
@@ -1459,6 +1608,382 @@ static int __init atmel_hw_nand_init_params(struct platform_device *pdev,
return 0;
}
+/* SMC interrupt service routine */
+static irqreturn_t hsmc_interrupt(int irq, void *dev_id)
+{
+ struct atmel_nand_host *host = dev_id;
+ u32 status, mask, pending;
+ irqreturn_t ret = IRQ_HANDLED;
+
+ status = nfc_readl(host->nfc->hsmc_regs, SR);
+ mask = nfc_readl(host->nfc->hsmc_regs, IMR);
+ pending = status & mask;
+
+ if (pending & NFC_SR_XFR_DONE) {
+ complete(&host->nfc->comp_nfc);
+ nfc_writel(host->nfc->hsmc_regs, IDR, NFC_SR_XFR_DONE);
+ } else if (pending & NFC_SR_RB_EDGE) {
+ complete(&host->nfc->comp_nfc);
+ nfc_writel(host->nfc->hsmc_regs, IDR, NFC_SR_RB_EDGE);
+ } else if (pending & NFC_SR_CMD_DONE) {
+ complete(&host->nfc->comp_nfc);
+ nfc_writel(host->nfc->hsmc_regs, IDR, NFC_SR_CMD_DONE);
+ } else {
+ ret = IRQ_NONE;
+ }
+
+ return ret;
+}
+
+/* NFC (NAND Flash Controller) related functions */
+static int nfc_wait_interrupt(struct atmel_nand_host *host, u32 flag)
+{
+ unsigned long timeout;
+ init_completion(&host->nfc->comp_nfc);
+
+	/* Enable the interrupt we need to wait for */
+ nfc_writel(host->nfc->hsmc_regs, IER, flag);
+
+ timeout = wait_for_completion_timeout(&host->nfc->comp_nfc,
+ msecs_to_jiffies(NFC_TIME_OUT_MS));
+ if (timeout)
+ return 0;
+
+	/* Timed out waiting for the interrupt */
+	dev_err(host->dev, "Timed out waiting for interrupt: 0x%08x\n", flag);
+ return -ETIMEDOUT;
+}
+
+static int nfc_send_command(struct atmel_nand_host *host,
+ unsigned int cmd, unsigned int addr, unsigned char cycle0)
+{
+ unsigned long timeout;
+ dev_dbg(host->dev,
+ "nfc_cmd: 0x%08x, addr1234: 0x%08x, cycle0: 0x%02x\n",
+ cmd, addr, cycle0);
+
+ timeout = jiffies + msecs_to_jiffies(NFC_TIME_OUT_MS);
+ while (nfc_cmd_readl(NFCADDR_CMD_NFCBUSY, host->nfc->base_cmd_regs)
+ & NFCADDR_CMD_NFCBUSY) {
+ if (time_after(jiffies, timeout)) {
+			dev_err(host->dev,
+				"Timed out waiting for CMD_NFCBUSY to clear!\n");
+ return -ETIMEDOUT;
+ }
+ }
+ nfc_writel(host->nfc->hsmc_regs, CYCLE0, cycle0);
+ nfc_cmd_addr1234_writel(cmd, addr, host->nfc->base_cmd_regs);
+ return nfc_wait_interrupt(host, NFC_SR_CMD_DONE);
+}
+
+static int nfc_device_ready(struct mtd_info *mtd)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+ struct atmel_nand_host *host = nand_chip->priv;
+ if (!nfc_wait_interrupt(host, NFC_SR_RB_EDGE))
+ return 1;
+ return 0;
+}
+
+static void nfc_select_chip(struct mtd_info *mtd, int chip)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+ struct atmel_nand_host *host = nand_chip->priv;
+
+ if (chip == -1)
+ nfc_writel(host->nfc->hsmc_regs, CTRL, NFC_CTRL_DISABLE);
+ else
+ nfc_writel(host->nfc->hsmc_regs, CTRL, NFC_CTRL_ENABLE);
+}
+
+static int nfc_make_addr(struct mtd_info *mtd, int column, int page_addr,
+ unsigned int *addr1234, unsigned int *cycle0)
+{
+ struct nand_chip *chip = mtd->priv;
+
+ int acycle = 0;
+ unsigned char addr_bytes[8];
+ int index = 0, bit_shift;
+
+ BUG_ON(addr1234 == NULL || cycle0 == NULL);
+
+ *cycle0 = 0;
+ *addr1234 = 0;
+
+ if (column != -1) {
+ if (chip->options & NAND_BUSWIDTH_16)
+ column >>= 1;
+ addr_bytes[acycle++] = column & 0xff;
+ if (mtd->writesize > 512)
+ addr_bytes[acycle++] = (column >> 8) & 0xff;
+ }
+
+ if (page_addr != -1) {
+ addr_bytes[acycle++] = page_addr & 0xff;
+ addr_bytes[acycle++] = (page_addr >> 8) & 0xff;
+ if (chip->chipsize > (128 << 20))
+ addr_bytes[acycle++] = (page_addr >> 16) & 0xff;
+ }
+
+ if (acycle > 4)
+ *cycle0 = addr_bytes[index++];
+
+ for (bit_shift = 0; index < acycle; bit_shift += 8)
+ *addr1234 += addr_bytes[index++] << bit_shift;
+
+ /* return acycle in cmd register */
+ return acycle << NFCADDR_CMD_ACYCLE_BIT_POS;
+}
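For illustration, a worked trace of nfc_make_addr() under assumed parameters (a chip larger than 128 MiB with 2 KiB pages, column 0, page_addr 0x000100): five address cycles are produced, so the first byte lands in CYCLE0 and the remaining four are packed LSB-first into ADDR1234:

	addr_bytes[] = { 0x00, 0x00, 0x00, 0x01, 0x00 };	/* acycle = 5 */
	*cycle0   = 0x00;		/* extra fifth address cycle */
	*addr1234 = 0x00010000;		/* bytes 1..4, LSB first */
	/* returns 5 << NFCADDR_CMD_ACYCLE_BIT_POS */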
+
+static void nfc_nand_command(struct mtd_info *mtd, unsigned int command,
+ int column, int page_addr)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct atmel_nand_host *host = chip->priv;
+ unsigned long timeout;
+ unsigned int nfc_addr_cmd = 0;
+
+ unsigned int cmd1 = command << NFCADDR_CMD_CMD1_BIT_POS;
+
+	/* Default settings: no cmd2, no address cycle, read from NAND */
+ unsigned int cmd2 = 0;
+ unsigned int vcmd2 = 0;
+ int acycle = NFCADDR_CMD_ACYCLE_NONE;
+ int csid = NFCADDR_CMD_CSID_3;
+ int dataen = NFCADDR_CMD_DATADIS;
+ int nfcwr = NFCADDR_CMD_NFCRD;
+ unsigned int addr1234 = 0;
+ unsigned int cycle0 = 0;
+ bool do_addr = true;
+ host->nfc->data_in_sram = NULL;
+
+ dev_dbg(host->dev, "%s: cmd = 0x%02x, col = 0x%08x, page = 0x%08x\n",
+ __func__, command, column, page_addr);
+
+ switch (command) {
+ case NAND_CMD_RESET:
+ nfc_addr_cmd = cmd1 | acycle | csid | dataen | nfcwr;
+ nfc_send_command(host, nfc_addr_cmd, addr1234, cycle0);
+ udelay(chip->chip_delay);
+
+ nfc_nand_command(mtd, NAND_CMD_STATUS, -1, -1);
+ timeout = jiffies + msecs_to_jiffies(NFC_TIME_OUT_MS);
+ while (!(chip->read_byte(mtd) & NAND_STATUS_READY)) {
+ if (time_after(jiffies, timeout)) {
+				dev_err(host->dev,
+					"Timed out waiting for status ready!\n");
+ break;
+ }
+ }
+ return;
+ case NAND_CMD_STATUS:
+ do_addr = false;
+ break;
+ case NAND_CMD_PARAM:
+ case NAND_CMD_READID:
+ do_addr = false;
+ acycle = NFCADDR_CMD_ACYCLE_1;
+ if (column != -1)
+ addr1234 = column;
+ break;
+ case NAND_CMD_RNDOUT:
+ cmd2 = NAND_CMD_RNDOUTSTART << NFCADDR_CMD_CMD2_BIT_POS;
+ vcmd2 = NFCADDR_CMD_VCMD2;
+ break;
+ case NAND_CMD_READ0:
+ case NAND_CMD_READOOB:
+ if (command == NAND_CMD_READOOB) {
+ column += mtd->writesize;
+ command = NAND_CMD_READ0; /* only READ0 is valid */
+ cmd1 = command << NFCADDR_CMD_CMD1_BIT_POS;
+ }
+ if (host->nfc->use_nfc_sram) {
+ /* Enable Data transfer to sram */
+ dataen = NFCADDR_CMD_DATAEN;
+
+			/* Need to enable PMECC now, since the NFC
+			 * transfers data on the bus right after the
+			 * NFC read command is sent.
+			 */
+ if (chip->ecc.mode == NAND_ECC_HW && host->has_pmecc)
+ pmecc_enable(host, NAND_ECC_READ);
+ }
+
+ cmd2 = NAND_CMD_READSTART << NFCADDR_CMD_CMD2_BIT_POS;
+ vcmd2 = NFCADDR_CMD_VCMD2;
+ break;
+	/* For program commands, the command needs write enable set */
+ case NAND_CMD_PAGEPROG:
+ case NAND_CMD_SEQIN:
+ case NAND_CMD_RNDIN:
+ nfcwr = NFCADDR_CMD_NFCWR;
+ if (host->nfc->will_write_sram && command == NAND_CMD_SEQIN)
+ dataen = NFCADDR_CMD_DATAEN;
+ break;
+ default:
+ break;
+ }
+
+ if (do_addr)
+ acycle = nfc_make_addr(mtd, column, page_addr, &addr1234,
+ &cycle0);
+
+ nfc_addr_cmd = cmd1 | cmd2 | vcmd2 | acycle | csid | dataen | nfcwr;
+ nfc_send_command(host, nfc_addr_cmd, addr1234, cycle0);
+
+ if (dataen == NFCADDR_CMD_DATAEN)
+ if (nfc_wait_interrupt(host, NFC_SR_XFR_DONE))
+ dev_err(host->dev, "something wrong, No XFR_DONE interrupt comes.\n");
+
+	/*
+	 * Program and erase have their own busy handlers; status, sequential
+	 * in, and deplete1 need no delay.
+	 */
+ switch (command) {
+ case NAND_CMD_CACHEDPROG:
+ case NAND_CMD_PAGEPROG:
+ case NAND_CMD_ERASE1:
+ case NAND_CMD_ERASE2:
+ case NAND_CMD_RNDIN:
+ case NAND_CMD_STATUS:
+ case NAND_CMD_RNDOUT:
+ case NAND_CMD_SEQIN:
+ case NAND_CMD_READID:
+ return;
+
+ case NAND_CMD_READ0:
+ if (dataen == NFCADDR_CMD_DATAEN) {
+ host->nfc->data_in_sram = host->nfc->sram_bank0 +
+ nfc_get_sram_off(host);
+ return;
+ }
+ /* fall through */
+ default:
+ nfc_wait_interrupt(host, NFC_SR_RB_EDGE);
+ }
+}
+
+static int nfc_sram_write_page(struct mtd_info *mtd, struct nand_chip *chip,
+ uint32_t offset, int data_len, const uint8_t *buf,
+ int oob_required, int page, int cached, int raw)
+{
+ int cfg, len;
+ int status = 0;
+ struct atmel_nand_host *host = chip->priv;
+ void __iomem *sram = host->nfc->sram_bank0 + nfc_get_sram_off(host);
+
+ /* Subpage write is not supported */
+ if (offset || (data_len < mtd->writesize))
+ return -EINVAL;
+
+ cfg = nfc_readl(host->nfc->hsmc_regs, CFG);
+ len = mtd->writesize;
+
+ if (unlikely(raw)) {
+ len += mtd->oobsize;
+ nfc_writel(host->nfc->hsmc_regs, CFG, cfg | NFC_CFG_WSPARE);
+ } else
+ nfc_writel(host->nfc->hsmc_regs, CFG, cfg & ~NFC_CFG_WSPARE);
+
+	/* Copy page data to the SRAM; the NFC will write it to NAND */
+ if (use_dma) {
+ if (atmel_nand_dma_op(mtd, (void *)buf, len, 0) != 0)
+ /* Fall back to use cpu copy */
+ memcpy32_toio(sram, buf, len);
+ } else {
+ memcpy32_toio(sram, buf, len);
+ }
+
+ if (chip->ecc.mode == NAND_ECC_HW && host->has_pmecc)
+		/*
+		 * When using the NFC SRAM, PMECC must be set up before
+		 * sending the NAND_CMD_SEQIN command, since the NFC
+		 * transfers data from SRAM to NAND as soon as the
+		 * command is sent.
+		 */
+ pmecc_enable(host, NAND_ECC_WRITE);
+
+ host->nfc->will_write_sram = true;
+ chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
+ host->nfc->will_write_sram = false;
+
+ if (likely(!raw))
+ /* Need to write ecc into oob */
+ status = chip->ecc.write_page(mtd, chip, buf, oob_required);
+
+ if (status < 0)
+ return status;
+
+ chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+ status = chip->waitfunc(mtd, chip);
+
+ if ((status & NAND_STATUS_FAIL) && (chip->errstat))
+ status = chip->errstat(mtd, chip, FL_WRITING, status, page);
+
+ if (status & NAND_STATUS_FAIL)
+ return -EIO;
+
+ return 0;
+}
+
+static int nfc_sram_init(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct atmel_nand_host *host = chip->priv;
+ int res = 0;
+
+ /* Initialize the NFC CFG register */
+ unsigned int cfg_nfc = 0;
+
+ /* set page size and oob layout */
+ switch (mtd->writesize) {
+ case 512:
+ cfg_nfc = NFC_CFG_PAGESIZE_512;
+ break;
+ case 1024:
+ cfg_nfc = NFC_CFG_PAGESIZE_1024;
+ break;
+ case 2048:
+ cfg_nfc = NFC_CFG_PAGESIZE_2048;
+ break;
+ case 4096:
+ cfg_nfc = NFC_CFG_PAGESIZE_4096;
+ break;
+ case 8192:
+ cfg_nfc = NFC_CFG_PAGESIZE_8192;
+ break;
+ default:
+ dev_err(host->dev, "Unsupported page size for NFC.\n");
+ res = -ENXIO;
+ return res;
+ }
+
+	/* oob size in bytes = (NFCSPARESIZE + 1) * 4.
+	 * The maximum supported spare size is 512 bytes. */
+ cfg_nfc |= (((mtd->oobsize / 4) - 1) << NFC_CFG_NFC_SPARESIZE_BIT_POS
+ & NFC_CFG_NFC_SPARESIZE);
+	/* by default, set the maximum data timeout */
+ cfg_nfc |= NFC_CFG_RSPARE |
+ NFC_CFG_NFC_DTOCYC | NFC_CFG_NFC_DTOMUL;
+
+ nfc_writel(host->nfc->hsmc_regs, CFG, cfg_nfc);
+
+ host->nfc->will_write_sram = false;
+ nfc_set_sram_bank(host, 0);
+
+	/* Use write page via NFC SRAM only for PMECC or no ECC. */
+ if (host->nfc->write_by_sram) {
+ if ((chip->ecc.mode == NAND_ECC_HW && host->has_pmecc) ||
+ chip->ecc.mode == NAND_ECC_NONE)
+ chip->write_page = nfc_sram_write_page;
+ else
+ host->nfc->write_by_sram = false;
+ }
+
+ dev_info(host->dev, "Using NFC Sram read %s\n",
+ host->nfc->write_by_sram ? "and write" : "");
+ return 0;
+}
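As a concrete instance of the spare-size encoding in nfc_sram_init() above, assuming a typical 2048-byte page with a 64-byte OOB: (64 / 4) - 1 = 15, so NFCSPARESIZE is programmed to 15 and the controller transfers (15 + 1) * 4 = 64 spare bytes, within the 512-byte maximum.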
+
+static struct platform_driver atmel_nand_nfc_driver;
/*
* Probe for the NAND device.
*/
@@ -1469,30 +1994,27 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
struct nand_chip *nand_chip;
struct resource *mem;
struct mtd_part_parser_data ppdata = {};
- int res;
- struct pinctrl *pinctrl;
-
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!mem) {
- printk(KERN_ERR "atmel_nand: can't get I/O resource mem\n");
- return -ENXIO;
- }
+ int res, irq;
/* Allocate memory for the device structure (and zero it) */
- host = kzalloc(sizeof(struct atmel_nand_host), GFP_KERNEL);
+ host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
if (!host) {
printk(KERN_ERR "atmel_nand: failed to allocate device structure.\n");
return -ENOMEM;
}
- host->io_phys = (dma_addr_t)mem->start;
+ res = platform_driver_register(&atmel_nand_nfc_driver);
+ if (res)
+ dev_err(&pdev->dev, "atmel_nand: can't register NFC driver\n");
- host->io_base = ioremap(mem->start, resource_size(mem));
- if (host->io_base == NULL) {
- printk(KERN_ERR "atmel_nand: ioremap failed\n");
- res = -EIO;
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ host->io_base = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(host->io_base)) {
+ dev_err(&pdev->dev, "atmel_nand: ioremap resource failed\n");
+ res = PTR_ERR(host->io_base);
goto err_nand_ioremap;
}
+ host->io_phys = (dma_addr_t)mem->start;
mtd = &host->mtd;
nand_chip = &host->nand_chip;
@@ -1500,9 +2022,9 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
if (pdev->dev.of_node) {
res = atmel_of_init_port(host, pdev->dev.of_node);
if (res)
- goto err_ecc_ioremap;
+ goto err_nand_ioremap;
} else {
- memcpy(&host->board, pdev->dev.platform_data,
+ memcpy(&host->board, dev_get_platdata(&pdev->dev),
sizeof(struct atmel_nand_data));
}
@@ -1513,51 +2035,36 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
/* Set address of NAND IO lines */
nand_chip->IO_ADDR_R = host->io_base;
nand_chip->IO_ADDR_W = host->io_base;
- nand_chip->cmd_ctrl = atmel_nand_cmd_ctrl;
- pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
- if (IS_ERR(pinctrl)) {
- dev_err(host->dev, "Failed to request pinctrl\n");
- res = PTR_ERR(pinctrl);
- goto err_ecc_ioremap;
- }
+ if (nand_nfc.is_initialized) {
+ /* NFC driver is probed and initialized */
+ host->nfc = &nand_nfc;
- if (gpio_is_valid(host->board.rdy_pin)) {
- res = gpio_request(host->board.rdy_pin, "nand_rdy");
- if (res < 0) {
- dev_err(&pdev->dev,
- "can't request rdy gpio %d\n",
- host->board.rdy_pin);
- goto err_ecc_ioremap;
- }
+ nand_chip->select_chip = nfc_select_chip;
+ nand_chip->dev_ready = nfc_device_ready;
+ nand_chip->cmdfunc = nfc_nand_command;
- res = gpio_direction_input(host->board.rdy_pin);
- if (res < 0) {
- dev_err(&pdev->dev,
- "can't request input direction rdy gpio %d\n",
- host->board.rdy_pin);
- goto err_ecc_ioremap;
+ /* Initialize the interrupt for NFC */
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(host->dev, "Cannot get HSMC irq!\n");
+ res = irq;
+ goto err_nand_ioremap;
}
- nand_chip->dev_ready = atmel_nand_device_ready;
- }
-
- if (gpio_is_valid(host->board.enable_pin)) {
- res = gpio_request(host->board.enable_pin, "nand_enable");
- if (res < 0) {
- dev_err(&pdev->dev,
- "can't request enable gpio %d\n",
- host->board.enable_pin);
- goto err_ecc_ioremap;
+ res = devm_request_irq(&pdev->dev, irq, hsmc_interrupt,
+ 0, "hsmc", host);
+ if (res) {
+ dev_err(&pdev->dev, "Unable to request HSMC irq %d\n",
+ irq);
+ goto err_nand_ioremap;
}
+ } else {
+ res = atmel_nand_set_enable_ready_pins(mtd);
+ if (res)
+ goto err_nand_ioremap;
- res = gpio_direction_output(host->board.enable_pin, 1);
- if (res < 0) {
- dev_err(&pdev->dev,
- "can't request output direction enable gpio %d\n",
- host->board.enable_pin);
- goto err_ecc_ioremap;
- }
+ nand_chip->cmd_ctrl = atmel_nand_cmd_ctrl;
}
nand_chip->ecc.mode = host->board.ecc_mode;
@@ -1573,7 +2080,8 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
atmel_nand_enable(host);
if (gpio_is_valid(host->board.det_pin)) {
- res = gpio_request(host->board.det_pin, "nand_det");
+ res = devm_gpio_request(&pdev->dev,
+ host->board.det_pin, "nand_det");
if (res < 0) {
dev_err(&pdev->dev,
"can't request det gpio %d\n",
@@ -1601,7 +2109,7 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
nand_chip->bbt_options |= NAND_BBT_USE_FLASH;
}
- if (!cpu_has_dma())
+ if (!host->board.has_dma)
use_dma = 0;
if (use_dma) {
@@ -1637,6 +2145,15 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
goto err_hw_ecc;
}
+ /* initialize the nfc configuration register */
+ if (host->nfc && host->nfc->use_nfc_sram) {
+ res = nfc_sram_init(mtd);
+ if (res) {
+ host->nfc->use_nfc_sram = false;
+ dev_err(host->dev, "Disable use nfc sram for data transfer.\n");
+ }
+ }
+
/* second phase scan */
if (nand_scan_tail(mtd)) {
res = -ENXIO;
@@ -1651,27 +2168,16 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
return res;
err_scan_tail:
- if (host->has_pmecc && host->nand_chip.ecc.mode == NAND_ECC_HW) {
+ if (host->has_pmecc && host->nand_chip.ecc.mode == NAND_ECC_HW)
pmecc_writel(host->ecc, CTRL, PMECC_CTRL_DISABLE);
- pmecc_data_free(host);
- }
- if (host->ecc)
- iounmap(host->ecc);
- if (host->pmerrloc_base)
- iounmap(host->pmerrloc_base);
- if (host->pmecc_rom_base)
- iounmap(host->pmecc_rom_base);
err_hw_ecc:
err_scan_ident:
err_no_card:
atmel_nand_disable(host);
- platform_set_drvdata(pdev, NULL);
if (host->dma_chan)
dma_release_channel(host->dma_chan);
-err_ecc_ioremap:
- iounmap(host->io_base);
err_nand_ioremap:
- kfree(host);
+ platform_driver_unregister(&atmel_nand_nfc_driver);
return res;
}
@@ -1691,30 +2197,12 @@ static int __exit atmel_nand_remove(struct platform_device *pdev)
pmecc_writel(host->ecc, CTRL, PMECC_CTRL_DISABLE);
pmerrloc_writel(host->pmerrloc_base, ELDIS,
PMERRLOC_DISABLE);
- pmecc_data_free(host);
}
- if (gpio_is_valid(host->board.det_pin))
- gpio_free(host->board.det_pin);
-
- if (gpio_is_valid(host->board.enable_pin))
- gpio_free(host->board.enable_pin);
-
- if (gpio_is_valid(host->board.rdy_pin))
- gpio_free(host->board.rdy_pin);
-
- if (host->ecc)
- iounmap(host->ecc);
- if (host->pmecc_rom_base)
- iounmap(host->pmecc_rom_base);
- if (host->pmerrloc_base)
- iounmap(host->pmerrloc_base);
-
if (host->dma_chan)
dma_release_channel(host->dma_chan);
- iounmap(host->io_base);
- kfree(host);
+ platform_driver_unregister(&atmel_nand_nfc_driver);
return 0;
}
@@ -1728,6 +2216,59 @@ static const struct of_device_id atmel_nand_dt_ids[] = {
MODULE_DEVICE_TABLE(of, atmel_nand_dt_ids);
#endif
+static int atmel_nand_nfc_probe(struct platform_device *pdev)
+{
+ struct atmel_nfc *nfc = &nand_nfc;
+ struct resource *nfc_cmd_regs, *nfc_hsmc_regs, *nfc_sram;
+
+ nfc_cmd_regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ nfc->base_cmd_regs = devm_ioremap_resource(&pdev->dev, nfc_cmd_regs);
+ if (IS_ERR(nfc->base_cmd_regs))
+ return PTR_ERR(nfc->base_cmd_regs);
+
+ nfc_hsmc_regs = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ nfc->hsmc_regs = devm_ioremap_resource(&pdev->dev, nfc_hsmc_regs);
+ if (IS_ERR(nfc->hsmc_regs))
+ return PTR_ERR(nfc->hsmc_regs);
+
+ nfc_sram = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+ if (nfc_sram) {
+ nfc->sram_bank0 = devm_ioremap_resource(&pdev->dev, nfc_sram);
+ if (IS_ERR(nfc->sram_bank0)) {
+ dev_warn(&pdev->dev, "Fail to ioremap the NFC sram with error: %ld. So disable NFC sram.\n",
+ PTR_ERR(nfc->sram_bank0));
+ } else {
+ nfc->use_nfc_sram = true;
+ nfc->sram_bank0_phys = (dma_addr_t)nfc_sram->start;
+
+ if (pdev->dev.of_node)
+ nfc->write_by_sram = of_property_read_bool(
+ pdev->dev.of_node,
+ "atmel,write-by-sram");
+ }
+ }
+
+ nfc->is_initialized = true;
+ dev_info(&pdev->dev, "NFC is probed.\n");
+ return 0;
+}
+
+#if defined(CONFIG_OF)
+static struct of_device_id atmel_nand_nfc_match[] = {
+ { .compatible = "atmel,sama5d3-nfc" },
+ { /* sentinel */ }
+};
+#endif
+
+static struct platform_driver atmel_nand_nfc_driver = {
+ .driver = {
+ .name = "atmel_nand_nfc",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(atmel_nand_nfc_match),
+ },
+ .probe = atmel_nand_nfc_probe,
+};
+
static struct platform_driver atmel_nand_driver = {
.remove = __exit_p(atmel_nand_remove),
.driver = {
diff --git a/drivers/mtd/nand/atmel_nand_nfc.h b/drivers/mtd/nand/atmel_nand_nfc.h
new file mode 100644
index 0000000..4efd117
--- /dev/null
+++ b/drivers/mtd/nand/atmel_nand_nfc.h
@@ -0,0 +1,98 @@
+/*
+ * Atmel NAND Flash Controller (NFC) - System peripheral registers.
+ * Based on SAMA5D3 datasheet.
+ *
+ * © Copyright 2013 Atmel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef ATMEL_NAND_NFC_H
+#define ATMEL_NAND_NFC_H
+
+/*
+ * HSMC NFC registers
+ */
+#define ATMEL_HSMC_NFC_CFG 0x00 /* NFC Configuration Register */
+#define NFC_CFG_PAGESIZE (7 << 0)
+#define NFC_CFG_PAGESIZE_512 (0 << 0)
+#define NFC_CFG_PAGESIZE_1024 (1 << 0)
+#define NFC_CFG_PAGESIZE_2048 (2 << 0)
+#define NFC_CFG_PAGESIZE_4096 (3 << 0)
+#define NFC_CFG_PAGESIZE_8192 (4 << 0)
+#define NFC_CFG_WSPARE (1 << 8)
+#define NFC_CFG_RSPARE (1 << 9)
+#define NFC_CFG_NFC_DTOCYC (0xf << 16)
+#define NFC_CFG_NFC_DTOMUL (0x7 << 20)
+#define NFC_CFG_NFC_SPARESIZE (0x7f << 24)
+#define NFC_CFG_NFC_SPARESIZE_BIT_POS 24
+
+#define ATMEL_HSMC_NFC_CTRL 0x04 /* NFC Control Register */
+#define NFC_CTRL_ENABLE (1 << 0)
+#define NFC_CTRL_DISABLE (1 << 1)
+
+#define ATMEL_HSMC_NFC_SR 0x08 /* NFC Status Register */
+#define NFC_SR_XFR_DONE (1 << 16)
+#define NFC_SR_CMD_DONE (1 << 17)
+#define NFC_SR_RB_EDGE (1 << 24)
+
+#define ATMEL_HSMC_NFC_IER 0x0c
+#define ATMEL_HSMC_NFC_IDR 0x10
+#define ATMEL_HSMC_NFC_IMR 0x14
+#define ATMEL_HSMC_NFC_CYCLE0 0x18 /* NFC Address Cycle Zero */
+#define ATMEL_HSMC_NFC_ADDR_CYCLE0 (0xff)
+
+#define ATMEL_HSMC_NFC_BANK 0x1c /* NFC Bank Register */
+#define ATMEL_HSMC_NFC_BANK0 (0 << 0)
+#define ATMEL_HSMC_NFC_BANK1 (1 << 0)
+
+#define nfc_writel(addr, reg, value) \
+ writel((value), (addr) + ATMEL_HSMC_NFC_##reg)
+
+#define nfc_readl(addr, reg) \
+ readl_relaxed((addr) + ATMEL_HSMC_NFC_##reg)
+
+/*
+ * NFC Address Command definitions
+ */
+#define NFCADDR_CMD_CMD1 (0xff << 2) /* Command for Cycle 1 */
+#define NFCADDR_CMD_CMD1_BIT_POS 2
+#define NFCADDR_CMD_CMD2 (0xff << 10) /* Command for Cycle 2 */
+#define NFCADDR_CMD_CMD2_BIT_POS 10
+#define NFCADDR_CMD_VCMD2 (0x1 << 18) /* Valid Cycle 2 Command */
+#define NFCADDR_CMD_ACYCLE (0x7 << 19) /* Number of Address required */
+#define NFCADDR_CMD_ACYCLE_NONE (0x0 << 19)
+#define NFCADDR_CMD_ACYCLE_1 (0x1 << 19)
+#define NFCADDR_CMD_ACYCLE_2 (0x2 << 19)
+#define NFCADDR_CMD_ACYCLE_3 (0x3 << 19)
+#define NFCADDR_CMD_ACYCLE_4 (0x4 << 19)
+#define NFCADDR_CMD_ACYCLE_5 (0x5 << 19)
+#define NFCADDR_CMD_ACYCLE_BIT_POS 19
+#define NFCADDR_CMD_CSID (0x7 << 22) /* Chip Select Identifier */
+#define NFCADDR_CMD_CSID_0 (0x0 << 22)
+#define NFCADDR_CMD_CSID_1 (0x1 << 22)
+#define NFCADDR_CMD_CSID_2 (0x2 << 22)
+#define NFCADDR_CMD_CSID_3 (0x3 << 22)
+#define NFCADDR_CMD_CSID_4 (0x4 << 22)
+#define NFCADDR_CMD_CSID_5 (0x5 << 22)
+#define NFCADDR_CMD_CSID_6 (0x6 << 22)
+#define NFCADDR_CMD_CSID_7 (0x7 << 22)
+#define NFCADDR_CMD_DATAEN (0x1 << 25) /* Data Transfer Enable */
+#define NFCADDR_CMD_DATADIS (0x0 << 25) /* Data Transfer Disable */
+#define NFCADDR_CMD_NFCRD (0x0 << 26) /* NFC Read Enable */
+#define NFCADDR_CMD_NFCWR (0x1 << 26) /* NFC Write Enable */
+#define NFCADDR_CMD_NFCBUSY (0x1 << 27) /* NFC Busy */
+
+#define nfc_cmd_addr1234_writel(cmd, addr1234, nfc_base) \
+ writel((addr1234), (cmd) + nfc_base)
+
+#define nfc_cmd_readl(bitstatus, nfc_base) \
+ readl_relaxed((bitstatus) + nfc_base)
+
+#define NFC_TIME_OUT_MS 100
+#define NFC_SRAM_BANK1_OFFSET 0x1200
+
+#endif
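To illustrate how these fields combine (mirroring the composition in nfc_nand_command() above; the particular values are assumed for the example), a large-page READ0 with five address cycles on chip select 3 and the SRAM data path enabled would be encoded as:

	u32 nfc_addr_cmd = (NAND_CMD_READ0 << NFCADDR_CMD_CMD1_BIT_POS)
			 | (NAND_CMD_READSTART << NFCADDR_CMD_CMD2_BIT_POS)
			 | NFCADDR_CMD_VCMD2
			 | NFCADDR_CMD_ACYCLE_5
			 | NFCADDR_CMD_CSID_3
			 | NFCADDR_CMD_DATAEN
			 | NFCADDR_CMD_NFCRD;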
diff --git a/drivers/mtd/nand/au1550nd.c b/drivers/mtd/nand/au1550nd.c
index 217459d..ae8dd7c 100644
--- a/drivers/mtd/nand/au1550nd.c
+++ b/drivers/mtd/nand/au1550nd.c
@@ -411,7 +411,7 @@ static int au1550nd_probe(struct platform_device *pdev)
struct resource *r;
int ret, cs;
- pd = pdev->dev.platform_data;
+ pd = dev_get_platdata(&pdev->dev);
if (!pd) {
dev_err(&pdev->dev, "missing platform data\n");
return -ENODEV;
diff --git a/drivers/mtd/nand/bf5xx_nand.c b/drivers/mtd/nand/bf5xx_nand.c
index 776df36..2c42e12 100644
--- a/drivers/mtd/nand/bf5xx_nand.c
+++ b/drivers/mtd/nand/bf5xx_nand.c
@@ -171,7 +171,7 @@ static struct bf5xx_nand_info *to_nand_info(struct platform_device *pdev)
static struct bf5xx_nand_platform *to_nand_plat(struct platform_device *pdev)
{
- return pdev->dev.platform_data;
+ return dev_get_platdata(&pdev->dev);
}
/*
@@ -671,8 +671,6 @@ static int bf5xx_nand_remove(struct platform_device *pdev)
{
struct bf5xx_nand_info *info = to_nand_info(pdev);
- platform_set_drvdata(pdev, NULL);
-
/* first thing we need to do is release all our mtds
* and their partitions, then go through freeing the
* resources used
@@ -832,7 +830,6 @@ static int bf5xx_nand_probe(struct platform_device *pdev)
out_err_nand_scan:
bf5xx_nand_dma_remove(info);
out_err_hw_init:
- platform_set_drvdata(pdev, NULL);
kfree(info);
out_err_kzalloc:
peripheral_free_list(bfin_nfc_pin_req);
diff --git a/drivers/mtd/nand/cs553x_nand.c b/drivers/mtd/nand/cs553x_nand.c
index 2cdeab8..d469a9a 100644
--- a/drivers/mtd/nand/cs553x_nand.c
+++ b/drivers/mtd/nand/cs553x_nand.c
@@ -197,7 +197,7 @@ static int __init cs553x_init_one(int cs, int mmio, unsigned long adr)
}
/* Allocate memory for MTD device structure and private data */
- new_mtd = kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL);
+ new_mtd = kzalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL);
if (!new_mtd) {
printk(KERN_WARNING "Unable to allocate CS553X NAND MTD device structure.\n");
err = -ENOMEM;
@@ -207,10 +207,6 @@ static int __init cs553x_init_one(int cs, int mmio, unsigned long adr)
/* Get pointer to private data */
this = (struct nand_chip *)(&new_mtd[1]);
- /* Initialize structures */
- memset(new_mtd, 0, sizeof(struct mtd_info));
- memset(this, 0, sizeof(struct nand_chip));
-
/* Link the private data with the MTD structure */
new_mtd->priv = this;
new_mtd->owner = THIS_MODULE;
diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c
index c3e15a5..b77a01e 100644
--- a/drivers/mtd/nand/davinci_nand.c
+++ b/drivers/mtd/nand/davinci_nand.c
@@ -530,7 +530,7 @@ MODULE_DEVICE_TABLE(of, davinci_nand_of_match);
static struct davinci_nand_pdata
*nand_davinci_get_pdata(struct platform_device *pdev)
{
- if (!pdev->dev.platform_data && pdev->dev.of_node) {
+ if (!dev_get_platdata(&pdev->dev) && pdev->dev.of_node) {
struct davinci_nand_pdata *pdata;
const char *mode;
u32 prop;
@@ -575,13 +575,13 @@ static struct davinci_nand_pdata
pdata->bbt_options = NAND_BBT_USE_FLASH;
}
- return pdev->dev.platform_data;
+ return dev_get_platdata(&pdev->dev);
}
#else
static struct davinci_nand_pdata
*nand_davinci_get_pdata(struct platform_device *pdev)
{
- return pdev->dev.platform_data;
+ return dev_get_platdata(&pdev->dev);
}
#endif
@@ -623,11 +623,14 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
goto err_nomem;
}
- vaddr = devm_request_and_ioremap(&pdev->dev, res1);
- base = devm_request_and_ioremap(&pdev->dev, res2);
- if (!vaddr || !base) {
- dev_err(&pdev->dev, "ioremap failed\n");
- ret = -EADDRNOTAVAIL;
+ vaddr = devm_ioremap_resource(&pdev->dev, res1);
+ if (IS_ERR(vaddr)) {
+ ret = PTR_ERR(vaddr);
+ goto err_ioremap;
+ }
+ base = devm_ioremap_resource(&pdev->dev, res2);
+ if (IS_ERR(base)) {
+ ret = PTR_ERR(base);
goto err_ioremap;
}
diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
index 0c8bb6b..2ed2bb3 100644
--- a/drivers/mtd/nand/denali.c
+++ b/drivers/mtd/nand/denali.c
@@ -1520,7 +1520,7 @@ int denali_init(struct denali_nand_info *denali)
* so just let controller do 15bit ECC for MLC and 8bit ECC for
* SLC if possible.
* */
- if (denali->nand.cellinfo & 0xc &&
+ if (denali->nand.cellinfo & NAND_CI_CELLTYPE_MSK &&
(denali->mtd.oobsize > (denali->bbtskipbytes +
ECC_15BITS * (denali->mtd.writesize /
ECC_SECTOR_SIZE)))) {
diff --git a/drivers/mtd/nand/diskonchip.c b/drivers/mtd/nand/diskonchip.c
index 81fa578..eaa3c29 100644
--- a/drivers/mtd/nand/diskonchip.c
+++ b/drivers/mtd/nand/diskonchip.c
@@ -46,13 +46,13 @@ static unsigned long __initdata doc_locations[] = {
0xfffd8000, 0xfffda000, 0xfffdc000, 0xfffde000,
0xfffe0000, 0xfffe2000, 0xfffe4000, 0xfffe6000,
0xfffe8000, 0xfffea000, 0xfffec000, 0xfffee000,
-#else /* CONFIG_MTD_DOCPROBE_HIGH */
+#else
0xc8000, 0xca000, 0xcc000, 0xce000,
0xd0000, 0xd2000, 0xd4000, 0xd6000,
0xd8000, 0xda000, 0xdc000, 0xde000,
0xe0000, 0xe2000, 0xe4000, 0xe6000,
0xe8000, 0xea000, 0xec000, 0xee000,
-#endif /* CONFIG_MTD_DOCPROBE_HIGH */
+#endif
#endif
0xffffffff };
diff --git a/drivers/mtd/nand/docg4.c b/drivers/mtd/nand/docg4.c
index fa25e7a..548db23 100644
--- a/drivers/mtd/nand/docg4.c
+++ b/drivers/mtd/nand/docg4.c
@@ -1093,7 +1093,6 @@ static int docg4_block_markbad(struct mtd_info *mtd, loff_t ofs)
struct nand_chip *nand = mtd->priv;
struct docg4_priv *doc = nand->priv;
struct nand_bbt_descr *bbtd = nand->badblock_pattern;
- int block = (int)(ofs >> nand->bbt_erase_shift);
int page = (int)(ofs >> nand->page_shift);
uint32_t g4_addr = mtd_to_docg4_address(page, 0);
@@ -1108,9 +1107,6 @@ static int docg4_block_markbad(struct mtd_info *mtd, loff_t ofs)
if (buf == NULL)
return -ENOMEM;
- /* update bbt in memory */
- nand->bbt[block / 4] |= 0x01 << ((block & 0x03) * 2);
-
/* write bit-wise negation of pattern to oob buffer */
memset(nand->oob_poi, 0xff, mtd->oobsize);
for (i = 0; i < bbtd->len; i++)
@@ -1120,8 +1116,6 @@ static int docg4_block_markbad(struct mtd_info *mtd, loff_t ofs)
write_page_prologue(mtd, g4_addr);
docg4_write_page(mtd, nand, buf, 1);
ret = pageprog(mtd);
- if (!ret)
- mtd->ecc_stats.badblocks++;
kfree(buf);
@@ -1368,7 +1362,6 @@ static int __init probe_docg4(struct platform_device *pdev)
struct nand_chip *nand = mtd->priv;
struct docg4_priv *doc = nand->priv;
nand_release(mtd); /* deletes partitions and mtd devices */
- platform_set_drvdata(pdev, NULL);
free_bch(doc->bch);
kfree(mtd);
}
@@ -1380,7 +1373,6 @@ static int __exit cleanup_docg4(struct platform_device *pdev)
{
struct docg4_priv *doc = platform_get_drvdata(pdev);
nand_release(doc->mtd);
- platform_set_drvdata(pdev, NULL);
free_bch(doc->bch);
kfree(doc->mtd);
iounmap(doc->virtadr);
diff --git a/drivers/mtd/nand/fsl_ifc_nand.c b/drivers/mtd/nand/fsl_ifc_nand.c
index f1f7f12..317a771 100644
--- a/drivers/mtd/nand/fsl_ifc_nand.c
+++ b/drivers/mtd/nand/fsl_ifc_nand.c
@@ -823,7 +823,7 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
/* set up nand options */
chip->bbt_options = NAND_BBT_USE_FLASH;
-
+ chip->options = NAND_NO_SUBPAGE_WRITE;
if (ioread32be(&ifc->cspr_cs[priv->bank].cspr) & CSPR_PORT_SIZE_16) {
chip->read_byte = fsl_ifc_read_byte16;
@@ -908,7 +908,6 @@ static int fsl_ifc_chip_remove(struct fsl_ifc_mtd *priv)
ifc_nand_ctrl->chips[priv->bank] = NULL;
dev_set_drvdata(priv->dev, NULL);
- kfree(priv);
return 0;
}
diff --git a/drivers/mtd/nand/fsmc_nand.c b/drivers/mtd/nand/fsmc_nand.c
index 911e243..3dc1a75 100644
--- a/drivers/mtd/nand/fsmc_nand.c
+++ b/drivers/mtd/nand/fsmc_nand.c
@@ -889,6 +889,24 @@ static int fsmc_nand_probe_config_dt(struct platform_device *pdev,
if (of_get_property(np, "nand-skip-bbtscan", NULL))
pdata->options = NAND_SKIP_BBTSCAN;
+ pdata->nand_timings = devm_kzalloc(&pdev->dev,
+ sizeof(*pdata->nand_timings), GFP_KERNEL);
+ if (!pdata->nand_timings) {
+ dev_err(&pdev->dev, "no memory for nand_timing\n");
+ return -ENOMEM;
+ }
+ of_property_read_u8_array(np, "timings", (u8 *)pdata->nand_timings,
+ sizeof(*pdata->nand_timings));
+
+ /* Set default NAND bank to 0 */
+ pdata->bank = 0;
+ if (!of_property_read_u32(np, "bank", &val)) {
+ if (val > 3) {
+ dev_err(&pdev->dev, "invalid bank %u\n", val);
+ return -EINVAL;
+ }
+ pdata->bank = val;
+ }
return 0;
}
#else
@@ -940,9 +958,6 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
}
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_data");
- if (!res)
- return -EINVAL;
-
host->data_va = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(host->data_va))
return PTR_ERR(host->data_va);
@@ -950,25 +965,16 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
host->data_pa = (dma_addr_t)res->start;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_addr");
- if (!res)
- return -EINVAL;
-
host->addr_va = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(host->addr_va))
return PTR_ERR(host->addr_va);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_cmd");
- if (!res)
- return -EINVAL;
-
host->cmd_va = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(host->cmd_va))
return PTR_ERR(host->cmd_va);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fsmc_regs");
- if (!res)
- return -EINVAL;
-
host->regs_va = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(host->regs_va))
return PTR_ERR(host->regs_va);
@@ -1174,8 +1180,6 @@ static int fsmc_nand_remove(struct platform_device *pdev)
{
struct fsmc_nand_data *host = platform_get_drvdata(pdev);
- platform_set_drvdata(pdev, NULL);
-
if (host) {
nand_release(&host->mtd);
@@ -1190,7 +1194,7 @@ static int fsmc_nand_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int fsmc_nand_suspend(struct device *dev)
{
struct fsmc_nand_data *host = dev_get_drvdata(dev);
@@ -1210,9 +1214,9 @@ static int fsmc_nand_resume(struct device *dev)
}
return 0;
}
+#endif
static SIMPLE_DEV_PM_OPS(fsmc_nand_pm_ops, fsmc_nand_suspend, fsmc_nand_resume);
-#endif
#ifdef CONFIG_OF
static const struct of_device_id fsmc_nand_id_table[] = {
@@ -1229,9 +1233,7 @@ static struct platform_driver fsmc_nand_driver = {
.owner = THIS_MODULE,
.name = "fsmc-nand",
.of_match_table = of_match_ptr(fsmc_nand_id_table),
-#ifdef CONFIG_PM
.pm = &fsmc_nand_pm_ops,
-#endif
},
};
diff --git a/drivers/mtd/nand/gpio.c b/drivers/mtd/nand/gpio.c
index 89065dd..e826f89 100644
--- a/drivers/mtd/nand/gpio.c
+++ b/drivers/mtd/nand/gpio.c
@@ -17,6 +17,7 @@
*/
#include <linux/kernel.h>
+#include <linux/err.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
@@ -86,59 +87,11 @@ static void gpio_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
gpio_nand_dosync(gpiomtd);
}
-static void gpio_nand_writebuf(struct mtd_info *mtd, const u_char *buf, int len)
-{
- struct nand_chip *this = mtd->priv;
-
- iowrite8_rep(this->IO_ADDR_W, buf, len);
-}
-
-static void gpio_nand_readbuf(struct mtd_info *mtd, u_char *buf, int len)
-{
- struct nand_chip *this = mtd->priv;
-
- ioread8_rep(this->IO_ADDR_R, buf, len);
-}
-
-static void gpio_nand_writebuf16(struct mtd_info *mtd, const u_char *buf,
- int len)
-{
- struct nand_chip *this = mtd->priv;
-
- if (IS_ALIGNED((unsigned long)buf, 2)) {
- iowrite16_rep(this->IO_ADDR_W, buf, len>>1);
- } else {
- int i;
- unsigned short *ptr = (unsigned short *)buf;
-
- for (i = 0; i < len; i += 2, ptr++)
- writew(*ptr, this->IO_ADDR_W);
- }
-}
-
-static void gpio_nand_readbuf16(struct mtd_info *mtd, u_char *buf, int len)
-{
- struct nand_chip *this = mtd->priv;
-
- if (IS_ALIGNED((unsigned long)buf, 2)) {
- ioread16_rep(this->IO_ADDR_R, buf, len>>1);
- } else {
- int i;
- unsigned short *ptr = (unsigned short *)buf;
-
- for (i = 0; i < len; i += 2, ptr++)
- *ptr = readw(this->IO_ADDR_R);
- }
-}
-
static int gpio_nand_devready(struct mtd_info *mtd)
{
struct gpiomtd *gpiomtd = gpio_nand_getpriv(mtd);
- if (gpio_is_valid(gpiomtd->plat.gpio_rdy))
- return gpio_get_value(gpiomtd->plat.gpio_rdy);
-
- return 1;
+ return gpio_get_value(gpiomtd->plat.gpio_rdy);
}
#ifdef CONFIG_OF
@@ -153,6 +106,9 @@ static int gpio_nand_get_config_of(const struct device *dev,
{
u32 val;
+ if (!dev->of_node)
+ return -ENODEV;
+
if (!of_property_read_u32(dev->of_node, "bank-width", &val)) {
if (val == 2) {
plat->options |= NAND_BUSWIDTH_16;
@@ -211,8 +167,8 @@ static inline int gpio_nand_get_config(const struct device *dev,
if (!ret)
return ret;
- if (dev->platform_data) {
- memcpy(plat, dev->platform_data, sizeof(*plat));
+ if (dev_get_platdata(dev)) {
+ memcpy(plat, dev_get_platdata(dev), sizeof(*plat));
return 0;
}
@@ -230,145 +186,100 @@ gpio_nand_get_io_sync(struct platform_device *pdev)
return platform_get_resource(pdev, IORESOURCE_MEM, 1);
}
-static int gpio_nand_remove(struct platform_device *dev)
+static int gpio_nand_remove(struct platform_device *pdev)
{
- struct gpiomtd *gpiomtd = platform_get_drvdata(dev);
- struct resource *res;
+ struct gpiomtd *gpiomtd = platform_get_drvdata(pdev);
nand_release(&gpiomtd->mtd_info);
- res = gpio_nand_get_io_sync(dev);
- iounmap(gpiomtd->io_sync);
- if (res)
- release_mem_region(res->start, resource_size(res));
-
- res = platform_get_resource(dev, IORESOURCE_MEM, 0);
- iounmap(gpiomtd->nand_chip.IO_ADDR_R);
- release_mem_region(res->start, resource_size(res));
-
if (gpio_is_valid(gpiomtd->plat.gpio_nwp))
gpio_set_value(gpiomtd->plat.gpio_nwp, 0);
gpio_set_value(gpiomtd->plat.gpio_nce, 1);
- gpio_free(gpiomtd->plat.gpio_cle);
- gpio_free(gpiomtd->plat.gpio_ale);
- gpio_free(gpiomtd->plat.gpio_nce);
- if (gpio_is_valid(gpiomtd->plat.gpio_nwp))
- gpio_free(gpiomtd->plat.gpio_nwp);
- if (gpio_is_valid(gpiomtd->plat.gpio_rdy))
- gpio_free(gpiomtd->plat.gpio_rdy);
-
return 0;
}
-static void __iomem *request_and_remap(struct resource *res, size_t size,
- const char *name, int *err)
-{
- void __iomem *ptr;
-
- if (!request_mem_region(res->start, resource_size(res), name)) {
- *err = -EBUSY;
- return NULL;
- }
-
- ptr = ioremap(res->start, size);
- if (!ptr) {
- release_mem_region(res->start, resource_size(res));
- *err = -ENOMEM;
- }
- return ptr;
-}
-
-static int gpio_nand_probe(struct platform_device *dev)
+static int gpio_nand_probe(struct platform_device *pdev)
{
struct gpiomtd *gpiomtd;
- struct nand_chip *this;
- struct resource *res0, *res1;
+ struct nand_chip *chip;
+ struct resource *res;
struct mtd_part_parser_data ppdata = {};
int ret = 0;
- if (!dev->dev.of_node && !dev->dev.platform_data)
- return -EINVAL;
-
- res0 = platform_get_resource(dev, IORESOURCE_MEM, 0);
- if (!res0)
+ if (!pdev->dev.of_node && !dev_get_platdata(&pdev->dev))
return -EINVAL;
- gpiomtd = devm_kzalloc(&dev->dev, sizeof(*gpiomtd), GFP_KERNEL);
- if (gpiomtd == NULL) {
- dev_err(&dev->dev, "failed to create NAND MTD\n");
+ gpiomtd = devm_kzalloc(&pdev->dev, sizeof(*gpiomtd), GFP_KERNEL);
+ if (!gpiomtd) {
+ dev_err(&pdev->dev, "failed to create NAND MTD\n");
return -ENOMEM;
}
- this = &gpiomtd->nand_chip;
- this->IO_ADDR_R = request_and_remap(res0, 2, "NAND", &ret);
- if (!this->IO_ADDR_R) {
- dev_err(&dev->dev, "unable to map NAND\n");
- goto err_map;
- }
+ chip = &gpiomtd->nand_chip;
- res1 = gpio_nand_get_io_sync(dev);
- if (res1) {
- gpiomtd->io_sync = request_and_remap(res1, 4, "NAND sync", &ret);
- if (!gpiomtd->io_sync) {
- dev_err(&dev->dev, "unable to map sync NAND\n");
- goto err_sync;
- }
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ chip->IO_ADDR_R = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(chip->IO_ADDR_R))
+ return PTR_ERR(chip->IO_ADDR_R);
+
+ res = gpio_nand_get_io_sync(pdev);
+ if (res) {
+ gpiomtd->io_sync = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(gpiomtd->io_sync))
+ return PTR_ERR(gpiomtd->io_sync);
}
- ret = gpio_nand_get_config(&dev->dev, &gpiomtd->plat);
+ ret = gpio_nand_get_config(&pdev->dev, &gpiomtd->plat);
if (ret)
- goto err_nce;
+ return ret;
- ret = gpio_request(gpiomtd->plat.gpio_nce, "NAND NCE");
+ ret = devm_gpio_request(&pdev->dev, gpiomtd->plat.gpio_nce, "NAND NCE");
if (ret)
- goto err_nce;
+ return ret;
gpio_direction_output(gpiomtd->plat.gpio_nce, 1);
+
if (gpio_is_valid(gpiomtd->plat.gpio_nwp)) {
- ret = gpio_request(gpiomtd->plat.gpio_nwp, "NAND NWP");
+ ret = devm_gpio_request(&pdev->dev, gpiomtd->plat.gpio_nwp,
+ "NAND NWP");
if (ret)
- goto err_nwp;
- gpio_direction_output(gpiomtd->plat.gpio_nwp, 1);
+ return ret;
}
- ret = gpio_request(gpiomtd->plat.gpio_ale, "NAND ALE");
+
+ ret = devm_gpio_request(&pdev->dev, gpiomtd->plat.gpio_ale, "NAND ALE");
if (ret)
- goto err_ale;
+ return ret;
gpio_direction_output(gpiomtd->plat.gpio_ale, 0);
- ret = gpio_request(gpiomtd->plat.gpio_cle, "NAND CLE");
+
+ ret = devm_gpio_request(&pdev->dev, gpiomtd->plat.gpio_cle, "NAND CLE");
if (ret)
- goto err_cle;
+ return ret;
gpio_direction_output(gpiomtd->plat.gpio_cle, 0);
+
if (gpio_is_valid(gpiomtd->plat.gpio_rdy)) {
- ret = gpio_request(gpiomtd->plat.gpio_rdy, "NAND RDY");
+ ret = devm_gpio_request(&pdev->dev, gpiomtd->plat.gpio_rdy,
+ "NAND RDY");
if (ret)
- goto err_rdy;
+ return ret;
gpio_direction_input(gpiomtd->plat.gpio_rdy);
+ chip->dev_ready = gpio_nand_devready;
}
+ chip->IO_ADDR_W = chip->IO_ADDR_R;
+ chip->ecc.mode = NAND_ECC_SOFT;
+ chip->options = gpiomtd->plat.options;
+ chip->chip_delay = gpiomtd->plat.chip_delay;
+ chip->cmd_ctrl = gpio_nand_cmd_ctrl;
- this->IO_ADDR_W = this->IO_ADDR_R;
- this->ecc.mode = NAND_ECC_SOFT;
- this->options = gpiomtd->plat.options;
- this->chip_delay = gpiomtd->plat.chip_delay;
-
- /* install our routines */
- this->cmd_ctrl = gpio_nand_cmd_ctrl;
- this->dev_ready = gpio_nand_devready;
+ gpiomtd->mtd_info.priv = chip;
+ gpiomtd->mtd_info.owner = THIS_MODULE;
- if (this->options & NAND_BUSWIDTH_16) {
- this->read_buf = gpio_nand_readbuf16;
- this->write_buf = gpio_nand_writebuf16;
- } else {
- this->read_buf = gpio_nand_readbuf;
- this->write_buf = gpio_nand_writebuf;
- }
+ platform_set_drvdata(pdev, gpiomtd);
- /* set the mtd private data for the nand driver */
- gpiomtd->mtd_info.priv = this;
- gpiomtd->mtd_info.owner = THIS_MODULE;
+ if (gpio_is_valid(gpiomtd->plat.gpio_nwp))
+ gpio_direction_output(gpiomtd->plat.gpio_nwp, 1);
if (nand_scan(&gpiomtd->mtd_info, 1)) {
- dev_err(&dev->dev, "no nand chips found?\n");
ret = -ENXIO;
goto err_wp;
}
@@ -377,38 +288,17 @@ static int gpio_nand_probe(struct platform_device *dev)
gpiomtd->plat.adjust_parts(&gpiomtd->plat,
gpiomtd->mtd_info.size);
- ppdata.of_node = dev->dev.of_node;
+ ppdata.of_node = pdev->dev.of_node;
ret = mtd_device_parse_register(&gpiomtd->mtd_info, NULL, &ppdata,
gpiomtd->plat.parts,
gpiomtd->plat.num_parts);
- if (ret)
- goto err_wp;
- platform_set_drvdata(dev, gpiomtd);
-
- return 0;
+ if (!ret)
+ return 0;
err_wp:
if (gpio_is_valid(gpiomtd->plat.gpio_nwp))
gpio_set_value(gpiomtd->plat.gpio_nwp, 0);
- if (gpio_is_valid(gpiomtd->plat.gpio_rdy))
- gpio_free(gpiomtd->plat.gpio_rdy);
-err_rdy:
- gpio_free(gpiomtd->plat.gpio_cle);
-err_cle:
- gpio_free(gpiomtd->plat.gpio_ale);
-err_ale:
- if (gpio_is_valid(gpiomtd->plat.gpio_nwp))
- gpio_free(gpiomtd->plat.gpio_nwp);
-err_nwp:
- gpio_free(gpiomtd->plat.gpio_nce);
-err_nce:
- iounmap(gpiomtd->io_sync);
- if (res1)
- release_mem_region(res1->start, resource_size(res1));
-err_sync:
- iounmap(gpiomtd->nand_chip.IO_ADDR_R);
- release_mem_region(res0->start, resource_size(res0));
-err_map:
+
return ret;
}
@@ -417,6 +307,7 @@ static struct platform_driver gpio_nand_driver = {
.remove = gpio_nand_remove,
.driver = {
.name = "gpio-nand",
+ .owner = THIS_MODULE,
.of_match_table = of_match_ptr(gpio_nand_id_table),
},
};
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
index 25ecfa1..59ab069 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
@@ -26,7 +26,6 @@
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/mtd/partitions.h>
-#include <linux/pinctrl/consumer.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_mtd.h>
@@ -112,7 +111,131 @@ static inline bool gpmi_check_ecc(struct gpmi_nand_data *this)
return true;
}
-int common_nfc_set_geometry(struct gpmi_nand_data *this)
+/*
+ * If we can get the ECC information from the NAND chip, we do not
+ * need to calculate it ourselves.
+ *
+ * We may have available oob space in this case.
+ */
+static bool set_geometry_by_ecc_info(struct gpmi_nand_data *this)
+{
+ struct bch_geometry *geo = &this->bch_geometry;
+ struct mtd_info *mtd = &this->mtd;
+ struct nand_chip *chip = mtd->priv;
+ struct nand_oobfree *of = gpmi_hw_ecclayout.oobfree;
+ unsigned int block_mark_bit_offset;
+
+ if (!(chip->ecc_strength_ds > 0 && chip->ecc_step_ds > 0))
+ return false;
+
+ switch (chip->ecc_step_ds) {
+ case SZ_512:
+ geo->gf_len = 13;
+ break;
+ case SZ_1K:
+ geo->gf_len = 14;
+ break;
+ default:
+ dev_err(this->dev,
+ "unsupported nand chip. ecc bits : %d, ecc size : %d\n",
+ chip->ecc_strength_ds, chip->ecc_step_ds);
+ return false;
+ }
+ geo->ecc_chunk_size = chip->ecc_step_ds;
+ geo->ecc_strength = round_up(chip->ecc_strength_ds, 2);
+ if (!gpmi_check_ecc(this))
+ return false;
+
+ /* Keep the C >= O */
+ if (geo->ecc_chunk_size < mtd->oobsize) {
+ dev_err(this->dev,
+ "unsupported nand chip. ecc size: %d, oob size : %d\n",
+ chip->ecc_step_ds, mtd->oobsize);
+ return false;
+ }
+
+	/* The default value; see the comment in legacy_set_geometry(). */
+ geo->metadata_size = 10;
+
+ geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunk_size;
+
+ /*
+	 * For a NAND chip with a 2K page (512-byte data chunks), the layout is:
+ *
+ * | P |
+ * |<----------------------------------------------------->|
+ * | |
+ * | (Block Mark) |
+ * | P' | | | |
+ * |<-------------------------------------------->| D | | O' |
+ * | |<---->| |<--->|
+ * V V V V V
+ * +---+----------+-+----------+-+----------+-+----------+-+-----+
+ * | M | data |E| data |E| data |E| data |E| |
+ * +---+----------+-+----------+-+----------+-+----------+-+-----+
+ * ^ ^
+ * | O |
+ * |<------------>|
+ * | |
+ *
+	 * P : the page size for the BCH module.
+	 * E : the ECC strength.
+	 * G : the length of the Galois Field.
+	 * N : the chunk count per page.
+	 * M : the metadata size per page.
+	 * C : the ECC chunk size, aka the "data" above.
+	 * P': the NAND chip's page size.
+	 * O : the NAND chip's oob size.
+	 * O': the free oob.
+ *
+ * The formula for P is :
+ *
+ * E * G * N
+ * P = ------------ + P' + M
+ * 8
+ *
+	 * The position of the block mark moves forward in the ECC-based
+	 * view of the page, and the delta is:
+ *
+ * E * G * (N - 1)
+ * D = (---------------- + M)
+ * 8
+ *
+ * Please see the comment in legacy_set_geometry().
+	 * With the condition C >= O, we still get the same result.
+ * So the bit position of the physical block mark within the ECC-based
+	 * view of the page is:
+ * (P' - D) * 8
+ */
+ geo->page_size = mtd->writesize + geo->metadata_size +
+ (geo->gf_len * geo->ecc_strength * geo->ecc_chunk_count) / 8;
+
+ /* The available oob size we have. */
+ if (geo->page_size < mtd->writesize + mtd->oobsize) {
+ of->offset = geo->page_size - mtd->writesize;
+ of->length = mtd->oobsize - of->offset;
+ }
+
+ geo->payload_size = mtd->writesize;
+
+ geo->auxiliary_status_offset = ALIGN(geo->metadata_size, 4);
+ geo->auxiliary_size = ALIGN(geo->metadata_size, 4)
+ + ALIGN(geo->ecc_chunk_count, 4);
+
+ if (!this->swap_block_mark)
+ return true;
+
+ /* For bit swap. */
+ block_mark_bit_offset = mtd->writesize * 8 -
+ (geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - 1)
+ + geo->metadata_size * 8);
+
+ geo->block_mark_byte_offset = block_mark_bit_offset / 8;
+ geo->block_mark_bit_offset = block_mark_bit_offset % 8;
+ return true;
+}
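Plugging representative numbers into the formulas above (a hypothetical 2 KiB-page, 64-byte-OOB chip with E = 8, G = 13, N = 4, M = 10):

	P = (8 * 13 * 4) / 8 + 2048 + 10 = 2110	/* BCH view of the page */
	D = (8 * 13 * 3) / 8 + 10 = 49		/* block mark delta */
	block_mark_bit_offset = (2048 - 49) * 8 = 15992
		/* i.e. byte offset 1999, bit offset 0 */

and the free oob comes out as of->offset = 2110 - 2048 = 62 with of->length = 64 - 62 = 2.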
+
+static int legacy_set_geometry(struct gpmi_nand_data *this)
{
struct bch_geometry *geo = &this->bch_geometry;
struct mtd_info *mtd = &this->mtd;
@@ -224,6 +347,11 @@ int common_nfc_set_geometry(struct gpmi_nand_data *this)
return 0;
}
+int common_nfc_set_geometry(struct gpmi_nand_data *this)
+{
+ return set_geometry_by_ecc_info(this) ? 0 : legacy_set_geometry(this);
+}
+
struct dma_chan *get_dma_chan(struct gpmi_nand_data *this)
{
int chipnr = this->current_chip;
@@ -355,7 +483,7 @@ static int acquire_register_block(struct gpmi_nand_data *this,
r = platform_get_resource_byname(pdev, IORESOURCE_MEM, res_name);
if (!r) {
pr_err("Can't get resource for %s\n", res_name);
- return -ENXIO;
+ return -ENODEV;
}
p = ioremap(r->start, resource_size(r));
@@ -396,7 +524,7 @@ static int acquire_bch_irq(struct gpmi_nand_data *this, irq_handler_t irq_h)
r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, res_name);
if (!r) {
pr_err("Can't get resource for %s\n", res_name);
- return -ENXIO;
+ return -ENODEV;
}
err = request_irq(r->start, irq_h, 0, res_name, this);
@@ -473,12 +601,14 @@ static int gpmi_get_clks(struct gpmi_nand_data *this)
struct resources *r = &this->resources;
char **extra_clks = NULL;
struct clk *clk;
- int i;
+ int err, i;
/* The main clock is stored in the first. */
r->clock[0] = clk_get(this->dev, "gpmi_io");
- if (IS_ERR(r->clock[0]))
+ if (IS_ERR(r->clock[0])) {
+ err = PTR_ERR(r->clock[0]);
goto err_clock;
+ }
/* Get extra clocks */
if (GPMI_IS_MX6Q(this))
@@ -491,8 +621,10 @@ static int gpmi_get_clks(struct gpmi_nand_data *this)
break;
clk = clk_get(this->dev, extra_clks[i - 1]);
- if (IS_ERR(clk))
+ if (IS_ERR(clk)) {
+ err = PTR_ERR(clk);
goto err_clock;
+ }
r->clock[i] = clk;
}
@@ -511,12 +643,11 @@ static int gpmi_get_clks(struct gpmi_nand_data *this)
err_clock:
dev_dbg(this->dev, "failed in finding the clocks.\n");
gpmi_put_clks(this);
- return -ENOMEM;
+ return err;
}
static int acquire_resources(struct gpmi_nand_data *this)
{
- struct pinctrl *pinctrl;
int ret;
ret = acquire_register_block(this, GPMI_NAND_GPMI_REGS_ADDR_RES_NAME);
@@ -535,19 +666,12 @@ static int acquire_resources(struct gpmi_nand_data *this)
if (ret)
goto exit_dma_channels;
- pinctrl = devm_pinctrl_get_select_default(&this->pdev->dev);
- if (IS_ERR(pinctrl)) {
- ret = PTR_ERR(pinctrl);
- goto exit_pin;
- }
-
ret = gpmi_get_clks(this);
if (ret)
goto exit_clock;
return 0;
exit_clock:
-exit_pin:
release_dma_channels(this);
exit_dma_channels:
release_bch_irq(this);
@@ -1153,43 +1277,31 @@ static int gpmi_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
struct nand_chip *chip = mtd->priv;
struct gpmi_nand_data *this = chip->priv;
- int block, ret = 0;
+ int ret = 0;
uint8_t *block_mark;
int column, page, status, chipnr;
- /* Get block number */
- block = (int)(ofs >> chip->bbt_erase_shift);
- if (chip->bbt)
- chip->bbt[block >> 2] |= 0x01 << ((block & 0x03) << 1);
+ chipnr = (int)(ofs >> chip->chip_shift);
+ chip->select_chip(mtd, chipnr);
- /* Do we have a flash based bad block table ? */
- if (chip->bbt_options & NAND_BBT_USE_FLASH)
- ret = nand_update_bbt(mtd, ofs);
- else {
- chipnr = (int)(ofs >> chip->chip_shift);
- chip->select_chip(mtd, chipnr);
+ column = this->swap_block_mark ? mtd->writesize : 0;
- column = this->swap_block_mark ? mtd->writesize : 0;
+ /* Write the block mark. */
+ block_mark = this->data_buffer_dma;
+ block_mark[0] = 0; /* bad block marker */
- /* Write the block mark. */
- block_mark = this->data_buffer_dma;
- block_mark[0] = 0; /* bad block marker */
+ /* Shift to get page */
+ page = (int)(ofs >> chip->page_shift);
- /* Shift to get page */
- page = (int)(ofs >> chip->page_shift);
+ chip->cmdfunc(mtd, NAND_CMD_SEQIN, column, page);
+ chip->write_buf(mtd, block_mark, 1);
+ chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
- chip->cmdfunc(mtd, NAND_CMD_SEQIN, column, page);
- chip->write_buf(mtd, block_mark, 1);
- chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+ status = chip->waitfunc(mtd, chip);
+ if (status & NAND_STATUS_FAIL)
+ ret = -EIO;
- status = chip->waitfunc(mtd, chip);
- if (status & NAND_STATUS_FAIL)
- ret = -EIO;
-
- chip->select_chip(mtd, -1);
- }
- if (!ret)
- mtd->ecc_stats.badblocks++;
+ chip->select_chip(mtd, -1);
return ret;
}
@@ -1469,19 +1581,22 @@ static int gpmi_pre_bbt_scan(struct gpmi_nand_data *this)
if (ret)
return ret;
- /* Adjust the ECC strength according to the chip. */
- this->nand.ecc.strength = this->bch_geometry.ecc_strength;
- this->mtd.ecc_strength = this->bch_geometry.ecc_strength;
- this->mtd.bitflip_threshold = this->bch_geometry.ecc_strength;
-
/* NAND boot init, depends on the gpmi_set_geometry(). */
return nand_boot_init(this);
}
-static int gpmi_scan_bbt(struct mtd_info *mtd)
+static void gpmi_nfc_exit(struct gpmi_nand_data *this)
{
+ nand_release(&this->mtd);
+ gpmi_free_dma_buffer(this);
+}
+
+static int gpmi_init_last(struct gpmi_nand_data *this)
+{
+ struct mtd_info *mtd = &this->mtd;
struct nand_chip *chip = mtd->priv;
- struct gpmi_nand_data *this = chip->priv;
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ struct bch_geometry *bch_geo = &this->bch_geometry;
int ret;
/* Prepare for the BBT scan. */
@@ -1489,6 +1604,16 @@ static int gpmi_scan_bbt(struct mtd_info *mtd)
if (ret)
return ret;
+ /* Init the nand_ecc_ctrl{} */
+ ecc->read_page = gpmi_ecc_read_page;
+ ecc->write_page = gpmi_ecc_write_page;
+ ecc->read_oob = gpmi_ecc_read_oob;
+ ecc->write_oob = gpmi_ecc_write_oob;
+ ecc->mode = NAND_ECC_HW;
+ ecc->size = bch_geo->ecc_chunk_size;
+ ecc->strength = bch_geo->ecc_strength;
+ ecc->layout = &gpmi_hw_ecclayout;
+
/*
 * Can we enable extra features, such as EDO or sync mode?
*
@@ -1497,14 +1622,7 @@ static int gpmi_scan_bbt(struct mtd_info *mtd)
*/
gpmi_extra_init(this);
- /* use the default BBT implementation */
- return nand_default_bbt(mtd);
-}
-
-static void gpmi_nfc_exit(struct gpmi_nand_data *this)
-{
- nand_release(&this->mtd);
- gpmi_free_dma_buffer(this);
+ return 0;
}
static int gpmi_nfc_init(struct gpmi_nand_data *this)
@@ -1530,33 +1648,33 @@ static int gpmi_nfc_init(struct gpmi_nand_data *this)
chip->read_byte = gpmi_read_byte;
chip->read_buf = gpmi_read_buf;
chip->write_buf = gpmi_write_buf;
- chip->ecc.read_page = gpmi_ecc_read_page;
- chip->ecc.write_page = gpmi_ecc_write_page;
- chip->ecc.read_oob = gpmi_ecc_read_oob;
- chip->ecc.write_oob = gpmi_ecc_write_oob;
- chip->scan_bbt = gpmi_scan_bbt;
chip->badblock_pattern = &gpmi_bbt_descr;
chip->block_markbad = gpmi_block_markbad;
chip->options |= NAND_NO_SUBPAGE_WRITE;
- chip->ecc.mode = NAND_ECC_HW;
- chip->ecc.size = 1;
- chip->ecc.strength = 8;
- chip->ecc.layout = &gpmi_hw_ecclayout;
if (of_get_nand_on_flash_bbt(this->dev->of_node))
chip->bbt_options |= NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB;
- /* Allocate a temporary DMA buffer for reading ID in the nand_scan() */
+ /*
+ * Allocate a temporary DMA buffer for reading ID in the
+ * nand_scan_ident().
+ */
this->bch_geometry.payload_size = 1024;
this->bch_geometry.auxiliary_size = 128;
ret = gpmi_alloc_dma_buffer(this);
if (ret)
goto err_out;
- ret = nand_scan(mtd, 1);
- if (ret) {
- pr_err("Chip scan failed\n");
+ ret = nand_scan_ident(mtd, 1, NULL);
+ if (ret)
+ goto err_out;
+
+ ret = gpmi_init_last(this);
+ if (ret)
+ goto err_out;
+
+ ret = nand_scan_tail(mtd);
+ if (ret)
goto err_out;
- }
ppdata.of_node = this->pdev->dev.of_node;
ret = mtd_device_parse_register(mtd, NULL, &ppdata, NULL, 0);
@@ -1601,7 +1719,7 @@ static int gpmi_nand_probe(struct platform_device *pdev)
pdev->id_entry = of_id->data;
} else {
pr_err("Failed to find the right device id.\n");
- return -ENOMEM;
+ return -ENODEV;
}
this = kzalloc(sizeof(*this), GFP_KERNEL);
@@ -1633,7 +1751,6 @@ static int gpmi_nand_probe(struct platform_device *pdev)
exit_nfc_init:
release_resources(this);
exit_acquire_resources:
- platform_set_drvdata(pdev, NULL);
dev_err(this->dev, "driver registration failed: %d\n", ret);
kfree(this);
@@ -1646,7 +1763,6 @@ static int gpmi_nand_remove(struct platform_device *pdev)
gpmi_nfc_exit(this);
release_resources(this);
- platform_set_drvdata(pdev, NULL);
kfree(this);
return 0;
}
diff --git a/drivers/mtd/nand/jz4740_nand.c b/drivers/mtd/nand/jz4740_nand.c
index b76460e..a264b88 100644
--- a/drivers/mtd/nand/jz4740_nand.c
+++ b/drivers/mtd/nand/jz4740_nand.c
@@ -411,7 +411,7 @@ static int jz_nand_probe(struct platform_device *pdev)
struct jz_nand *nand;
struct nand_chip *chip;
struct mtd_info *mtd;
- struct jz_nand_platform_data *pdata = pdev->dev.platform_data;
+ struct jz_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
size_t chipnr, bank_idx;
uint8_t nand_maf_id = 0, nand_dev_id = 0;
@@ -538,7 +538,6 @@ err_unclaim_banks:
err_gpio_busy:
if (pdata && gpio_is_valid(pdata->busy_gpio))
gpio_free(pdata->busy_gpio);
- platform_set_drvdata(pdev, NULL);
err_iounmap_mmio:
jz_nand_iounmap_resource(nand->mem, nand->base);
err_free:
@@ -549,7 +548,7 @@ err_free:
static int jz_nand_remove(struct platform_device *pdev)
{
struct jz_nand *nand = platform_get_drvdata(pdev);
- struct jz_nand_platform_data *pdata = pdev->dev.platform_data;
+ struct jz_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
size_t i;
nand_release(&nand->mtd);
@@ -570,7 +569,6 @@ static int jz_nand_remove(struct platform_device *pdev)
jz_nand_iounmap_resource(nand->mem, nand->base);
- platform_set_drvdata(pdev, NULL);
kfree(nand);
return 0;
diff --git a/drivers/mtd/nand/lpc32xx_mlc.c b/drivers/mtd/nand/lpc32xx_mlc.c
index fd1df5e..f4dd2a8 100644
--- a/drivers/mtd/nand/lpc32xx_mlc.c
+++ b/drivers/mtd/nand/lpc32xx_mlc.c
@@ -696,7 +696,7 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
}
lpc32xx_wp_disable(host);
- host->pdata = pdev->dev.platform_data;
+ host->pdata = dev_get_platdata(&pdev->dev);
nand_chip->priv = host; /* link the private data structures */
mtd->priv = nand_chip;
@@ -828,7 +828,6 @@ err_exit3:
err_exit2:
clk_disable(host->clk);
clk_put(host->clk);
- platform_set_drvdata(pdev, NULL);
err_exit1:
lpc32xx_wp_enable(host);
gpio_free(host->ncfg->wp_gpio);
@@ -851,7 +850,6 @@ static int lpc32xx_nand_remove(struct platform_device *pdev)
clk_disable(host->clk);
clk_put(host->clk);
- platform_set_drvdata(pdev, NULL);
lpc32xx_wp_enable(host);
gpio_free(host->ncfg->wp_gpio);
diff --git a/drivers/mtd/nand/lpc32xx_slc.c b/drivers/mtd/nand/lpc32xx_slc.c
index be94ed5..add7570 100644
--- a/drivers/mtd/nand/lpc32xx_slc.c
+++ b/drivers/mtd/nand/lpc32xx_slc.c
@@ -798,7 +798,7 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
}
lpc32xx_wp_disable(host);
- host->pdata = pdev->dev.platform_data;
+ host->pdata = dev_get_platdata(&pdev->dev);
mtd = &host->mtd;
chip = &host->nand_chip;
@@ -936,7 +936,6 @@ err_exit3:
err_exit2:
clk_disable(host->clk);
clk_put(host->clk);
- platform_set_drvdata(pdev, NULL);
err_exit1:
lpc32xx_wp_enable(host);
gpio_free(host->ncfg->wp_gpio);
@@ -963,7 +962,6 @@ static int lpc32xx_nand_remove(struct platform_device *pdev)
clk_disable(host->clk);
clk_put(host->clk);
- platform_set_drvdata(pdev, NULL);
lpc32xx_wp_enable(host);
gpio_free(host->ncfg->wp_gpio);
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index 07e5784..ce8242b 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -266,7 +266,7 @@ static struct nand_ecclayout nandv2_hw_eccoob_4k = {
}
};
-static const char const *part_probes[] = {
+static const char * const part_probes[] = {
"cmdlinepart", "RedBoot", "ofpart", NULL };
static void memcpy32_fromio(void *trg, const void __iomem *src, size_t size)
@@ -1432,7 +1432,8 @@ static int mxcnd_probe(struct platform_device *pdev)
err = mxcnd_probe_dt(host);
if (err > 0) {
- struct mxc_nand_platform_data *pdata = pdev->dev.platform_data;
+ struct mxc_nand_platform_data *pdata =
+ dev_get_platdata(&pdev->dev);
if (pdata) {
host->pdata = *pdata;
host->devtype_data = (struct mxc_nand_devtype_data *)
@@ -1446,8 +1447,6 @@ static int mxcnd_probe(struct platform_device *pdev)
if (host->devtype_data->needs_ip) {
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENODEV;
host->regs_ip = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(host->regs_ip))
return PTR_ERR(host->regs_ip);
@@ -1457,9 +1456,6 @@ static int mxcnd_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
}
- if (!res)
- return -ENODEV;
-
host->base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(host->base))
return PTR_ERR(host->base);
@@ -1578,8 +1574,6 @@ static int mxcnd_remove(struct platform_device *pdev)
{
struct mxc_nand_host *host = platform_get_drvdata(pdev);
- platform_set_drvdata(pdev, NULL);
-
nand_release(&host->mtd);
return 0;
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index dfcd0a5..7ed4841 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -108,13 +108,13 @@ static int check_offs_len(struct mtd_info *mtd,
int ret = 0;
/* Start address must align on block boundary */
- if (ofs & ((1 << chip->phys_erase_shift) - 1)) {
+ if (ofs & ((1ULL << chip->phys_erase_shift) - 1)) {
pr_debug("%s: unaligned address\n", __func__);
ret = -EINVAL;
}
/* Length must align on block boundary */
- if (len & ((1 << chip->phys_erase_shift) - 1)) {
+ if (len & ((1ULL << chip->phys_erase_shift) - 1)) {
pr_debug("%s: length not block aligned\n", __func__);
ret = -EINVAL;
}
@@ -211,11 +211,9 @@ static void nand_select_chip(struct mtd_info *mtd, int chipnr)
*/
static void nand_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
{
- int i;
struct nand_chip *chip = mtd->priv;
- for (i = 0; i < len; i++)
- writeb(buf[i], chip->IO_ADDR_W);
+ iowrite8_rep(chip->IO_ADDR_W, buf, len);
}
/**
@@ -228,11 +226,9 @@ static void nand_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
*/
static void nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
{
- int i;
struct nand_chip *chip = mtd->priv;
- for (i = 0; i < len; i++)
- buf[i] = readb(chip->IO_ADDR_R);
+ ioread8_rep(chip->IO_ADDR_R, buf, len);
}
/**
@@ -245,14 +241,10 @@ static void nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
*/
static void nand_write_buf16(struct mtd_info *mtd, const uint8_t *buf, int len)
{
- int i;
struct nand_chip *chip = mtd->priv;
u16 *p = (u16 *) buf;
- len >>= 1;
-
- for (i = 0; i < len; i++)
- writew(p[i], chip->IO_ADDR_W);
+ iowrite16_rep(chip->IO_ADDR_W, p, len >> 1);
}
/**
@@ -265,13 +257,10 @@ static void nand_write_buf16(struct mtd_info *mtd, const uint8_t *buf, int len)
*/
static void nand_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len)
{
- int i;
struct nand_chip *chip = mtd->priv;
u16 *p = (u16 *) buf;
- len >>= 1;
- for (i = 0; i < len; i++)
- p[i] = readw(chip->IO_ADDR_R);
+ ioread16_rep(chip->IO_ADDR_R, p, len >> 1);
}
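The four conversions above lean on the string-MMIO helpers: ioread8_rep()/iowrite8_rep() transfer count bytes to or from the same register address, and the 16-bit variants count in 16-bit words, which is why the byte length is passed as len >> 1. A sketch of the pre-conversion loop that the byte-wide helper replaces:

#include <linux/io.h>
#include <linux/types.h>

/* Every iteration targets the same MMIO register, not an incrementing
 * address; iowrite8_rep(addr, buf, len) provides the same semantics. */
static void write_buf_open_coded(void __iomem *addr, const u8 *buf, int len)
{
	int i;

	for (i = 0; i < len; i++)
		writeb(buf[i], addr);
}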
/**
@@ -335,80 +324,88 @@ static int nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip)
}
/**
- * nand_default_block_markbad - [DEFAULT] mark a block bad
+ * nand_default_block_markbad - [DEFAULT] mark a block bad via bad block marker
* @mtd: MTD device structure
* @ofs: offset from device start
*
* This is the default implementation, which can be overridden by a hardware
- * specific driver. We try operations in the following order, according to our
- * bbt_options (NAND_BBT_NO_OOB_BBM and NAND_BBT_USE_FLASH):
+ * specific driver. It provides the details for writing a bad block marker to a
+ * block.
+ */
+static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct mtd_oob_ops ops;
+ uint8_t buf[2] = { 0, 0 };
+ int ret = 0, res, i = 0;
+
+ ops.datbuf = NULL;
+ ops.oobbuf = buf;
+ ops.ooboffs = chip->badblockpos;
+ if (chip->options & NAND_BUSWIDTH_16) {
+ ops.ooboffs &= ~0x01;
+ ops.len = ops.ooblen = 2;
+ } else {
+ ops.len = ops.ooblen = 1;
+ }
+ ops.mode = MTD_OPS_PLACE_OOB;
+
+ /* Write to first/last page(s) if necessary */
+ if (chip->bbt_options & NAND_BBT_SCANLASTPAGE)
+ ofs += mtd->erasesize - mtd->writesize;
+ do {
+ res = nand_do_write_oob(mtd, ofs, &ops);
+ if (!ret)
+ ret = res;
+
+ i++;
+ ofs += mtd->writesize;
+ } while ((chip->bbt_options & NAND_BBT_SCAN2NDPAGE) && i < 2);
+
+ return ret;
+}
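After this refactoring, a controller driver overrides only the OOB-marker write via chip->block_markbad; the erase and BBT bookkeeping stay in the generic nand_block_markbad_lowlevel() below. A hedged sketch of such an override, with hypothetical mydrv_ naming, writing a zero byte at the start of the OOB area in the spirit of the gpmi change earlier in this patch:

#include <linux/errno.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>

static int mydrv_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	struct nand_chip *chip = mtd->priv;
	int page = (int)(ofs >> chip->page_shift);
	u8 marker = 0;	/* 0x00 in the marker position means "bad" */

	chip->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize, page);
	chip->write_buf(mtd, &marker, 1);
	chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);

	return (chip->waitfunc(mtd, chip) & NAND_STATUS_FAIL) ? -EIO : 0;
}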
+
+/**
+ * nand_block_markbad_lowlevel - mark a block bad
+ * @mtd: MTD device structure
+ * @ofs: offset from device start
+ *
+ * This function performs the generic NAND bad block marking steps (i.e., bad
+ * block table(s) and/or marker(s)). We only allow the hardware driver to
+ * specify how to write bad block markers to OOB (chip->block_markbad).
+ *
+ * We try operations in the following order:
* (1) erase the affected block, to allow OOB marker to be written cleanly
- * (2) update in-memory BBT
- * (3) write bad block marker to OOB area of affected block
- * (4) update flash-based BBT
- * Note that we retain the first error encountered in (3) or (4), finish the
+ * (2) write bad block marker to OOB area of affected block (unless flag
+ * NAND_BBT_NO_OOB_BBM is present)
+ * (3) update the BBT
+ * Note that we retain the first error encountered in (2) or (3), finish the
* procedures, and dump the error in the end.
*/
-static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
+static int nand_block_markbad_lowlevel(struct mtd_info *mtd, loff_t ofs)
{
struct nand_chip *chip = mtd->priv;
- uint8_t buf[2] = { 0, 0 };
- int block, res, ret = 0, i = 0;
- int write_oob = !(chip->bbt_options & NAND_BBT_NO_OOB_BBM);
+ int res, ret = 0;
- if (write_oob) {
+ if (!(chip->bbt_options & NAND_BBT_NO_OOB_BBM)) {
struct erase_info einfo;
/* Attempt erase before marking OOB */
memset(&einfo, 0, sizeof(einfo));
einfo.mtd = mtd;
einfo.addr = ofs;
- einfo.len = 1 << chip->phys_erase_shift;
+ einfo.len = 1ULL << chip->phys_erase_shift;
nand_erase_nand(mtd, &einfo, 0);
- }
-
- /* Get block number */
- block = (int)(ofs >> chip->bbt_erase_shift);
- /* Mark block bad in memory-based BBT */
- if (chip->bbt)
- chip->bbt[block >> 2] |= 0x01 << ((block & 0x03) << 1);
-
- /* Write bad block marker to OOB */
- if (write_oob) {
- struct mtd_oob_ops ops;
- loff_t wr_ofs = ofs;
+ /* Write bad block marker to OOB */
nand_get_device(mtd, FL_WRITING);
-
- ops.datbuf = NULL;
- ops.oobbuf = buf;
- ops.ooboffs = chip->badblockpos;
- if (chip->options & NAND_BUSWIDTH_16) {
- ops.ooboffs &= ~0x01;
- ops.len = ops.ooblen = 2;
- } else {
- ops.len = ops.ooblen = 1;
- }
- ops.mode = MTD_OPS_PLACE_OOB;
-
- /* Write to first/last page(s) if necessary */
- if (chip->bbt_options & NAND_BBT_SCANLASTPAGE)
- wr_ofs += mtd->erasesize - mtd->writesize;
- do {
- res = nand_do_write_oob(mtd, wr_ofs, &ops);
- if (!ret)
- ret = res;
-
- i++;
- wr_ofs += mtd->writesize;
- } while ((chip->bbt_options & NAND_BBT_SCAN2NDPAGE) && i < 2);
-
+ ret = chip->block_markbad(mtd, ofs);
nand_release_device(mtd);
}
- /* Update flash-based bad block table */
- if (chip->bbt_options & NAND_BBT_USE_FLASH) {
- res = nand_update_bbt(mtd, ofs);
+ /* Mark block bad in BBT */
+ if (chip->bbt) {
+ res = nand_markbad_bbt(mtd, ofs);
if (!ret)
ret = res;
}
@@ -1983,13 +1980,14 @@ static int nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
 * nand_write_subpage_hwecc - [REPLACEABLE] hardware ECC based subpage write
* @mtd: mtd info structure
* @chip: nand chip info structure
- * @column: column address of subpage within the page
+ * @offset: column address of subpage within the page
* @data_len: data length
+ * @buf: data buffer
* @oob_required: must write chip->oob_poi to OOB
*/
static int nand_write_subpage_hwecc(struct mtd_info *mtd,
struct nand_chip *chip, uint32_t offset,
- uint32_t data_len, const uint8_t *data_buf,
+ uint32_t data_len, const uint8_t *buf,
int oob_required)
{
uint8_t *oob_buf = chip->oob_poi;
@@ -2008,20 +2006,20 @@ static int nand_write_subpage_hwecc(struct mtd_info *mtd,
chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
/* write data (untouched subpages already masked by 0xFF) */
- chip->write_buf(mtd, data_buf, ecc_size);
+ chip->write_buf(mtd, buf, ecc_size);
/* mask ECC of un-touched subpages by padding 0xFF */
if ((step < start_step) || (step > end_step))
memset(ecc_calc, 0xff, ecc_bytes);
else
- chip->ecc.calculate(mtd, data_buf, ecc_calc);
+ chip->ecc.calculate(mtd, buf, ecc_calc);
/* mask OOB of un-touched subpages by padding 0xFF */
/* if oob_required, preserve OOB metadata of written subpage */
if (!oob_required || (step < start_step) || (step > end_step))
memset(oob_buf, 0xff, oob_bytes);
- data_buf += ecc_size;
+ buf += ecc_size;
ecc_calc += ecc_bytes;
oob_buf += oob_bytes;
}
@@ -2633,7 +2631,7 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
}
/* Increment page address and decrement length */
- len -= (1 << chip->phys_erase_shift);
+ len -= (1ULL << chip->phys_erase_shift);
page += pages_per_block;
/* Check, if we cross a chip boundary */
@@ -2694,7 +2692,6 @@ static int nand_block_isbad(struct mtd_info *mtd, loff_t offs)
*/
static int nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
- struct nand_chip *chip = mtd->priv;
int ret;
ret = nand_block_isbad(mtd, ofs);
@@ -2705,7 +2702,7 @@ static int nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
return ret;
}
- return chip->block_markbad(mtd, ofs);
+ return nand_block_markbad_lowlevel(mtd, ofs);
}
/**
@@ -2720,7 +2717,9 @@ static int nand_onfi_set_features(struct mtd_info *mtd, struct nand_chip *chip,
{
int status;
- if (!chip->onfi_version)
+ if (!chip->onfi_version ||
+ !(le16_to_cpu(chip->onfi_params.opt_cmd)
+ & ONFI_OPT_CMD_SET_GET_FEATURES))
return -EINVAL;
chip->cmdfunc(mtd, NAND_CMD_SET_FEATURES, addr, -1);
@@ -2741,7 +2740,9 @@ static int nand_onfi_set_features(struct mtd_info *mtd, struct nand_chip *chip,
static int nand_onfi_get_features(struct mtd_info *mtd, struct nand_chip *chip,
int addr, uint8_t *subfeature_param)
{
- if (!chip->onfi_version)
+ if (!chip->onfi_version ||
+ !(le16_to_cpu(chip->onfi_params.opt_cmd)
+ & ONFI_OPT_CMD_SET_GET_FEATURES))
return -EINVAL;
/* clear the sub feature parameters */
@@ -2793,7 +2794,15 @@ static void nand_set_defaults(struct nand_chip *chip, int busw)
if (!chip->select_chip)
chip->select_chip = nand_select_chip;
- if (!chip->read_byte)
+
+ /* set for ONFI nand */
+ if (!chip->onfi_set_features)
+ chip->onfi_set_features = nand_onfi_set_features;
+ if (!chip->onfi_get_features)
+ chip->onfi_get_features = nand_onfi_get_features;
+
+ /* If called twice, pointers that depend on busw may need to be reset */
+ if (!chip->read_byte || chip->read_byte == nand_read_byte)
chip->read_byte = busw ? nand_read_byte16 : nand_read_byte;
if (!chip->read_word)
chip->read_word = nand_read_word;
@@ -2801,9 +2810,9 @@ static void nand_set_defaults(struct nand_chip *chip, int busw)
chip->block_bad = nand_block_bad;
if (!chip->block_markbad)
chip->block_markbad = nand_default_block_markbad;
- if (!chip->write_buf)
+ if (!chip->write_buf || chip->write_buf == nand_write_buf)
chip->write_buf = busw ? nand_write_buf16 : nand_write_buf;
- if (!chip->read_buf)
+ if (!chip->read_buf || chip->read_buf == nand_read_buf)
chip->read_buf = busw ? nand_read_buf16 : nand_read_buf;
if (!chip->scan_bbt)
chip->scan_bbt = nand_default_bbt;
@@ -2846,6 +2855,78 @@ static u16 onfi_crc16(u16 crc, u8 const *p, size_t len)
return crc;
}
+/* Parse the Extended Parameter Page. */
+static int nand_flash_detect_ext_param_page(struct mtd_info *mtd,
+ struct nand_chip *chip, struct nand_onfi_params *p)
+{
+ struct onfi_ext_param_page *ep;
+ struct onfi_ext_section *s;
+ struct onfi_ext_ecc_info *ecc;
+ uint8_t *cursor;
+ int ret = -EINVAL;
+ int len;
+ int i;
+
+ len = le16_to_cpu(p->ext_param_page_length) * 16;
+ ep = kmalloc(len, GFP_KERNEL);
+ if (!ep) {
+ ret = -ENOMEM;
+ goto ext_out;
+ }
+
+ /* Send our own NAND_CMD_PARAM. */
+ chip->cmdfunc(mtd, NAND_CMD_PARAM, 0, -1);
+
+ /* Use the Change Read Column command to skip the ONFI param pages. */
+ chip->cmdfunc(mtd, NAND_CMD_RNDOUT,
+ sizeof(*p) * p->num_of_param_pages, -1);
+
+ /* Read out the Extended Parameter Page. */
+ chip->read_buf(mtd, (uint8_t *)ep, len);
+ if ((onfi_crc16(ONFI_CRC_BASE, ((uint8_t *)ep) + 2, len - 2)
+ != le16_to_cpu(ep->crc))) {
+ pr_debug("fail in the CRC.\n");
+ goto ext_out;
+ }
+
+ /*
+ * Check the signature.
+ * This does not strictly follow the ONFI spec and may change in the future.
+ */
+ if (strncmp(ep->sig, "EPPS", 4)) {
+ pr_debug("The signature is invalid.\n");
+ goto ext_out;
+ }
+
+ /* find the ECC section. */
+ cursor = (uint8_t *)(ep + 1);
+ for (i = 0; i < ONFI_EXT_SECTION_MAX; i++) {
+ s = ep->sections + i;
+ if (s->type == ONFI_SECTION_TYPE_2)
+ break;
+ cursor += s->length * 16;
+ }
+ if (i == ONFI_EXT_SECTION_MAX) {
+ pr_debug("We can not find the ECC section.\n");
+ goto ext_out;
+ }
+
+ /* get the info we want. */
+ ecc = (struct onfi_ext_ecc_info *)cursor;
+
+ if (ecc->codeword_size) {
+ chip->ecc_strength_ds = ecc->ecc_bits;
+ chip->ecc_step_ds = 1 << ecc->codeword_size;
+ }
+
+ pr_info("ONFI extended param page detected.\n");
+ ret = 0;
+
+ext_out:
+ kfree(ep);
+ return ret;
+}
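The CRC check in this function uses the same onfi_crc16() whose tail is visible just above: CRC-16 with polynomial 0x8005, seeded with ONFI_CRC_BASE (assumed here to be 0x4F4E, the ASCII for "ON"), and computed over the page from byte 2 onward because bytes 0-1 hold the CRC itself. A standalone userland sketch of that check, with illustrative data:

#include <stdint.h>
#include <stdio.h>

static uint16_t onfi_crc16(uint16_t crc, const uint8_t *p, size_t len)
{
	int i;

	while (len--) {
		crc ^= (uint16_t)(*p++) << 8;
		for (i = 0; i < 8; i++)
			crc = (crc << 1) ^ ((crc & 0x8000) ? 0x8005 : 0);
	}
	return crc;
}

int main(void)
{
	uint8_t page[16] = { 0 };	/* stand-in for an extended param page */

	/* Bytes 0-1 hold the stored CRC, so compute from byte 2 onward. */
	printf("crc = 0x%04x\n",
	       onfi_crc16(0x4F4E, page + 2, sizeof(page) - 2));
	return 0;
}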
+
/*
* Check if the NAND chip is ONFI compliant, returns 1 if it is, 0 otherwise.
*/
@@ -2907,9 +2988,31 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
mtd->oobsize = le16_to_cpu(p->spare_bytes_per_page);
chip->chipsize = le32_to_cpu(p->blocks_per_lun);
chip->chipsize *= (uint64_t)mtd->erasesize * p->lun_count;
- *busw = 0;
- if (le16_to_cpu(p->features) & 1)
+
+ if (onfi_feature(chip) & ONFI_FEATURE_16_BIT_BUS)
*busw = NAND_BUSWIDTH_16;
+ else
+ *busw = 0;
+
+ if (p->ecc_bits != 0xff) {
+ chip->ecc_strength_ds = p->ecc_bits;
+ chip->ecc_step_ds = 512;
+ } else if (chip->onfi_version >= 21 &&
+ (onfi_feature(chip) & ONFI_FEATURE_EXT_PARAM_PAGE)) {
+
+ /*
+ * The nand_flash_detect_ext_param_page() uses the
+ * Change Read Column command which maybe not supported
+ * by the chip->cmdfunc. So try to update the chip->cmdfunc
+ * now. We do not replace user supplied command function.
+ */
+ if (mtd->writesize > 512 && chip->cmdfunc == nand_command)
+ chip->cmdfunc = nand_command_lp;
+
+ /* The Extended Parameter Page is supported since ONFI 2.1. */
+ if (nand_flash_detect_ext_param_page(mtd, chip, p))
+ pr_info("Failed to detect the extended param page.\n");
+ }
pr_info("ONFI flash detected\n");
return 1;
@@ -3086,6 +3189,22 @@ static void nand_decode_ext_id(struct mtd_info *mtd, struct nand_chip *chip,
extid >>= 2;
/* Get buswidth information */
*busw = (extid & 0x01) ? NAND_BUSWIDTH_16 : 0;
+
+ /*
+ * Toshiba 24nm raw SLC (i.e., not BENAND) has 32B OOB per
+ * 512B page. For Toshiba SLC, we decode the 5th/6th byte as
+ * follows:
+ * - ID byte 6, bits[2:0]: 100b -> 43nm, 101b -> 32nm,
+ * 110b -> 24nm
+ * - ID byte 5, bit[7]: 1 -> BENAND, 0 -> raw SLC
+ */
+ if (id_len >= 6 && id_data[0] == NAND_MFR_TOSHIBA &&
+ !(chip->cellinfo & NAND_CI_CELLTYPE_MSK) &&
+ (id_data[5] & 0x7) == 0x6 /* 24nm */ &&
+ !(id_data[4] & 0x80) /* !BENAND */) {
+ mtd->oobsize = 32 * mtd->writesize >> 9;
+ }
+
}
}
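The oobsize formula above scales 32 bytes of OOB per 512-byte sector up to the actual page size; a 4096-byte Toshiba SLC page therefore reports 32 * 4096 >> 9 = 256 bytes. A one-line check (sketch):

#include <stdio.h>

int main(void)
{
	unsigned int writesize = 4096;	/* example Toshiba SLC page size */

	printf("oobsize = %u\n", 32 * writesize >> 9);	/* prints 256 */
	return 0;
}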
@@ -3172,6 +3291,8 @@ static bool find_full_id_nand(struct mtd_info *mtd, struct nand_chip *chip,
chip->cellinfo = id_data[2];
chip->chipsize = (uint64_t)type->chipsize << 20;
chip->options |= type->options;
+ chip->ecc_strength_ds = NAND_ECC_STRENGTH(type);
+ chip->ecc_step_ds = NAND_ECC_STEP(type);
*busw = type->options & NAND_BUSWIDTH_16;
@@ -3446,12 +3567,6 @@ int nand_scan_tail(struct mtd_info *mtd)
if (!chip->write_page)
chip->write_page = nand_write_page;
- /* set for ONFI nand */
- if (!chip->onfi_set_features)
- chip->onfi_set_features = nand_onfi_set_features;
- if (!chip->onfi_get_features)
- chip->onfi_get_features = nand_onfi_get_features;
-
/*
* Check ECC mode, default to software if 3byte/512byte hardware ECC is
* selected and we have 256 byte pagesize fallback to software ECC
@@ -3674,6 +3789,7 @@ int nand_scan_tail(struct mtd_info *mtd)
/* propagate ecc info to mtd_info */
mtd->ecclayout = chip->ecc.layout;
mtd->ecc_strength = chip->ecc.strength;
+ mtd->ecc_step_size = chip->ecc.size;
/*
* Initialize bitflip_threshold to its default prior scan_bbt() call.
* scan_bbt() might invoke mtd_read(), thus bitflip_threshold must be
diff --git a/drivers/mtd/nand/nand_bbt.c b/drivers/mtd/nand/nand_bbt.c
index 2672643..bc06196 100644
--- a/drivers/mtd/nand/nand_bbt.c
+++ b/drivers/mtd/nand/nand_bbt.c
@@ -71,6 +71,30 @@
#include <linux/export.h>
#include <linux/string.h>
+#define BBT_BLOCK_GOOD 0x00
+#define BBT_BLOCK_WORN 0x01
+#define BBT_BLOCK_RESERVED 0x02
+#define BBT_BLOCK_FACTORY_BAD 0x03
+
+#define BBT_ENTRY_MASK 0x03
+#define BBT_ENTRY_SHIFT 2
+
+static int nand_update_bbt(struct mtd_info *mtd, loff_t offs);
+
+static inline uint8_t bbt_get_entry(struct nand_chip *chip, int block)
+{
+ uint8_t entry = chip->bbt[block >> BBT_ENTRY_SHIFT];
+ entry >>= (block & BBT_ENTRY_MASK) * 2;
+ return entry & BBT_ENTRY_MASK;
+}
+
+static inline void bbt_mark_entry(struct nand_chip *chip, int block,
+ uint8_t mark)
+{
+ uint8_t msk = (mark & BBT_ENTRY_MASK) << ((block & BBT_ENTRY_MASK) * 2);
+ chip->bbt[block >> BBT_ENTRY_SHIFT] |= msk;
+}
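These helpers centralize the 2-bits-per-block packing that the rest of nand_bbt.c previously open-coded: four entries per byte, each entry occupying the two bits selected by the low bits of the block number. A standalone userland demonstration of the same encoding (sketch):

#include <stdint.h>
#include <stdio.h>

#define BBT_ENTRY_SHIFT	2
#define BBT_ENTRY_MASK	0x03
#define BBT_BLOCK_WORN	0x01

int main(void)
{
	uint8_t bbt[2] = { 0 };	/* 2-bit entries: covers blocks 0..7 */
	int block = 5;
	uint8_t entry;

	/* bbt_mark_entry(): OR the 2-bit mark into the right byte/position */
	bbt[block >> BBT_ENTRY_SHIFT] |=
		(BBT_BLOCK_WORN & BBT_ENTRY_MASK) << ((block & BBT_ENTRY_MASK) * 2);

	/* bbt_get_entry(): shift the byte down and mask off two bits */
	entry = bbt[block >> BBT_ENTRY_SHIFT] >> ((block & BBT_ENTRY_MASK) * 2);
	printf("block %d -> entry 0x%02x\n", block, entry & BBT_ENTRY_MASK);
	return 0;
}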
+
static int check_pattern_no_oob(uint8_t *buf, struct nand_bbt_descr *td)
{
if (memcmp(buf, td->pattern, td->len))
@@ -86,33 +110,17 @@ static int check_pattern_no_oob(uint8_t *buf, struct nand_bbt_descr *td)
* @td: search pattern descriptor
*
* Check for a pattern at the given place. Used to search bad block tables and
- * good / bad block identifiers. If the SCAN_EMPTY option is set then check, if
- * all bytes except the pattern area contain 0xff.
+ * good / bad block identifiers.
*/
static int check_pattern(uint8_t *buf, int len, int paglen, struct nand_bbt_descr *td)
{
- int end = 0;
- uint8_t *p = buf;
-
if (td->options & NAND_BBT_NO_OOB)
return check_pattern_no_oob(buf, td);
- end = paglen + td->offs;
- if (td->options & NAND_BBT_SCANEMPTY)
- if (memchr_inv(p, 0xff, end))
- return -1;
- p += end;
-
/* Compare the pattern */
- if (memcmp(p, td->pattern, td->len))
+ if (memcmp(buf + paglen + td->offs, td->pattern, td->len))
return -1;
- if (td->options & NAND_BBT_SCANEMPTY) {
- p += td->len;
- end += td->len;
- if (memchr_inv(p, 0xff, len - end))
- return -1;
- }
return 0;
}
@@ -159,7 +167,7 @@ static u32 add_marker_len(struct nand_bbt_descr *td)
* @page: the starting page
* @num: the number of bbt descriptors to read
 * @td: the bbt description table
- * @offs: offset in the memory table
+ * @offs: block number offset in the table
*
* Read the bad block table starting from page.
*/
@@ -209,14 +217,16 @@ static int read_bbt(struct mtd_info *mtd, uint8_t *buf, int page, int num,
/* Analyse data */
for (i = 0; i < len; i++) {
uint8_t dat = buf[i];
- for (j = 0; j < 8; j += bits, act += 2) {
+ for (j = 0; j < 8; j += bits, act++) {
uint8_t tmp = (dat >> j) & msk;
if (tmp == msk)
continue;
if (reserved_block_code && (tmp == reserved_block_code)) {
pr_info("nand_read_bbt: reserved block at 0x%012llx\n",
- (loff_t)((offs << 2) + (act >> 1)) << this->bbt_erase_shift);
- this->bbt[offs + (act >> 3)] |= 0x2 << (act & 0x06);
+ (loff_t)(offs + act) <<
+ this->bbt_erase_shift);
+ bbt_mark_entry(this, offs + act,
+ BBT_BLOCK_RESERVED);
mtd->ecc_stats.bbtblocks++;
continue;
}
@@ -225,12 +235,15 @@ static int read_bbt(struct mtd_info *mtd, uint8_t *buf, int page, int num,
* move this message to pr_debug.
*/
pr_info("nand_read_bbt: bad block at 0x%012llx\n",
- (loff_t)((offs << 2) + (act >> 1)) << this->bbt_erase_shift);
+ (loff_t)(offs + act) <<
+ this->bbt_erase_shift);
/* Factory marked bad or worn out? */
if (tmp == 0)
- this->bbt[offs + (act >> 3)] |= 0x3 << (act & 0x06);
+ bbt_mark_entry(this, offs + act,
+ BBT_BLOCK_FACTORY_BAD);
else
- this->bbt[offs + (act >> 3)] |= 0x1 << (act & 0x06);
+ bbt_mark_entry(this, offs + act,
+ BBT_BLOCK_WORN);
mtd->ecc_stats.badblocks++;
}
}
@@ -265,7 +278,7 @@ static int read_abs_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_desc
td, offs);
if (res)
return res;
- offs += this->chipsize >> (this->bbt_erase_shift + 2);
+ offs += this->chipsize >> this->bbt_erase_shift;
}
} else {
res = read_bbt(mtd, buf, td->pages[0],
@@ -478,22 +491,12 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf,
else
numpages = 1;
- if (!(bd->options & NAND_BBT_SCANEMPTY)) {
- /* We need only read few bytes from the OOB area */
- scanlen = 0;
- readlen = bd->len;
- } else {
- /* Full page content should be read */
- scanlen = mtd->writesize + mtd->oobsize;
- readlen = numpages * mtd->writesize;
- }
+ /* We need only read few bytes from the OOB area */
+ scanlen = 0;
+ readlen = bd->len;
if (chip == -1) {
- /*
- * Note that numblocks is 2 * (real numblocks) here, see i+=2
- * below as it makes shifting and masking less painful
- */
- numblocks = mtd->size >> (this->bbt_erase_shift - 1);
+ numblocks = mtd->size >> this->bbt_erase_shift;
startblock = 0;
from = 0;
} else {
@@ -502,16 +505,16 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf,
chip + 1, this->numchips);
return -EINVAL;
}
- numblocks = this->chipsize >> (this->bbt_erase_shift - 1);
+ numblocks = this->chipsize >> this->bbt_erase_shift;
startblock = chip * numblocks;
numblocks += startblock;
- from = (loff_t)startblock << (this->bbt_erase_shift - 1);
+ from = (loff_t)startblock << this->bbt_erase_shift;
}
if (this->bbt_options & NAND_BBT_SCANLASTPAGE)
from += mtd->erasesize - (mtd->writesize * numpages);
- for (i = startblock; i < numblocks;) {
+ for (i = startblock; i < numblocks; i++) {
int ret;
BUG_ON(bd->options & NAND_BBT_NO_OOB);
@@ -526,13 +529,12 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf,
return ret;
if (ret) {
- this->bbt[i >> 3] |= 0x03 << (i & 0x6);
+ bbt_mark_entry(this, i, BBT_BLOCK_FACTORY_BAD);
pr_warn("Bad eraseblock %d at 0x%012llx\n",
- i >> 1, (unsigned long long)from);
+ i, (unsigned long long)from);
mtd->ecc_stats.badblocks++;
}
- i += 2;
from += (1 << this->bbt_erase_shift);
}
return 0;
@@ -655,9 +657,9 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
{
struct nand_chip *this = mtd->priv;
struct erase_info einfo;
- int i, j, res, chip = 0;
+ int i, res, chip = 0;
int bits, startblock, dir, page, offs, numblocks, sft, sftmsk;
- int nrchips, bbtoffs, pageoffs, ooboffs;
+ int nrchips, pageoffs, ooboffs;
uint8_t msk[4];
uint8_t rcode = td->reserved_block_code;
size_t retlen, len = 0;
@@ -713,10 +715,9 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
for (i = 0; i < td->maxblocks; i++) {
int block = startblock + dir * i;
/* Check, if the block is bad */
- switch ((this->bbt[block >> 2] >>
- (2 * (block & 0x03))) & 0x03) {
- case 0x01:
- case 0x03:
+ switch (bbt_get_entry(this, block)) {
+ case BBT_BLOCK_WORN:
+ case BBT_BLOCK_FACTORY_BAD:
continue;
}
page = block <<
@@ -748,8 +749,6 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
default: return -EINVAL;
}
- bbtoffs = chip * (numblocks >> 2);
-
to = ((loff_t)page) << this->page_shift;
/* Must we save the block contents? */
@@ -814,16 +813,12 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
buf[ooboffs + td->veroffs] = td->version[chip];
/* Walk through the memory table */
- for (i = 0; i < numblocks;) {
+ for (i = 0; i < numblocks; i++) {
uint8_t dat;
- dat = this->bbt[bbtoffs + (i >> 2)];
- for (j = 0; j < 4; j++, i++) {
- int sftcnt = (i << (3 - sft)) & sftmsk;
- /* Do not store the reserved bbt blocks! */
- buf[offs + (i >> sft)] &=
- ~(msk[dat & 0x03] << sftcnt);
- dat >>= 2;
- }
+ int sftcnt = (i << (3 - sft)) & sftmsk;
+ dat = bbt_get_entry(this, chip * numblocks + i);
+ /* Do not store the reserved bbt blocks! */
+ buf[offs + (i >> sft)] &= ~(msk[dat] << sftcnt);
}
memset(&einfo, 0, sizeof(einfo));
@@ -865,7 +860,6 @@ static inline int nand_memory_bbt(struct mtd_info *mtd, struct nand_bbt_descr *b
{
struct nand_chip *this = mtd->priv;
- bd->options &= ~NAND_BBT_SCANEMPTY;
return create_bbt(mtd, this->buffers->databuf, bd, -1);
}
@@ -1009,7 +1003,7 @@ static void mark_bbt_region(struct mtd_info *mtd, struct nand_bbt_descr *td)
{
struct nand_chip *this = mtd->priv;
int i, j, chips, block, nrblocks, update;
- uint8_t oldval, newval;
+ uint8_t oldval;
/* Do we have a bbt per chip? */
if (td->options & NAND_BBT_PERCHIP) {
@@ -1026,12 +1020,12 @@ static void mark_bbt_region(struct mtd_info *mtd, struct nand_bbt_descr *td)
if (td->pages[i] == -1)
continue;
block = td->pages[i] >> (this->bbt_erase_shift - this->page_shift);
- block <<= 1;
- oldval = this->bbt[(block >> 3)];
- newval = oldval | (0x2 << (block & 0x06));
- this->bbt[(block >> 3)] = newval;
- if ((oldval != newval) && td->reserved_block_code)
- nand_update_bbt(mtd, (loff_t)block << (this->bbt_erase_shift - 1));
+ oldval = bbt_get_entry(this, block);
+ bbt_mark_entry(this, block, BBT_BLOCK_RESERVED);
+ if ((oldval != BBT_BLOCK_RESERVED) &&
+ td->reserved_block_code)
+ nand_update_bbt(mtd, (loff_t)block <<
+ this->bbt_erase_shift);
continue;
}
update = 0;
@@ -1039,14 +1033,12 @@ static void mark_bbt_region(struct mtd_info *mtd, struct nand_bbt_descr *td)
block = ((i + 1) * nrblocks) - td->maxblocks;
else
block = i * nrblocks;
- block <<= 1;
for (j = 0; j < td->maxblocks; j++) {
- oldval = this->bbt[(block >> 3)];
- newval = oldval | (0x2 << (block & 0x06));
- this->bbt[(block >> 3)] = newval;
- if (oldval != newval)
+ oldval = bbt_get_entry(this, block);
+ bbt_mark_entry(this, block, BBT_BLOCK_RESERVED);
+ if (oldval != BBT_BLOCK_RESERVED)
update = 1;
- block += 2;
+ block++;
}
/*
* If we want reserved blocks to be recorded to flash, and some
@@ -1054,7 +1046,8 @@ static void mark_bbt_region(struct mtd_info *mtd, struct nand_bbt_descr *td)
* bbts. This should only happen once.
*/
if (update && td->reserved_block_code)
- nand_update_bbt(mtd, (loff_t)(block - 2) << (this->bbt_erase_shift - 1));
+ nand_update_bbt(mtd, (loff_t)(block - 1) <<
+ this->bbt_erase_shift);
}
}
@@ -1180,13 +1173,13 @@ int nand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
}
/**
- * nand_update_bbt - [NAND Interface] update bad block table(s)
+ * nand_update_bbt - update bad block table(s)
* @mtd: MTD device structure
* @offs: the offset of the newly marked block
*
* The function updates the bad block table(s).
*/
-int nand_update_bbt(struct mtd_info *mtd, loff_t offs)
+static int nand_update_bbt(struct mtd_info *mtd, loff_t offs)
{
struct nand_chip *this = mtd->priv;
int len, res = 0;
@@ -1356,28 +1349,47 @@ int nand_default_bbt(struct mtd_info *mtd)
int nand_isbad_bbt(struct mtd_info *mtd, loff_t offs, int allowbbt)
{
struct nand_chip *this = mtd->priv;
- int block;
- uint8_t res;
+ int block, res;
- /* Get block number * 2 */
- block = (int)(offs >> (this->bbt_erase_shift - 1));
- res = (this->bbt[block >> 3] >> (block & 0x06)) & 0x03;
+ block = (int)(offs >> this->bbt_erase_shift);
+ res = bbt_get_entry(this, block);
pr_debug("nand_isbad_bbt(): bbt info for offs 0x%08x: "
"(block %d) 0x%02x\n",
- (unsigned int)offs, block >> 1, res);
+ (unsigned int)offs, block, res);
- switch ((int)res) {
- case 0x00:
+ switch (res) {
+ case BBT_BLOCK_GOOD:
return 0;
- case 0x01:
+ case BBT_BLOCK_WORN:
return 1;
- case 0x02:
+ case BBT_BLOCK_RESERVED:
return allowbbt ? 0 : 1;
}
return 1;
}
+/**
+ * nand_markbad_bbt - [NAND Interface] Mark a block bad in the BBT
+ * @mtd: MTD device structure
+ * @offs: offset of the bad block
+ */
+int nand_markbad_bbt(struct mtd_info *mtd, loff_t offs)
+{
+ struct nand_chip *this = mtd->priv;
+ int block, ret = 0;
+
+ block = (int)(offs >> this->bbt_erase_shift);
+
+ /* Mark bad block in memory */
+ bbt_mark_entry(this, block, BBT_BLOCK_WORN);
+
+ /* Update flash-based bad block table */
+ if (this->bbt_options & NAND_BBT_USE_FLASH)
+ ret = nand_update_bbt(mtd, offs);
+
+ return ret;
+}
+
EXPORT_SYMBOL(nand_scan_bbt);
EXPORT_SYMBOL(nand_default_bbt);
-EXPORT_SYMBOL_GPL(nand_update_bbt);
diff --git a/drivers/mtd/nand/nand_ids.c b/drivers/mtd/nand/nand_ids.c
index 683813a..a87b0a3 100644
--- a/drivers/mtd/nand/nand_ids.c
+++ b/drivers/mtd/nand/nand_ids.c
@@ -33,16 +33,16 @@ struct nand_flash_dev nand_flash_ids[] = {
*/
{"TC58NVG2S0F 4G 3.3V 8-bit",
{ .id = {0x98, 0xdc, 0x90, 0x26, 0x76, 0x15, 0x01, 0x08} },
- SZ_4K, SZ_512, SZ_256K, 0, 8, 224},
+ SZ_4K, SZ_512, SZ_256K, 0, 8, 224, NAND_ECC_INFO(4, SZ_512) },
{"TC58NVG3S0F 8G 3.3V 8-bit",
{ .id = {0x98, 0xd3, 0x90, 0x26, 0x76, 0x15, 0x02, 0x08} },
- SZ_4K, SZ_1K, SZ_256K, 0, 8, 232},
+ SZ_4K, SZ_1K, SZ_256K, 0, 8, 232, NAND_ECC_INFO(4, SZ_512) },
{"TC58NVG5D2 32G 3.3V 8-bit",
{ .id = {0x98, 0xd7, 0x94, 0x32, 0x76, 0x56, 0x09, 0x00} },
- SZ_8K, SZ_4K, SZ_1M, 0, 8, 640},
+ SZ_8K, SZ_4K, SZ_1M, 0, 8, 640, NAND_ECC_INFO(40, SZ_1K) },
{"TC58NVG6D2 64G 3.3V 8-bit",
{ .id = {0x98, 0xde, 0x94, 0x82, 0x76, 0x56, 0x04, 0x20} },
- SZ_8K, SZ_8K, SZ_2M, 0, 8, 640},
+ SZ_8K, SZ_8K, SZ_2M, 0, 8, 640, NAND_ECC_INFO(40, SZ_1K) },
LEGACY_ID_NAND("NAND 4MiB 5V 8-bit", 0x6B, 4, SZ_8K, SP_OPTIONS),
LEGACY_ID_NAND("NAND 4MiB 3,3V 8-bit", 0xE3, 4, SZ_8K, SP_OPTIONS),
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
index cb38f3d..bdc1d15 100644
--- a/drivers/mtd/nand/nandsim.c
+++ b/drivers/mtd/nand/nandsim.c
@@ -205,7 +205,7 @@ MODULE_PARM_DESC(bch, "Enable BCH ecc and set how many bits should "
/* Calculate the page offset in flash RAM image by (row, column) address */
#define NS_RAW_OFFSET(ns) \
- (((ns)->regs.row << (ns)->geom.pgshift) + ((ns)->regs.row * (ns)->geom.oobsz) + (ns)->regs.column)
+ (((ns)->regs.row * (ns)->geom.pgszoob) + (ns)->regs.column)
/* Calculate the OOB offset in flash RAM image by (row, column) address */
#define NS_RAW_OFFSET_OOB(ns) (NS_RAW_OFFSET(ns) + ns->geom.pgsz)
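The simplified NS_RAW_OFFSET() relies on row << pgshift equaling row * pgsz for a power-of-two page size, so the old row * pgsz + row * oobsz + column collapses to row * pgszoob + column. A quick userland equivalence check with an example geometry (sketch):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint32_t pgsz = 2048, oobsz = 64, pgszoob = pgsz + oobsz;
	const uint32_t pgshift = 11;	/* log2(pgsz) */
	uint32_t row, col;

	for (row = 0; row < 8; row++)
		for (col = 0; col < pgsz; col += 512)
			assert(((row << pgshift) + row * oobsz + col) ==
			       (row * pgszoob + col));
	printf("old and new NS_RAW_OFFSET agree\n");
	return 0;
}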
@@ -336,7 +336,6 @@ struct nandsim {
uint pgsec; /* number of pages per sector */
uint secshift; /* bits number in sector size */
uint pgshift; /* bits number in page size */
- uint oobshift; /* bits number in OOB size */
uint pgaddrbytes; /* bytes per page address */
uint secaddrbytes; /* bytes per sector address */
uint idbytes; /* the number ID bytes that this chip outputs */
@@ -363,7 +362,7 @@ struct nandsim {
/* Fields needed when using a cache file */
struct file *cfile; /* Open file */
- unsigned char *pages_written; /* Which pages have been written */
+ unsigned long *pages_written; /* Which pages have been written */
void *file_buf;
struct page *held_pages[NS_MAX_HELD_PAGES];
int held_cnt;
@@ -586,7 +585,8 @@ static int alloc_device(struct nandsim *ns)
err = -EINVAL;
goto err_close;
}
- ns->pages_written = vzalloc(ns->geom.pgnum);
+ ns->pages_written = vzalloc(BITS_TO_LONGS(ns->geom.pgnum) *
+ sizeof(unsigned long));
if (!ns->pages_written) {
NS_ERR("alloc_device: unable to allocate pages written array\n");
err = -ENOMEM;
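pages_written becomes a bitmap, one bit per page instead of one byte, allocated in whole unsigned longs; that is what the BITS_TO_LONGS() sizing computes. A userland sketch of the arithmetic (MY_BITS_TO_LONGS mimics the kernel macro):

#include <limits.h>
#include <stdio.h>

#define BITS_PER_ULONG	(CHAR_BIT * sizeof(unsigned long))
#define MY_BITS_TO_LONGS(n) (((n) + BITS_PER_ULONG - 1) / BITS_PER_ULONG)

int main(void)
{
	unsigned int pgnum = 100000;	/* example page count */

	printf("%u pages -> %zu bitmap bytes (was %u bytes as a byte array)\n",
	       pgnum, MY_BITS_TO_LONGS(pgnum) * sizeof(unsigned long), pgnum);
	return 0;
}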
@@ -653,9 +653,7 @@ static void free_device(struct nandsim *ns)
static char *get_partition_name(int i)
{
- char buf[64];
- sprintf(buf, "NAND simulator partition %d", i);
- return kstrdup(buf, GFP_KERNEL);
+ return kasprintf(GFP_KERNEL, "NAND simulator partition %d", i);
}
/*
@@ -690,7 +688,6 @@ static int init_nandsim(struct mtd_info *mtd)
ns->geom.totszoob = ns->geom.totsz + (uint64_t)ns->geom.pgnum * ns->geom.oobsz;
ns->geom.secshift = ffs(ns->geom.secsz) - 1;
ns->geom.pgshift = chip->page_shift;
- ns->geom.oobshift = ffs(ns->geom.oobsz) - 1;
ns->geom.pgsec = ns->geom.secsz / ns->geom.pgsz;
ns->geom.secszoob = ns->geom.secsz + ns->geom.oobsz * ns->geom.pgsec;
ns->options = 0;
@@ -761,12 +758,6 @@ static int init_nandsim(struct mtd_info *mtd)
ns->nbparts += 1;
}
- /* Detect how many ID bytes the NAND chip outputs */
- for (i = 0; nand_flash_ids[i].name != NULL; i++) {
- if (second_id_byte != nand_flash_ids[i].dev_id)
- continue;
- }
-
if (ns->busw == 16)
NS_WARN("16-bit flashes support wasn't tested\n");
@@ -780,7 +771,7 @@ static int init_nandsim(struct mtd_info *mtd)
printk("bus width: %u\n", ns->busw);
printk("bits in sector size: %u\n", ns->geom.secshift);
printk("bits in page size: %u\n", ns->geom.pgshift);
- printk("bits in OOB size: %u\n", ns->geom.oobshift);
+ printk("bits in OOB size: %u\n", ffs(ns->geom.oobsz) - 1);
printk("flash size with OOB: %llu KiB\n",
(unsigned long long)ns->geom.totszoob >> 10);
printk("page address bytes: %u\n", ns->geom.pgaddrbytes);
@@ -1442,7 +1433,7 @@ static inline u_char *NS_PAGE_BYTE_OFF(struct nandsim *ns)
return NS_GET_PAGE(ns)->byte + ns->regs.column + ns->regs.off;
}
-int do_read_error(struct nandsim *ns, int num)
+static int do_read_error(struct nandsim *ns, int num)
{
unsigned int page_no = ns->regs.row;
@@ -1454,7 +1445,7 @@ int do_read_error(struct nandsim *ns, int num)
return 0;
}
-void do_bit_flips(struct nandsim *ns, int num)
+static void do_bit_flips(struct nandsim *ns, int num)
{
if (bitflips && prandom_u32() < (1 << 22)) {
int flips = 1;
@@ -1479,7 +1470,7 @@ static void read_page(struct nandsim *ns, int num)
union ns_mem *mypage;
if (ns->cfile) {
- if (!ns->pages_written[ns->regs.row]) {
+ if (!test_bit(ns->regs.row, ns->pages_written)) {
NS_DBG("read_page: page %d not written\n", ns->regs.row);
memset(ns->buf.byte, 0xFF, num);
} else {
@@ -1490,7 +1481,7 @@ static void read_page(struct nandsim *ns, int num)
ns->regs.row, ns->regs.column + ns->regs.off);
if (do_read_error(ns, num))
return;
- pos = (loff_t)ns->regs.row * ns->geom.pgszoob + ns->regs.column + ns->regs.off;
+ pos = (loff_t)NS_RAW_OFFSET(ns) + ns->regs.off;
tx = read_file(ns, ns->cfile, ns->buf.byte, num, pos);
if (tx != num) {
NS_ERR("read_page: read error for page %d ret %ld\n", ns->regs.row, (long)tx);
@@ -1525,9 +1516,9 @@ static void erase_sector(struct nandsim *ns)
if (ns->cfile) {
for (i = 0; i < ns->geom.pgsec; i++)
- if (ns->pages_written[ns->regs.row + i]) {
+ if (__test_and_clear_bit(ns->regs.row + i,
+ ns->pages_written)) {
NS_DBG("erase_sector: freeing page %d\n", ns->regs.row + i);
- ns->pages_written[ns->regs.row + i] = 0;
}
return;
}
@@ -1559,8 +1550,8 @@ static int prog_page(struct nandsim *ns, int num)
NS_DBG("prog_page: writing page %d\n", ns->regs.row);
pg_off = ns->file_buf + ns->regs.column + ns->regs.off;
- off = (loff_t)ns->regs.row * ns->geom.pgszoob + ns->regs.column + ns->regs.off;
- if (!ns->pages_written[ns->regs.row]) {
+ off = (loff_t)NS_RAW_OFFSET(ns) + ns->regs.off;
+ if (!test_bit(ns->regs.row, ns->pages_written)) {
all = 1;
memset(ns->file_buf, 0xff, ns->geom.pgszoob);
} else {
@@ -1580,7 +1571,7 @@ static int prog_page(struct nandsim *ns, int num)
NS_ERR("prog_page: write error for page %d ret %ld\n", ns->regs.row, (long)tx);
return -1;
}
- ns->pages_written[ns->regs.row] = 1;
+ __set_bit(ns->regs.row, ns->pages_written);
} else {
tx = write_file(ns, ns->cfile, pg_off, num, off);
if (tx != num) {
diff --git a/drivers/mtd/nand/nuc900_nand.c b/drivers/mtd/nand/nuc900_nand.c
index cd6be2e..5211515 100644
--- a/drivers/mtd/nand/nuc900_nand.c
+++ b/drivers/mtd/nand/nuc900_nand.c
@@ -324,8 +324,6 @@ static int nuc900_nand_remove(struct platform_device *pdev)
kfree(nuc900_nand);
- platform_set_drvdata(pdev, NULL);
-
return 0;
}
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index 81b80af..4ecf0e5 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -154,7 +154,7 @@ static struct nand_ecclayout omap_oobinfo;
*/
static uint8_t scan_ff_pattern[] = { 0xff };
static struct nand_bbt_descr bb_descrip_flashbased = {
- .options = NAND_BBT_SCANEMPTY | NAND_BBT_SCANALLPAGES,
+ .options = NAND_BBT_SCANALLPAGES,
.offs = 0,
.len = 1,
.pattern = scan_ff_pattern,
@@ -1831,7 +1831,7 @@ static int omap_nand_probe(struct platform_device *pdev)
struct resource *res;
struct mtd_part_parser_data ppdata = {};
- pdata = pdev->dev.platform_data;
+ pdata = dev_get_platdata(&pdev->dev);
if (pdata == NULL) {
dev_err(&pdev->dev, "platform data missing\n");
return -ENODEV;
@@ -2087,7 +2087,6 @@ static int omap_nand_remove(struct platform_device *pdev)
mtd);
omap3_free_bch(&info->mtd);
- platform_set_drvdata(pdev, NULL);
if (info->dma)
dma_release_channel(info->dma);
diff --git a/drivers/mtd/nand/orion_nand.c b/drivers/mtd/nand/orion_nand.c
index 8fbd002..a393a5b 100644
--- a/drivers/mtd/nand/orion_nand.c
+++ b/drivers/mtd/nand/orion_nand.c
@@ -130,8 +130,9 @@ static int __init orion_nand_probe(struct platform_device *pdev)
if (!of_property_read_u32(pdev->dev.of_node,
"chip-delay", &val))
board->chip_delay = (u8)val;
- } else
- board = pdev->dev.platform_data;
+ } else {
+ board = dev_get_platdata(&pdev->dev);
+ }
mtd->priv = nc;
mtd->owner = THIS_MODULE;
@@ -186,7 +187,6 @@ no_dev:
clk_disable_unprepare(clk);
clk_put(clk);
}
- platform_set_drvdata(pdev, NULL);
iounmap(io_base);
no_res:
kfree(nc);
diff --git a/drivers/mtd/nand/plat_nand.c b/drivers/mtd/nand/plat_nand.c
index c004566..cad4cdc 100644
--- a/drivers/mtd/nand/plat_nand.c
+++ b/drivers/mtd/nand/plat_nand.c
@@ -30,7 +30,7 @@ static const char *part_probe_types[] = { "cmdlinepart", NULL };
*/
static int plat_nand_probe(struct platform_device *pdev)
{
- struct platform_nand_data *pdata = pdev->dev.platform_data;
+ struct platform_nand_data *pdata = dev_get_platdata(&pdev->dev);
struct mtd_part_parser_data ppdata;
struct plat_nand_data *data;
struct resource *res;
@@ -122,7 +122,6 @@ static int plat_nand_probe(struct platform_device *pdev)
out:
if (pdata->ctrl.remove)
pdata->ctrl.remove(pdev);
- platform_set_drvdata(pdev, NULL);
iounmap(data->io_base);
out_release_io:
release_mem_region(res->start, resource_size(res));
@@ -137,7 +136,7 @@ out_free:
static int plat_nand_remove(struct platform_device *pdev)
{
struct plat_nand_data *data = platform_get_drvdata(pdev);
- struct platform_nand_data *pdata = pdev->dev.platform_data;
+ struct platform_nand_data *pdata = dev_get_platdata(&pdev->dev);
struct resource *res;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index dec80ca..5db900d 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -25,7 +25,14 @@
#include <linux/of.h>
#include <linux/of_device.h>
+#if defined(CONFIG_ARCH_PXA) || defined(CONFIG_ARCH_MMP)
+#define ARCH_HAS_DMA
+#endif
+
+#ifdef ARCH_HAS_DMA
#include <mach/dma.h>
+#endif
+
#include <linux/platform_data/mtd-nand-pxa3xx.h>
#define CHIP_DELAY_TIMEOUT (2 * HZ/10)
@@ -80,6 +87,7 @@
#define NDSR_RDDREQ (0x1 << 1)
#define NDSR_WRCMDREQ (0x1)
+#define NDCB0_LEN_OVRD (0x1 << 28)
#define NDCB0_ST_ROW_EN (0x1 << 26)
#define NDCB0_AUTO_RS (0x1 << 25)
#define NDCB0_CSEL (0x1 << 24)
@@ -123,9 +131,13 @@ enum {
STATE_READY,
};
+enum pxa3xx_nand_variant {
+ PXA3XX_NAND_VARIANT_PXA,
+ PXA3XX_NAND_VARIANT_ARMADA370,
+};
+
struct pxa3xx_nand_host {
struct nand_chip chip;
- struct pxa3xx_nand_cmdset *cmdset;
struct mtd_info *mtd;
void *info_data;
@@ -139,10 +151,6 @@ struct pxa3xx_nand_host {
unsigned int row_addr_cycles;
size_t read_id_bytes;
- /* cached register value */
- uint32_t reg_ndcr;
- uint32_t ndtr0cs0;
- uint32_t ndtr1cs0;
};
struct pxa3xx_nand_info {
@@ -171,9 +179,16 @@ struct pxa3xx_nand_info {
struct pxa3xx_nand_host *host[NUM_CHIP_SELECT];
unsigned int state;
+ /*
+ * This driver supports NFCv1 (as found in PXA SoCs)
+ * and NFCv2 (as found in Armada 370/XP SoCs).
+ */
+ enum pxa3xx_nand_variant variant;
+
int cs;
int use_ecc; /* use HW ECC ? */
int use_dma; /* use DMA ? */
+ int use_spare; /* use spare ? */
int is_ready;
unsigned int page_size; /* page size of attached chip */
@@ -181,33 +196,22 @@ struct pxa3xx_nand_info {
unsigned int oob_size;
int retcode;
+ /* cached register value */
+ uint32_t reg_ndcr;
+ uint32_t ndtr0cs0;
+ uint32_t ndtr1cs0;
+
/* generated NDCBx register values */
uint32_t ndcb0;
uint32_t ndcb1;
uint32_t ndcb2;
+ uint32_t ndcb3;
};
static bool use_dma = 1;
module_param(use_dma, bool, 0444);
MODULE_PARM_DESC(use_dma, "enable DMA for data transfers to/from the NAND HW");
-/*
- * Default NAND flash controller configuration setup by the
- * bootloader. This configuration is used only when pdata->keep_config is set
- */
-static struct pxa3xx_nand_cmdset default_cmdset = {
- .read1 = 0x3000,
- .read2 = 0x0050,
- .program = 0x1080,
- .read_status = 0x0070,
- .read_id = 0x0090,
- .erase = 0xD060,
- .reset = 0x00FF,
- .lock = 0x002A,
- .unlock = 0x2423,
- .lock_status = 0x007A,
-};
-
static struct pxa3xx_nand_timing timing[] = {
{ 40, 80, 60, 100, 80, 100, 90000, 400, 40, },
{ 10, 0, 20, 40, 30, 40, 11123, 110, 10, },
@@ -230,8 +234,6 @@ static struct pxa3xx_nand_flash builtin_flash_types[] = {
/* Define a default flash type setting serve as flash detecting only */
#define DEFAULT_FLASH_TYPE (&builtin_flash_types[0])
-const char *mtd_names[] = {"pxa3xx_nand-0", "pxa3xx_nand-1", NULL};
-
#define NDTR0_tCH(c) (min((c), 7) << 19)
#define NDTR0_tCS(c) (min((c), 7) << 16)
#define NDTR0_tWH(c) (min((c), 7) << 11)
@@ -264,8 +266,8 @@ static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host,
NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) |
NDTR1_tAR(ns2cycle(t->tAR, nand_clk));
- host->ndtr0cs0 = ndtr0;
- host->ndtr1cs0 = ndtr1;
+ info->ndtr0cs0 = ndtr0;
+ info->ndtr1cs0 = ndtr1;
nand_writel(info, NDTR0CS0, ndtr0);
nand_writel(info, NDTR1CS0, ndtr1);
}
@@ -273,7 +275,7 @@ static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host,
static void pxa3xx_set_datasize(struct pxa3xx_nand_info *info)
{
struct pxa3xx_nand_host *host = info->host[info->cs];
- int oob_enable = host->reg_ndcr & NDCR_SPARE_EN;
+ int oob_enable = info->reg_ndcr & NDCR_SPARE_EN;
info->data_size = host->page_size;
if (!oob_enable) {
@@ -299,12 +301,25 @@ static void pxa3xx_set_datasize(struct pxa3xx_nand_info *info)
*/
static void pxa3xx_nand_start(struct pxa3xx_nand_info *info)
{
- struct pxa3xx_nand_host *host = info->host[info->cs];
uint32_t ndcr;
- ndcr = host->reg_ndcr;
- ndcr |= info->use_ecc ? NDCR_ECC_EN : 0;
- ndcr |= info->use_dma ? NDCR_DMA_EN : 0;
+ ndcr = info->reg_ndcr;
+
+ if (info->use_ecc)
+ ndcr |= NDCR_ECC_EN;
+ else
+ ndcr &= ~NDCR_ECC_EN;
+
+ if (info->use_dma)
+ ndcr |= NDCR_DMA_EN;
+ else
+ ndcr &= ~NDCR_DMA_EN;
+
+ if (info->use_spare)
+ ndcr |= NDCR_SPARE_EN;
+ else
+ ndcr &= ~NDCR_SPARE_EN;
+
ndcr |= NDCR_ND_RUN;
/* clear status bits and run */
@@ -333,7 +348,8 @@ static void pxa3xx_nand_stop(struct pxa3xx_nand_info *info)
nand_writel(info, NDSR, NDSR_MASK);
}
-static void enable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
+static void __maybe_unused
+enable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
{
uint32_t ndcr;
@@ -373,6 +389,7 @@ static void handle_data_pio(struct pxa3xx_nand_info *info)
}
}
+#ifdef ARCH_HAS_DMA
static void start_data_dma(struct pxa3xx_nand_info *info)
{
struct pxa_dma_desc *desc = info->data_desc;
@@ -419,6 +436,10 @@ static void pxa3xx_nand_data_dma_irq(int channel, void *data)
enable_int(info, NDCR_INT_MASK);
nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
}
+#else
+static void start_data_dma(struct pxa3xx_nand_info *info)
+{}
+#endif
static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
{
@@ -467,9 +488,22 @@ static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
nand_writel(info, NDSR, NDSR_WRCMDREQ);
status &= ~NDSR_WRCMDREQ;
info->state = STATE_CMD_HANDLE;
+
+ /*
+ * Command buffer registers NDCB{0-2} (and optionally NDCB3)
+ * must be loaded by writing either 12 or 16 bytes
+ * directly to NDCB0, four bytes at a time.
+ *
+ * Direct write access to NDCB1, NDCB2 and NDCB3 is ignored
+ * but each NDCBx register can be read.
+ */
nand_writel(info, NDCB0, info->ndcb0);
nand_writel(info, NDCB0, info->ndcb1);
nand_writel(info, NDCB0, info->ndcb2);
+
+ /* NDCB3 register is available in NFCv2 (Armada 370/XP SoC) */
+ if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
+ nand_writel(info, NDCB0, info->ndcb3);
}
/* clear NDSR to let the controller exit the IRQ */
@@ -491,7 +525,6 @@ static inline int is_buf_blank(uint8_t *buf, size_t len)
static int prepare_command_pool(struct pxa3xx_nand_info *info, int command,
uint16_t column, int page_addr)
{
- uint16_t cmd;
int addr_cycle, exec_cmd;
struct pxa3xx_nand_host *host;
struct mtd_info *mtd;
@@ -506,6 +539,8 @@ static int prepare_command_pool(struct pxa3xx_nand_info *info, int command,
info->buf_count = 0;
info->oob_size = 0;
info->use_ecc = 0;
+ info->use_spare = 1;
+ info->use_dma = (use_dma) ? 1 : 0;
info->is_ready = 0;
info->retcode = ERR_NONE;
if (info->cs != 0)
@@ -520,12 +555,16 @@ static int prepare_command_pool(struct pxa3xx_nand_info *info, int command,
case NAND_CMD_READOOB:
pxa3xx_set_datasize(info);
break;
+ case NAND_CMD_PARAM:
+ info->use_spare = 0;
+ break;
case NAND_CMD_SEQIN:
exec_cmd = 0;
break;
default:
info->ndcb1 = 0;
info->ndcb2 = 0;
+ info->ndcb3 = 0;
break;
}
@@ -535,21 +574,17 @@ static int prepare_command_pool(struct pxa3xx_nand_info *info, int command,
switch (command) {
case NAND_CMD_READOOB:
case NAND_CMD_READ0:
- cmd = host->cmdset->read1;
+ info->buf_start = column;
+ info->ndcb0 |= NDCB0_CMD_TYPE(0)
+ | addr_cycle
+ | NAND_CMD_READ0;
+
if (command == NAND_CMD_READOOB)
- info->buf_start = mtd->writesize + column;
- else
- info->buf_start = column;
+ info->buf_start += mtd->writesize;
- if (unlikely(host->page_size < PAGE_CHUNK_SIZE))
- info->ndcb0 |= NDCB0_CMD_TYPE(0)
- | addr_cycle
- | (cmd & NDCB0_CMD1_MASK);
- else
- info->ndcb0 |= NDCB0_CMD_TYPE(0)
- | NDCB0_DBC
- | addr_cycle
- | cmd;
+ /* Second command setting for large pages */
+ if (host->page_size >= PAGE_CHUNK_SIZE)
+ info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8);
case NAND_CMD_SEQIN:
/* small page addr setting */
@@ -580,49 +615,58 @@ static int prepare_command_pool(struct pxa3xx_nand_info *info, int command,
break;
}
- cmd = host->cmdset->program;
info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
| NDCB0_AUTO_RS
| NDCB0_ST_ROW_EN
| NDCB0_DBC
- | cmd
+ | (NAND_CMD_PAGEPROG << 8)
+ | NAND_CMD_SEQIN
| addr_cycle;
break;
+ case NAND_CMD_PARAM:
+ info->buf_count = 256;
+ info->ndcb0 |= NDCB0_CMD_TYPE(0)
+ | NDCB0_ADDR_CYC(1)
+ | NDCB0_LEN_OVRD
+ | command;
+ info->ndcb1 = (column & 0xFF);
+ info->ndcb3 = 256;
+ info->data_size = 256;
+ break;
+
case NAND_CMD_READID:
- cmd = host->cmdset->read_id;
info->buf_count = host->read_id_bytes;
info->ndcb0 |= NDCB0_CMD_TYPE(3)
| NDCB0_ADDR_CYC(1)
- | cmd;
+ | command;
+ info->ndcb1 = (column & 0xFF);
info->data_size = 8;
break;
case NAND_CMD_STATUS:
- cmd = host->cmdset->read_status;
info->buf_count = 1;
info->ndcb0 |= NDCB0_CMD_TYPE(4)
| NDCB0_ADDR_CYC(1)
- | cmd;
+ | command;
info->data_size = 8;
break;
case NAND_CMD_ERASE1:
- cmd = host->cmdset->erase;
info->ndcb0 |= NDCB0_CMD_TYPE(2)
| NDCB0_AUTO_RS
| NDCB0_ADDR_CYC(3)
| NDCB0_DBC
- | cmd;
+ | (NAND_CMD_ERASE2 << 8)
+ | NAND_CMD_ERASE1;
info->ndcb1 = page_addr;
info->ndcb2 = 0;
break;
case NAND_CMD_RESET:
- cmd = host->cmdset->reset;
info->ndcb0 |= NDCB0_CMD_TYPE(5)
- | cmd;
+ | command;
break;
@@ -652,7 +696,7 @@ static void pxa3xx_nand_cmdfunc(struct mtd_info *mtd, unsigned command,
* "byte" address into a "word" address appropriate
* for indexing a word-oriented device
*/
- if (host->reg_ndcr & NDCR_DWIDTH_M)
+ if (info->reg_ndcr & NDCR_DWIDTH_M)
column /= 2;
/*
@@ -662,8 +706,8 @@ static void pxa3xx_nand_cmdfunc(struct mtd_info *mtd, unsigned command,
*/
if (info->cs != host->cs) {
info->cs = host->cs;
- nand_writel(info, NDTR0CS0, host->ndtr0cs0);
- nand_writel(info, NDTR1CS0, host->ndtr1cs0);
+ nand_writel(info, NDTR0CS0, info->ndtr0cs0);
+ nand_writel(info, NDTR1CS0, info->ndtr1cs0);
}
info->state = STATE_PREPARED;
@@ -803,7 +847,7 @@ static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info,
const struct pxa3xx_nand_flash *f)
{
struct platform_device *pdev = info->pdev;
- struct pxa3xx_nand_platform_data *pdata = pdev->dev.platform_data;
+ struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct pxa3xx_nand_host *host = info->host[info->cs];
uint32_t ndcr = 0x0; /* enable all interrupts */
@@ -818,7 +862,6 @@ static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info,
}
/* calculate flash information */
- host->cmdset = &default_cmdset;
host->page_size = f->page_size;
host->read_id_bytes = (f->page_size == 2048) ? 4 : 2;
@@ -840,7 +883,7 @@ static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info,
ndcr |= NDCR_RD_ID_CNT(host->read_id_bytes);
ndcr |= NDCR_SPARE_EN; /* enable spare by default */
- host->reg_ndcr = ndcr;
+ info->reg_ndcr = ndcr;
pxa3xx_nand_set_timing(host, f->timing);
return 0;
@@ -863,12 +906,9 @@ static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
host->read_id_bytes = 2;
}
- host->reg_ndcr = ndcr & ~NDCR_INT_MASK;
- host->cmdset = &default_cmdset;
-
- host->ndtr0cs0 = nand_readl(info, NDTR0CS0);
- host->ndtr1cs0 = nand_readl(info, NDTR1CS0);
-
+ info->reg_ndcr = ndcr & ~NDCR_INT_MASK;
+ info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
+ info->ndtr1cs0 = nand_readl(info, NDTR1CS0);
return 0;
}
@@ -878,6 +918,7 @@ static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
*/
#define MAX_BUFF_SIZE PAGE_SIZE
+#ifdef ARCH_HAS_DMA
static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
{
struct platform_device *pdev = info->pdev;
@@ -912,6 +953,32 @@ static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
return 0;
}
+static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info *info)
+{
+ struct platform_device *pdev = info->pdev;
+ if (use_dma) {
+ pxa_free_dma(info->data_dma_ch);
+ dma_free_coherent(&pdev->dev, MAX_BUFF_SIZE,
+ info->data_buff, info->data_buff_phys);
+ } else {
+ kfree(info->data_buff);
+ }
+}
+#else
+static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
+{
+ info->data_buff = kmalloc(MAX_BUFF_SIZE, GFP_KERNEL);
+ if (info->data_buff == NULL)
+ return -ENOMEM;
+ return 0;
+}
+
+static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info *info)
+{
+ kfree(info->data_buff);
+}
+#endif
+
static int pxa3xx_nand_sensing(struct pxa3xx_nand_info *info)
{
struct mtd_info *mtd;
@@ -934,7 +1001,7 @@ static int pxa3xx_nand_scan(struct mtd_info *mtd)
struct pxa3xx_nand_host *host = mtd->priv;
struct pxa3xx_nand_info *info = host->info_data;
struct platform_device *pdev = info->pdev;
- struct pxa3xx_nand_platform_data *pdata = pdev->dev.platform_data;
+ struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct nand_flash_dev pxa3xx_flash_ids[2], *def = NULL;
const struct pxa3xx_nand_flash *f = NULL;
struct nand_chip *chip = mtd->priv;
@@ -1003,7 +1070,7 @@ KEEP_CONFIG:
chip->ecc.size = host->page_size;
chip->ecc.strength = 1;
- if (host->reg_ndcr & NDCR_DWIDTH_M)
+ if (info->reg_ndcr & NDCR_DWIDTH_M)
chip->options |= NAND_BUSWIDTH_16;
if (nand_scan_ident(mtd, 1, def))
@@ -1019,8 +1086,6 @@ KEEP_CONFIG:
host->row_addr_cycles = 3;
else
host->row_addr_cycles = 2;
-
- mtd->name = mtd_names[0];
return nand_scan_tail(mtd);
}
@@ -1034,13 +1099,11 @@ static int alloc_nand_resource(struct platform_device *pdev)
struct resource *r;
int ret, irq, cs;
- pdata = pdev->dev.platform_data;
- info = kzalloc(sizeof(*info) + (sizeof(*mtd) +
- sizeof(*host)) * pdata->num_cs, GFP_KERNEL);
- if (!info) {
- dev_err(&pdev->dev, "failed to allocate memory\n");
+ pdata = dev_get_platdata(&pdev->dev);
+ info = devm_kzalloc(&pdev->dev, sizeof(*info) + (sizeof(*mtd) +
+ sizeof(*host)) * pdata->num_cs, GFP_KERNEL);
+ if (!info)
return -ENOMEM;
- }
info->pdev = pdev;
for (cs = 0; cs < pdata->num_cs; cs++) {
@@ -1069,72 +1132,64 @@ static int alloc_nand_resource(struct platform_device *pdev)
spin_lock_init(&chip->controller->lock);
init_waitqueue_head(&chip->controller->wq);
- info->clk = clk_get(&pdev->dev, NULL);
+ info->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(info->clk)) {
dev_err(&pdev->dev, "failed to get nand clock\n");
- ret = PTR_ERR(info->clk);
- goto fail_free_mtd;
+ return PTR_ERR(info->clk);
}
- clk_enable(info->clk);
-
- /*
- * This is a dirty hack to make this driver work from devicetree
- * bindings. It can be removed once we have a prober DMA controller
- * framework for DT.
- */
- if (pdev->dev.of_node && cpu_is_pxa3xx()) {
- info->drcmr_dat = 97;
- info->drcmr_cmd = 99;
- } else {
- r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
- if (r == NULL) {
- dev_err(&pdev->dev, "no resource defined for data DMA\n");
- ret = -ENXIO;
- goto fail_put_clk;
- }
- info->drcmr_dat = r->start;
+ ret = clk_prepare_enable(info->clk);
+ if (ret < 0)
+ return ret;
- r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
- if (r == NULL) {
- dev_err(&pdev->dev, "no resource defined for command DMA\n");
- ret = -ENXIO;
- goto fail_put_clk;
+ if (use_dma) {
+ /*
+ * This is a dirty hack to make this driver work from
+ * devicetree bindings. It can be removed once we have
+ * a proper DMA controller framework for DT.
+ */
+ if (pdev->dev.of_node &&
+ of_machine_is_compatible("marvell,pxa3xx")) {
+ info->drcmr_dat = 97;
+ info->drcmr_cmd = 99;
+ } else {
+ r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+ if (r == NULL) {
+ dev_err(&pdev->dev,
+ "no resource defined for data DMA\n");
+ ret = -ENXIO;
+ goto fail_disable_clk;
+ }
+ info->drcmr_dat = r->start;
+
+ r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
+ if (r == NULL) {
+ dev_err(&pdev->dev,
+ "no resource defined for cmd DMA\n");
+ ret = -ENXIO;
+ goto fail_disable_clk;
+ }
+ info->drcmr_cmd = r->start;
}
- info->drcmr_cmd = r->start;
}
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
dev_err(&pdev->dev, "no IRQ resource defined\n");
ret = -ENXIO;
- goto fail_put_clk;
+ goto fail_disable_clk;
}
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (r == NULL) {
- dev_err(&pdev->dev, "no IO memory resource defined\n");
- ret = -ENODEV;
- goto fail_put_clk;
- }
-
- r = request_mem_region(r->start, resource_size(r), pdev->name);
- if (r == NULL) {
- dev_err(&pdev->dev, "failed to request memory resource\n");
- ret = -EBUSY;
- goto fail_put_clk;
- }
-
- info->mmio_base = ioremap(r->start, resource_size(r));
- if (info->mmio_base == NULL) {
- dev_err(&pdev->dev, "ioremap() failed\n");
- ret = -ENODEV;
- goto fail_free_res;
+ info->mmio_base = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(info->mmio_base)) {
+ ret = PTR_ERR(info->mmio_base);
+ goto fail_disable_clk;
}
info->mmio_phys = r->start;
ret = pxa3xx_nand_init_buff(info);
if (ret)
- goto fail_free_io;
+ goto fail_disable_clk;
/* initialize all interrupts to be disabled */
disable_int(info, NDSR_MASK);
@@ -1152,21 +1207,9 @@ static int alloc_nand_resource(struct platform_device *pdev)
fail_free_buf:
free_irq(irq, info);
- if (use_dma) {
- pxa_free_dma(info->data_dma_ch);
- dma_free_coherent(&pdev->dev, MAX_BUFF_SIZE,
- info->data_buff, info->data_buff_phys);
- } else
- kfree(info->data_buff);
-fail_free_io:
- iounmap(info->mmio_base);
-fail_free_res:
- release_mem_region(r->start, resource_size(r));
-fail_put_clk:
- clk_disable(info->clk);
- clk_put(info->clk);
-fail_free_mtd:
- kfree(info);
+ pxa3xx_nand_free_buff(info);
+fail_disable_clk:
+ clk_disable_unprepare(info->clk);
return ret;
}
@@ -1174,44 +1217,48 @@ static int pxa3xx_nand_remove(struct platform_device *pdev)
{
struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
struct pxa3xx_nand_platform_data *pdata;
- struct resource *r;
int irq, cs;
if (!info)
return 0;
- pdata = pdev->dev.platform_data;
- platform_set_drvdata(pdev, NULL);
+ pdata = dev_get_platdata(&pdev->dev);
irq = platform_get_irq(pdev, 0);
if (irq >= 0)
free_irq(irq, info);
- if (use_dma) {
- pxa_free_dma(info->data_dma_ch);
- dma_free_writecombine(&pdev->dev, MAX_BUFF_SIZE,
- info->data_buff, info->data_buff_phys);
- } else
- kfree(info->data_buff);
-
- iounmap(info->mmio_base);
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(r->start, resource_size(r));
+ pxa3xx_nand_free_buff(info);
- clk_disable(info->clk);
- clk_put(info->clk);
+ clk_disable_unprepare(info->clk);
for (cs = 0; cs < pdata->num_cs; cs++)
nand_release(info->host[cs]->mtd);
- kfree(info);
return 0;
}
#ifdef CONFIG_OF
static struct of_device_id pxa3xx_nand_dt_ids[] = {
- { .compatible = "marvell,pxa3xx-nand" },
+ {
+ .compatible = "marvell,pxa3xx-nand",
+ .data = (void *)PXA3XX_NAND_VARIANT_PXA,
+ },
+ {
+ .compatible = "marvell,armada370-nand",
+ .data = (void *)PXA3XX_NAND_VARIANT_ARMADA370,
+ },
{}
};
-MODULE_DEVICE_TABLE(of, i2c_pxa_dt_ids);
+MODULE_DEVICE_TABLE(of, pxa3xx_nand_dt_ids);
+
+static enum pxa3xx_nand_variant
+pxa3xx_nand_get_variant(struct platform_device *pdev)
+{
+ const struct of_device_id *of_id =
+ of_match_device(pxa3xx_nand_dt_ids, &pdev->dev);
+ if (!of_id)
+ return PXA3XX_NAND_VARIANT_PXA;
+ return (enum pxa3xx_nand_variant)of_id->data;
+}
static int pxa3xx_nand_probe_dt(struct platform_device *pdev)
{
@@ -1251,11 +1298,18 @@ static int pxa3xx_nand_probe(struct platform_device *pdev)
struct pxa3xx_nand_info *info;
int ret, cs, probe_success;
+#ifndef ARCH_HAS_DMA
+ if (use_dma) {
+ use_dma = 0;
+ dev_warn(&pdev->dev,
+ "This platform can't do DMA on this device\n");
+ }
+#endif
ret = pxa3xx_nand_probe_dt(pdev);
if (ret)
return ret;
- pdata = pdev->dev.platform_data;
+ pdata = dev_get_platdata(&pdev->dev);
if (!pdata) {
dev_err(&pdev->dev, "no platform data defined\n");
return -ENODEV;
@@ -1268,10 +1322,14 @@ static int pxa3xx_nand_probe(struct platform_device *pdev)
}
info = platform_get_drvdata(pdev);
+ info->variant = pxa3xx_nand_get_variant(pdev);
probe_success = 0;
for (cs = 0; cs < pdata->num_cs; cs++) {
+ struct mtd_info *mtd = info->host[cs]->mtd;
+
+ mtd->name = pdev->name;
info->cs = cs;
- ret = pxa3xx_nand_scan(info->host[cs]->mtd);
+ ret = pxa3xx_nand_scan(mtd);
if (ret) {
dev_warn(&pdev->dev, "failed to scan nand at cs %d\n",
cs);
@@ -1279,7 +1337,7 @@ static int pxa3xx_nand_probe(struct platform_device *pdev)
}
ppdata.of_node = pdev->dev.of_node;
- ret = mtd_device_parse_register(info->host[cs]->mtd, NULL,
+ ret = mtd_device_parse_register(mtd, NULL,
&ppdata, pdata->parts[cs],
pdata->nr_parts[cs]);
if (!ret)
@@ -1302,7 +1360,7 @@ static int pxa3xx_nand_suspend(struct platform_device *pdev, pm_message_t state)
struct mtd_info *mtd;
int cs;
- pdata = pdev->dev.platform_data;
+ pdata = dev_get_platdata(&pdev->dev);
if (info->state) {
dev_err(&pdev->dev, "driver busy, state = %d\n", info->state);
return -EAGAIN;
@@ -1323,7 +1381,7 @@ static int pxa3xx_nand_resume(struct platform_device *pdev)
struct mtd_info *mtd;
int cs;
- pdata = pdev->dev.platform_data;
+ pdata = dev_get_platdata(&pdev->dev);
/* We don't want to handle interrupt without calling mtd routine */
disable_int(info, NDCR_INT_MASK);
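The pxa3xx_nand hunks above replace hand-rolled resource management (kzalloc/clk_get/request_mem_region/ioremap and the matching fail_free_io/fail_free_res/fail_put_clk/fail_free_mtd labels) with managed devm_* calls, leaving only the prepared clock to unwind by hand. A minimal sketch of the resulting probe pattern, with hypothetical names, using only the calls that appear in the hunks:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
        struct resource *r;
        void __iomem *base;
        struct clk *clk;
        int ret;

        /* devm_* allocations are released automatically on probe
         * failure and on driver detach, so no kfree()/clk_put()/
         * iounmap()/release_mem_region() unwinding is needed. */
        clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(clk))
                return PTR_ERR(clk);

        ret = clk_prepare_enable(clk);
        if (ret < 0)
                return ret;

        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        base = devm_ioremap_resource(&pdev->dev, r);
        if (IS_ERR(base)) {
                /* the prepared clock is the only state left to undo */
                clk_disable_unprepare(clk);
                return PTR_ERR(base);
        }

        return 0;
}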
diff --git a/drivers/mtd/nand/r852.c b/drivers/mtd/nand/r852.c
index 4495f85..9dcf02d 100644
--- a/drivers/mtd/nand/r852.c
+++ b/drivers/mtd/nand/r852.c
@@ -229,7 +229,7 @@ static void r852_do_dma(struct r852_device *dev, uint8_t *buf, int do_read)
/*
* Program data lines of the nand chip to send data to it
*/
-void r852_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
+static void r852_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
{
struct r852_device *dev = r852_get_dev(mtd);
uint32_t reg;
@@ -261,7 +261,7 @@ void r852_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
/*
* Read data lines of the nand chip to retrieve data
*/
-void r852_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+static void r852_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
{
struct r852_device *dev = r852_get_dev(mtd);
uint32_t reg;
@@ -312,7 +312,7 @@ static uint8_t r852_read_byte(struct mtd_info *mtd)
/*
* Control several chip lines & send commands
*/
-void r852_cmdctl(struct mtd_info *mtd, int dat, unsigned int ctrl)
+static void r852_cmdctl(struct mtd_info *mtd, int dat, unsigned int ctrl)
{
struct r852_device *dev = r852_get_dev(mtd);
@@ -357,7 +357,7 @@ void r852_cmdctl(struct mtd_info *mtd, int dat, unsigned int ctrl)
* Wait till card is ready.
* based on nand_wait, but returns errors on DMA error
*/
-int r852_wait(struct mtd_info *mtd, struct nand_chip *chip)
+static int r852_wait(struct mtd_info *mtd, struct nand_chip *chip)
{
struct r852_device *dev = chip->priv;
@@ -386,7 +386,7 @@ int r852_wait(struct mtd_info *mtd, struct nand_chip *chip)
* Check if card is ready
*/
-int r852_ready(struct mtd_info *mtd)
+static int r852_ready(struct mtd_info *mtd)
{
struct r852_device *dev = r852_get_dev(mtd);
return !(r852_read_reg(dev, R852_CARD_STA) & R852_CARD_STA_BUSY);
@@ -397,7 +397,7 @@ int r852_ready(struct mtd_info *mtd)
* Set ECC engine mode
*/
-void r852_ecc_hwctl(struct mtd_info *mtd, int mode)
+static void r852_ecc_hwctl(struct mtd_info *mtd, int mode)
{
struct r852_device *dev = r852_get_dev(mtd);
@@ -429,7 +429,7 @@ void r852_ecc_hwctl(struct mtd_info *mtd, int mode)
* Calculate ECC, only used for writes
*/
-int r852_ecc_calculate(struct mtd_info *mtd, const uint8_t *dat,
+static int r852_ecc_calculate(struct mtd_info *mtd, const uint8_t *dat,
uint8_t *ecc_code)
{
struct r852_device *dev = r852_get_dev(mtd);
@@ -461,7 +461,7 @@ int r852_ecc_calculate(struct mtd_info *mtd, const uint8_t *dat,
* Correct the data using ECC, hw did almost everything for us
*/
-int r852_ecc_correct(struct mtd_info *mtd, uint8_t *dat,
+static int r852_ecc_correct(struct mtd_info *mtd, uint8_t *dat,
uint8_t *read_ecc, uint8_t *calc_ecc)
{
uint16_t ecc_reg;
@@ -529,7 +529,7 @@ static int r852_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
* Start the nand engine
*/
-void r852_engine_enable(struct r852_device *dev)
+static void r852_engine_enable(struct r852_device *dev)
{
if (r852_read_reg_dword(dev, R852_HW) & R852_HW_UNKNOWN) {
r852_write_reg(dev, R852_CTL, R852_CTL_RESET | R852_CTL_ON);
@@ -547,7 +547,7 @@ void r852_engine_enable(struct r852_device *dev)
* Stop the nand engine
*/
-void r852_engine_disable(struct r852_device *dev)
+static void r852_engine_disable(struct r852_device *dev)
{
r852_write_reg_dword(dev, R852_HW, 0);
r852_write_reg(dev, R852_CTL, R852_CTL_RESET);
@@ -557,7 +557,7 @@ void r852_engine_disable(struct r852_device *dev)
* Test if card is present
*/
-void r852_card_update_present(struct r852_device *dev)
+static void r852_card_update_present(struct r852_device *dev)
{
unsigned long flags;
uint8_t reg;
@@ -572,7 +572,7 @@ void r852_card_update_present(struct r852_device *dev)
* Update card detection IRQ state according to current card state
* which is read in r852_card_update_present
*/
-void r852_update_card_detect(struct r852_device *dev)
+static void r852_update_card_detect(struct r852_device *dev)
{
int card_detect_reg = r852_read_reg(dev, R852_CARD_IRQ_ENABLE);
dev->card_unstable = 0;
@@ -586,8 +586,8 @@ void r852_update_card_detect(struct r852_device *dev)
r852_write_reg(dev, R852_CARD_IRQ_ENABLE, card_detect_reg);
}
-ssize_t r852_media_type_show(struct device *sys_dev,
- struct device_attribute *attr, char *buf)
+static ssize_t r852_media_type_show(struct device *sys_dev,
+ struct device_attribute *attr, char *buf)
{
struct mtd_info *mtd = container_of(sys_dev, struct mtd_info, dev);
struct r852_device *dev = r852_get_dev(mtd);
@@ -597,11 +597,11 @@ ssize_t r852_media_type_show(struct device *sys_dev,
return strlen(data);
}
-DEVICE_ATTR(media_type, S_IRUGO, r852_media_type_show, NULL);
+static DEVICE_ATTR(media_type, S_IRUGO, r852_media_type_show, NULL);
/* Detect properties of card in slot */
-void r852_update_media_status(struct r852_device *dev)
+static void r852_update_media_status(struct r852_device *dev)
{
uint8_t reg;
unsigned long flags;
@@ -630,7 +630,7 @@ void r852_update_media_status(struct r852_device *dev)
* Register the nand device
* Called when the card is detected
*/
-int r852_register_nand_device(struct r852_device *dev)
+static int r852_register_nand_device(struct r852_device *dev)
{
dev->mtd = kzalloc(sizeof(struct mtd_info), GFP_KERNEL);
@@ -668,7 +668,7 @@ error1:
* Unregister the card
*/
-void r852_unregister_nand_device(struct r852_device *dev)
+static void r852_unregister_nand_device(struct r852_device *dev)
{
if (!dev->card_registred)
return;
@@ -682,7 +682,7 @@ void r852_unregister_nand_device(struct r852_device *dev)
}
/* Card state updater */
-void r852_card_detect_work(struct work_struct *work)
+static void r852_card_detect_work(struct work_struct *work)
{
struct r852_device *dev =
container_of(work, struct r852_device, card_detect_work.work);
@@ -821,7 +821,7 @@ out:
return ret;
}
-int r852_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
+static int r852_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
{
int error;
struct nand_chip *chip;
@@ -961,7 +961,7 @@ error1:
return error;
}
-void r852_remove(struct pci_dev *pci_dev)
+static void r852_remove(struct pci_dev *pci_dev)
{
struct r852_device *dev = pci_get_drvdata(pci_dev);
@@ -992,7 +992,7 @@ void r852_remove(struct pci_dev *pci_dev)
pci_disable_device(pci_dev);
}
-void r852_shutdown(struct pci_dev *pci_dev)
+static void r852_shutdown(struct pci_dev *pci_dev)
{
struct r852_device *dev = pci_get_drvdata(pci_dev);
@@ -1002,7 +1002,7 @@ void r852_shutdown(struct pci_dev *pci_dev)
pci_disable_device(pci_dev);
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int r852_suspend(struct device *device)
{
struct r852_device *dev = pci_get_drvdata(to_pci_dev(device));
@@ -1055,9 +1055,6 @@ static int r852_resume(struct device *device)
r852_update_card_detect(dev);
return 0;
}
-#else
-#define r852_suspend NULL
-#define r852_resume NULL
#endif
static const struct pci_device_id r852_pci_id_tbl[] = {
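The CONFIG_PM to CONFIG_PM_SLEEP switch above also drops the #else stubs that aliased r852_suspend/r852_resume to NULL. That is safe as long as the driver wires its callbacks through the dev_pm_ops helper macros (the definition is not visible in this hunk), which only reference the functions when CONFIG_PM_SLEEP is enabled; a sketch, assuming that setup:

#include <linux/pm.h>

/* Expands to a fully initialized struct dev_pm_ops; the suspend and
 * resume pointers are compiled in only under CONFIG_PM_SLEEP, so no
 * NULL fallback definitions are needed. r852_suspend/r852_resume are
 * the callbacks defined earlier in r852.c. */
static SIMPLE_DEV_PM_OPS(r852_pm_ops, r852_suspend, r852_resume);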
diff --git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c
index d65afd2..d65cbe9 100644
--- a/drivers/mtd/nand/s3c2410.c
+++ b/drivers/mtd/nand/s3c2410.c
@@ -150,7 +150,7 @@ static struct s3c2410_nand_info *to_nand_info(struct platform_device *dev)
static struct s3c2410_platform_nand *to_nand_plat(struct platform_device *dev)
{
- return dev->dev.platform_data;
+ return dev_get_platdata(&dev->dev);
}
static inline int allow_clk_suspend(struct s3c2410_nand_info *info)
@@ -697,8 +697,6 @@ static int s3c24xx_nand_remove(struct platform_device *pdev)
{
struct s3c2410_nand_info *info = to_nand_info(pdev);
- platform_set_drvdata(pdev, NULL);
-
if (info == NULL)
return 0;
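Many hunks in this patch replace open-coded pdev->dev.platform_data dereferences with dev_get_platdata(). The accessor is equivalent; in <linux/device.h> it is essentially:

static inline void *dev_get_platdata(const struct device *dev)
{
        return dev->platform_data;
}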
diff --git a/drivers/mtd/nand/sh_flctl.c b/drivers/mtd/nand/sh_flctl.c
index e57e18e..a3c84eb 100644
--- a/drivers/mtd/nand/sh_flctl.c
+++ b/drivers/mtd/nand/sh_flctl.c
@@ -137,7 +137,7 @@ static void flctl_setup_dma(struct sh_flctl *flctl)
dma_cap_mask_t mask;
struct dma_slave_config cfg;
struct platform_device *pdev = flctl->pdev;
- struct sh_flctl_platform_data *pdata = pdev->dev.platform_data;
+ struct sh_flctl_platform_data *pdata = dev_get_platdata(&pdev->dev);
int ret;
if (!pdata)
@@ -1131,7 +1131,7 @@ static int flctl_probe(struct platform_device *pdev)
if (pdev->dev.of_node)
pdata = flctl_parse_dt(&pdev->dev);
else
- pdata = pdev->dev.platform_data;
+ pdata = dev_get_platdata(&pdev->dev);
if (!pdata) {
dev_err(&pdev->dev, "no setup data defined\n");
diff --git a/drivers/mtd/nand/sharpsl.c b/drivers/mtd/nand/sharpsl.c
index 127bc42..87908d7 100644
--- a/drivers/mtd/nand/sharpsl.c
+++ b/drivers/mtd/nand/sharpsl.c
@@ -112,7 +112,7 @@ static int sharpsl_nand_probe(struct platform_device *pdev)
struct resource *r;
int err = 0;
struct sharpsl_nand *sharpsl;
- struct sharpsl_nand_platform_data *data = pdev->dev.platform_data;
+ struct sharpsl_nand_platform_data *data = dev_get_platdata(&pdev->dev);
if (!data) {
dev_err(&pdev->dev, "no platform data!\n");
@@ -194,7 +194,6 @@ err_add:
nand_release(&sharpsl->mtd);
err_scan:
- platform_set_drvdata(pdev, NULL);
iounmap(sharpsl->io);
err_ioremap:
err_get_res:
@@ -212,8 +211,6 @@ static int sharpsl_nand_remove(struct platform_device *pdev)
/* Release resources, unregister device */
nand_release(&sharpsl->mtd);
- platform_set_drvdata(pdev, NULL);
-
iounmap(sharpsl->io);
/* Free the MTD device structure */
diff --git a/drivers/mtd/nand/sm_common.c b/drivers/mtd/nand/sm_common.c
index e8181ed..e06b5e5 100644
--- a/drivers/mtd/nand/sm_common.c
+++ b/drivers/mtd/nand/sm_common.c
@@ -42,7 +42,7 @@ static int sm_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
struct mtd_oob_ops ops;
struct sm_oob oob;
- int ret, error = 0;
+ int ret;
memset(&oob, -1, SM_OOB_SIZE);
oob.block_status = 0x0F;
@@ -61,11 +61,10 @@ static int sm_block_markbad(struct mtd_info *mtd, loff_t ofs)
printk(KERN_NOTICE
"sm_common: can't mark sector at %i as bad\n",
(int)ofs);
- error = -EIO;
- } else
- mtd->ecc_stats.badblocks++;
+ return -EIO;
+ }
- return error;
+ return 0;
}
static struct nand_flash_dev nand_smartmedia_flash_ids[] = {
diff --git a/drivers/mtd/nand/tmio_nand.c b/drivers/mtd/nand/tmio_nand.c
index 508e9e0..396530d 100644
--- a/drivers/mtd/nand/tmio_nand.c
+++ b/drivers/mtd/nand/tmio_nand.c
@@ -357,7 +357,7 @@ static void tmio_hw_stop(struct platform_device *dev, struct tmio_nand *tmio)
static int tmio_probe(struct platform_device *dev)
{
- struct tmio_nand_data *data = dev->dev.platform_data;
+ struct tmio_nand_data *data = dev_get_platdata(&dev->dev);
struct resource *fcr = platform_get_resource(dev,
IORESOURCE_MEM, 0);
struct resource *ccr = platform_get_resource(dev,
diff --git a/drivers/mtd/nand/txx9ndfmc.c b/drivers/mtd/nand/txx9ndfmc.c
index 7ed654c..235714a 100644
--- a/drivers/mtd/nand/txx9ndfmc.c
+++ b/drivers/mtd/nand/txx9ndfmc.c
@@ -87,7 +87,7 @@ static struct platform_device *mtd_to_platdev(struct mtd_info *mtd)
static void __iomem *ndregaddr(struct platform_device *dev, unsigned int reg)
{
struct txx9ndfmc_drvdata *drvdata = platform_get_drvdata(dev);
- struct txx9ndfmc_platform_data *plat = dev->dev.platform_data;
+ struct txx9ndfmc_platform_data *plat = dev_get_platdata(&dev->dev);
return drvdata->base + (reg << plat->shift);
}
@@ -138,7 +138,7 @@ static void txx9ndfmc_cmd_ctrl(struct mtd_info *mtd, int cmd,
struct nand_chip *chip = mtd->priv;
struct txx9ndfmc_priv *txx9_priv = chip->priv;
struct platform_device *dev = txx9_priv->dev;
- struct txx9ndfmc_platform_data *plat = dev->dev.platform_data;
+ struct txx9ndfmc_platform_data *plat = dev_get_platdata(&dev->dev);
if (ctrl & NAND_CTRL_CHANGE) {
u32 mcr = txx9ndfmc_read(dev, TXX9_NDFMCR);
@@ -225,7 +225,7 @@ static void txx9ndfmc_enable_hwecc(struct mtd_info *mtd, int mode)
static void txx9ndfmc_initialize(struct platform_device *dev)
{
- struct txx9ndfmc_platform_data *plat = dev->dev.platform_data;
+ struct txx9ndfmc_platform_data *plat = dev_get_platdata(&dev->dev);
struct txx9ndfmc_drvdata *drvdata = platform_get_drvdata(dev);
int tmout = 100;
@@ -274,19 +274,17 @@ static int txx9ndfmc_nand_scan(struct mtd_info *mtd)
static int __init txx9ndfmc_probe(struct platform_device *dev)
{
- struct txx9ndfmc_platform_data *plat = dev->dev.platform_data;
+ struct txx9ndfmc_platform_data *plat = dev_get_platdata(&dev->dev);
int hold, spw;
int i;
struct txx9ndfmc_drvdata *drvdata;
unsigned long gbusclk = plat->gbus_clock;
struct resource *res;
- res = platform_get_resource(dev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENODEV;
drvdata = devm_kzalloc(&dev->dev, sizeof(*drvdata), GFP_KERNEL);
if (!drvdata)
return -ENOMEM;
+ res = platform_get_resource(dev, IORESOURCE_MEM, 0);
drvdata->base = devm_ioremap_resource(&dev->dev, res);
if (IS_ERR(drvdata->base))
return PTR_ERR(drvdata->base);
@@ -387,7 +385,6 @@ static int __exit txx9ndfmc_remove(struct platform_device *dev)
struct txx9ndfmc_drvdata *drvdata = platform_get_drvdata(dev);
int i;
- platform_set_drvdata(dev, NULL);
if (!drvdata)
return 0;
for (i = 0; i < MAX_TXX9NDFMC_DEV; i++) {
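The txx9ndfmc hunk can drop the explicit NULL check on the memory resource because devm_ioremap_resource() performs its own validation: it rejects a NULL resource with ERR_PTR(-EINVAL) and logs an error, so the IS_ERR() test after the call covers the missing-resource case as well. A condensed sketch of the pattern:

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static void __iomem *example_map(struct platform_device *pdev)
{
        struct resource *res;

        /* No need to test res for NULL first: devm_ioremap_resource()
         * returns ERR_PTR(-EINVAL) for a NULL resource itself. */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        return devm_ioremap_resource(&pdev->dev, res);
}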
diff --git a/drivers/mtd/ofpart.c b/drivers/mtd/ofpart.c
index 553d6d6..d64f8c3 100644
--- a/drivers/mtd/ofpart.c
+++ b/drivers/mtd/ofpart.c
@@ -20,6 +20,11 @@
#include <linux/slab.h>
#include <linux/mtd/partitions.h>
+static bool node_has_compatible(struct device_node *pp)
+{
+ return of_get_property(pp, "compatible", NULL);
+}
+
static int parse_ofpart_partitions(struct mtd_info *master,
struct mtd_partition **pparts,
struct mtd_part_parser_data *data)
@@ -38,10 +43,13 @@ static int parse_ofpart_partitions(struct mtd_info *master,
return 0;
/* First count the subnodes */
- pp = NULL;
nr_parts = 0;
- while ((pp = of_get_next_child(node, pp)))
+ for_each_child_of_node(node, pp) {
+ if (node_has_compatible(pp))
+ continue;
+
nr_parts++;
+ }
if (nr_parts == 0)
return 0;
@@ -50,13 +58,15 @@ static int parse_ofpart_partitions(struct mtd_info *master,
if (!*pparts)
return -ENOMEM;
- pp = NULL;
i = 0;
- while ((pp = of_get_next_child(node, pp))) {
+ for_each_child_of_node(node, pp) {
const __be32 *reg;
int len;
int a_cells, s_cells;
+ if (node_has_compatible(pp))
+ continue;
+
reg = of_get_property(pp, "reg", &len);
if (!reg) {
nr_parts--;
diff --git a/drivers/mtd/onenand/generic.c b/drivers/mtd/onenand/generic.c
index 9f11562..63699ff 100644
--- a/drivers/mtd/onenand/generic.c
+++ b/drivers/mtd/onenand/generic.c
@@ -38,7 +38,7 @@ struct onenand_info {
static int generic_onenand_probe(struct platform_device *pdev)
{
struct onenand_info *info;
- struct onenand_platform_data *pdata = pdev->dev.platform_data;
+ struct onenand_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct resource *res = pdev->resource;
unsigned long size = resource_size(res);
int err;
@@ -94,8 +94,6 @@ static int generic_onenand_remove(struct platform_device *pdev)
struct resource *res = pdev->resource;
unsigned long size = resource_size(res);
- platform_set_drvdata(pdev, NULL);
-
if (info) {
onenand_release(&info->mtd);
release_mem_region(res->start, size);
diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c
index d98b198..558071b 100644
--- a/drivers/mtd/onenand/omap2.c
+++ b/drivers/mtd/onenand/omap2.c
@@ -639,7 +639,7 @@ static int omap2_onenand_probe(struct platform_device *pdev)
struct resource *res;
struct mtd_part_parser_data ppdata = {};
- pdata = pdev->dev.platform_data;
+ pdata = dev_get_platdata(&pdev->dev);
if (pdata == NULL) {
dev_err(&pdev->dev, "platform data missing\n");
return -ENODEV;
@@ -810,7 +810,6 @@ static int omap2_onenand_remove(struct platform_device *pdev)
if (c->dma_channel != -1)
omap_free_dma(c->dma_channel);
omap2_onenand_shutdown(pdev);
- platform_set_drvdata(pdev, NULL);
if (c->gpio_irq) {
free_irq(gpio_to_irq(c->gpio_irq), c);
gpio_free(c->gpio_irq);
diff --git a/drivers/mtd/onenand/onenand_bbt.c b/drivers/mtd/onenand/onenand_bbt.c
index 66fe3b7..08d0085 100644
--- a/drivers/mtd/onenand/onenand_bbt.c
+++ b/drivers/mtd/onenand/onenand_bbt.c
@@ -133,7 +133,6 @@ static inline int onenand_memory_bbt (struct mtd_info *mtd, struct nand_bbt_desc
{
struct onenand_chip *this = mtd->priv;
- bd->options &= ~NAND_BBT_SCANEMPTY;
return create_bbt(mtd, this->page_buf, bd, -1);
}
diff --git a/drivers/mtd/onenand/samsung.c b/drivers/mtd/onenand/samsung.c
index 2cf7408..df7400d 100644
--- a/drivers/mtd/onenand/samsung.c
+++ b/drivers/mtd/onenand/samsung.c
@@ -867,7 +867,7 @@ static int s3c_onenand_probe(struct platform_device *pdev)
struct resource *r;
int size, err;
- pdata = pdev->dev.platform_data;
+ pdata = dev_get_platdata(&pdev->dev);
/* No need to check pdata. the platform data is optional */
size = sizeof(struct mtd_info) + sizeof(struct onenand_chip);
@@ -1073,7 +1073,6 @@ static int s3c_onenand_remove(struct platform_device *pdev)
release_mem_region(onenand->base_res->start,
resource_size(onenand->base_res));
- platform_set_drvdata(pdev, NULL);
kfree(onenand->oob_buf);
kfree(onenand->page_buf);
kfree(onenand);
diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
index f9d5615..4b8e895 100644
--- a/drivers/mtd/sm_ftl.c
+++ b/drivers/mtd/sm_ftl.c
@@ -22,7 +22,7 @@
-struct workqueue_struct *cache_flush_workqueue;
+static struct workqueue_struct *cache_flush_workqueue;
static int cache_timeout = 1000;
module_param(cache_timeout, int, S_IRUGO);
@@ -41,7 +41,7 @@ struct sm_sysfs_attribute {
int len;
};
-ssize_t sm_attr_show(struct device *dev, struct device_attribute *attr,
+static ssize_t sm_attr_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct sm_sysfs_attribute *sm_attr =
@@ -54,7 +54,7 @@ ssize_t sm_attr_show(struct device *dev, struct device_attribute *attr,
#define NUM_ATTRIBUTES 1
#define SM_CIS_VENDOR_OFFSET 0x59
-struct attribute_group *sm_create_sysfs_attributes(struct sm_ftl *ftl)
+static struct attribute_group *sm_create_sysfs_attributes(struct sm_ftl *ftl)
{
struct attribute_group *attr_group;
struct attribute **attributes;
@@ -107,7 +107,7 @@ error1:
return NULL;
}
-void sm_delete_sysfs_attributes(struct sm_ftl *ftl)
+static void sm_delete_sysfs_attributes(struct sm_ftl *ftl)
{
struct attribute **attributes = ftl->disk_attributes->attrs;
int i;
@@ -571,7 +571,7 @@ static const uint8_t cis_signature[] = {
};
/* Find out media parameters.
* This ideally has to be based on nand id, but for now device size is enough */
-int sm_get_media_info(struct sm_ftl *ftl, struct mtd_info *mtd)
+static int sm_get_media_info(struct sm_ftl *ftl, struct mtd_info *mtd)
{
int i;
int size_in_megs = mtd->size / (1024 * 1024);
@@ -878,7 +878,7 @@ static int sm_init_zone(struct sm_ftl *ftl, int zone_num)
}
/* Get and automatically initialize an FTL mapping for one zone */
-struct ftl_zone *sm_get_zone(struct sm_ftl *ftl, int zone_num)
+static struct ftl_zone *sm_get_zone(struct sm_ftl *ftl, int zone_num)
{
struct ftl_zone *zone;
int error;
@@ -899,7 +899,7 @@ struct ftl_zone *sm_get_zone(struct sm_ftl *ftl, int zone_num)
/* ----------------- cache handling ------------------------------------------*/
/* Initialize the one block cache */
-void sm_cache_init(struct sm_ftl *ftl)
+static void sm_cache_init(struct sm_ftl *ftl)
{
ftl->cache_data_invalid_bitmap = 0xFFFFFFFF;
ftl->cache_clean = 1;
@@ -909,7 +909,7 @@ void sm_cache_init(struct sm_ftl *ftl)
}
/* Put sector in one block cache */
-void sm_cache_put(struct sm_ftl *ftl, char *buffer, int boffset)
+static void sm_cache_put(struct sm_ftl *ftl, char *buffer, int boffset)
{
memcpy(ftl->cache_data + boffset, buffer, SM_SECTOR_SIZE);
clear_bit(boffset / SM_SECTOR_SIZE, &ftl->cache_data_invalid_bitmap);
@@ -917,7 +917,7 @@ void sm_cache_put(struct sm_ftl *ftl, char *buffer, int boffset)
}
/* Read a sector from the cache */
-int sm_cache_get(struct sm_ftl *ftl, char *buffer, int boffset)
+static int sm_cache_get(struct sm_ftl *ftl, char *buffer, int boffset)
{
if (test_bit(boffset / SM_SECTOR_SIZE,
&ftl->cache_data_invalid_bitmap))
@@ -928,7 +928,7 @@ int sm_cache_get(struct sm_ftl *ftl, char *buffer, int boffset)
}
/* Write the cache to hardware */
-int sm_cache_flush(struct sm_ftl *ftl)
+static int sm_cache_flush(struct sm_ftl *ftl)
{
struct ftl_zone *zone;
@@ -1274,10 +1274,10 @@ static struct mtd_blktrans_ops sm_ftl_ops = {
static __init int sm_module_init(void)
{
int error = 0;
- cache_flush_workqueue = create_freezable_workqueue("smflush");
- if (IS_ERR(cache_flush_workqueue))
- return PTR_ERR(cache_flush_workqueue);
+ cache_flush_workqueue = create_freezable_workqueue("smflush");
+ if (!cache_flush_workqueue)
+ return -ENOMEM;
error = register_mtd_blktrans(&sm_ftl_ops);
if (error)
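The sm_ftl fix above matters because create_freezable_workqueue() is an alloc_workqueue() wrapper that reports failure with NULL, never with an ERR_PTR value, so the old IS_ERR()/PTR_ERR() pair could not detect an allocation failure. The corrected pattern, as a minimal sketch:

#include <linux/workqueue.h>

static struct workqueue_struct *wq;

static int __init example_init(void)
{
        /* alloc_workqueue() and its wrappers return NULL on failure */
        wq = create_freezable_workqueue("example");
        if (!wq)
                return -ENOMEM;
        return 0;
}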
diff --git a/drivers/mtd/tests/Makefile b/drivers/mtd/tests/Makefile
index bd0065c..937a829 100644
--- a/drivers/mtd/tests/Makefile
+++ b/drivers/mtd/tests/Makefile
@@ -7,3 +7,12 @@ obj-$(CONFIG_MTD_TESTS) += mtd_subpagetest.o
obj-$(CONFIG_MTD_TESTS) += mtd_torturetest.o
obj-$(CONFIG_MTD_TESTS) += mtd_nandecctest.o
obj-$(CONFIG_MTD_TESTS) += mtd_nandbiterrs.o
+
+mtd_oobtest-objs := oobtest.o mtd_test.o
+mtd_pagetest-objs := pagetest.o mtd_test.o
+mtd_readtest-objs := readtest.o mtd_test.o
+mtd_speedtest-objs := speedtest.o mtd_test.o
+mtd_stresstest-objs := stresstest.o mtd_test.o
+mtd_subpagetest-objs := subpagetest.o mtd_test.o
+mtd_torturetest-objs := torturetest.o mtd_test.o
+mtd_nandbiterrs-objs := nandbiterrs.o mtd_test.o
diff --git a/drivers/mtd/tests/mtd_test.c b/drivers/mtd/tests/mtd_test.c
new file mode 100644
index 0000000..c818a63
--- /dev/null
+++ b/drivers/mtd/tests/mtd_test.c
@@ -0,0 +1,114 @@
+#define pr_fmt(fmt) "mtd_test: " fmt
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/printk.h>
+
+#include "mtd_test.h"
+
+int mtdtest_erase_eraseblock(struct mtd_info *mtd, unsigned int ebnum)
+{
+ int err;
+ struct erase_info ei;
+ loff_t addr = ebnum * mtd->erasesize;
+
+ memset(&ei, 0, sizeof(struct erase_info));
+ ei.mtd = mtd;
+ ei.addr = addr;
+ ei.len = mtd->erasesize;
+
+ err = mtd_erase(mtd, &ei);
+ if (err) {
+ pr_info("error %d while erasing EB %d\n", err, ebnum);
+ return err;
+ }
+
+ if (ei.state == MTD_ERASE_FAILED) {
+ pr_info("some erase error occurred at EB %d\n", ebnum);
+ return -EIO;
+ }
+ return 0;
+}
+
+static int is_block_bad(struct mtd_info *mtd, unsigned int ebnum)
+{
+ int ret;
+ loff_t addr = ebnum * mtd->erasesize;
+
+ ret = mtd_block_isbad(mtd, addr);
+ if (ret)
+ pr_info("block %d is bad\n", ebnum);
+
+ return ret;
+}
+
+int mtdtest_scan_for_bad_eraseblocks(struct mtd_info *mtd, unsigned char *bbt,
+ unsigned int eb, int ebcnt)
+{
+ int i, bad = 0;
+
+ if (!mtd_can_have_bb(mtd))
+ return 0;
+
+ pr_info("scanning for bad eraseblocks\n");
+ for (i = 0; i < ebcnt; ++i) {
+ bbt[i] = is_block_bad(mtd, eb + i) ? 1 : 0;
+ if (bbt[i])
+ bad += 1;
+ cond_resched();
+ }
+ pr_info("scanned %d eraseblocks, %d are bad\n", i, bad);
+
+ return 0;
+}
+
+int mtdtest_erase_good_eraseblocks(struct mtd_info *mtd, unsigned char *bbt,
+ unsigned int eb, int ebcnt)
+{
+ int err;
+ unsigned int i;
+
+ for (i = 0; i < ebcnt; ++i) {
+ if (bbt[i])
+ continue;
+ err = mtdtest_erase_eraseblock(mtd, eb + i);
+ if (err)
+ return err;
+ cond_resched();
+ }
+
+ return 0;
+}
+
+int mtdtest_read(struct mtd_info *mtd, loff_t addr, size_t size, void *buf)
+{
+ size_t read;
+ int err;
+
+ err = mtd_read(mtd, addr, size, &read, buf);
+ /* Ignore corrected ECC errors */
+ if (mtd_is_bitflip(err))
+ err = 0;
+ if (!err && read != size)
+ err = -EIO;
+ if (err)
+ pr_err("error: read failed at %#llx\n", addr);
+
+ return err;
+}
+
+int mtdtest_write(struct mtd_info *mtd, loff_t addr, size_t size,
+ const void *buf)
+{
+ size_t written;
+ int err;
+
+ err = mtd_write(mtd, addr, size, &written, buf);
+ if (!err && written != size)
+ err = -EIO;
+ if (err)
+ pr_err("error: write failed at %#llx\n", addr);
+
+ return err;
+}
diff --git a/drivers/mtd/tests/mtd_test.h b/drivers/mtd/tests/mtd_test.h
new file mode 100644
index 0000000..f437c77
--- /dev/null
+++ b/drivers/mtd/tests/mtd_test.h
@@ -0,0 +1,11 @@
+#include <linux/mtd/mtd.h>
+
+int mtdtest_erase_eraseblock(struct mtd_info *mtd, unsigned int ebnum);
+int mtdtest_scan_for_bad_eraseblocks(struct mtd_info *mtd, unsigned char *bbt,
+ unsigned int eb, int ebcnt);
+int mtdtest_erase_good_eraseblocks(struct mtd_info *mtd, unsigned char *bbt,
+ unsigned int eb, int ebcnt);
+
+int mtdtest_read(struct mtd_info *mtd, loff_t addr, size_t size, void *buf);
+int mtdtest_write(struct mtd_info *mtd, loff_t addr, size_t size,
+ const void *buf);
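A hypothetical sequence showing how the converted tests below use these shared helpers; mtd and ebcnt stand in for the module-level state each test already keeps:

#include <linux/mtd/mtd.h>
#include <linux/slab.h>
#include "mtd_test.h"

static struct mtd_info *mtd;    /* set from get_mtd_device() */
static int ebcnt;               /* number of eraseblocks under test */

static int example_run(void)
{
        unsigned char *bbt;
        int err;

        bbt = kzalloc(ebcnt, GFP_KERNEL);
        if (!bbt)
                return -ENOMEM;

        /* record bad blocks, then erase only the good ones */
        err = mtdtest_scan_for_bad_eraseblocks(mtd, bbt, 0, ebcnt);
        if (!err)
                err = mtdtest_erase_good_eraseblocks(mtd, bbt, 0, ebcnt);

        kfree(bbt);
        return err;
}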
diff --git a/drivers/mtd/tests/mtd_nandbiterrs.c b/drivers/mtd/tests/nandbiterrs.c
index 207bf9a..3cd3aab 100644
--- a/drivers/mtd/tests/mtd_nandbiterrs.c
+++ b/drivers/mtd/tests/nandbiterrs.c
@@ -49,6 +49,7 @@
#include <linux/err.h>
#include <linux/mtd/nand.h>
#include <linux/slab.h>
+#include "mtd_test.h"
static int dev;
module_param(dev, int, S_IRUGO);
@@ -98,47 +99,13 @@ static uint8_t hash(unsigned offset)
return c;
}
-static int erase_block(void)
-{
- int err;
- struct erase_info ei;
- loff_t addr = eraseblock * mtd->erasesize;
-
- pr_info("erase_block\n");
-
- memset(&ei, 0, sizeof(struct erase_info));
- ei.mtd = mtd;
- ei.addr = addr;
- ei.len = mtd->erasesize;
-
- err = mtd_erase(mtd, &ei);
- if (err || ei.state == MTD_ERASE_FAILED) {
- pr_err("error %d while erasing\n", err);
- if (!err)
- err = -EIO;
- return err;
- }
-
- return 0;
-}
-
/* Writes wbuffer to page */
static int write_page(int log)
{
- int err = 0;
- size_t written;
-
if (log)
pr_info("write_page\n");
- err = mtd_write(mtd, offset, mtd->writesize, &written, wbuffer);
- if (err || written != mtd->writesize) {
- pr_err("error: write failed at %#llx\n", (long long)offset);
- if (!err)
- err = -EIO;
- }
-
- return err;
+ return mtdtest_write(mtd, offset, mtd->writesize, wbuffer);
}
/* Re-writes the data area while leaving the OOB alone. */
@@ -415,7 +382,7 @@ static int __init mtd_nandbiterrs_init(void)
goto exit_rbuffer;
}
- err = erase_block();
+ err = mtdtest_erase_eraseblock(mtd, eraseblock);
if (err)
goto exit_error;
@@ -428,7 +395,7 @@ static int __init mtd_nandbiterrs_init(void)
goto exit_error;
/* We leave the block un-erased in case of test failure. */
- err = erase_block();
+ err = mtdtest_erase_eraseblock(mtd, eraseblock);
if (err)
goto exit_error;
diff --git a/drivers/mtd/tests/mtd_oobtest.c b/drivers/mtd/tests/oobtest.c
index 3e24b37..ff35c46 100644
--- a/drivers/mtd/tests/mtd_oobtest.c
+++ b/drivers/mtd/tests/oobtest.c
@@ -31,6 +31,8 @@
#include <linux/sched.h>
#include <linux/random.h>
+#include "mtd_test.h"
+
static int dev = -EINVAL;
module_param(dev, int, S_IRUGO);
MODULE_PARM_DESC(dev, "MTD device number to use");
@@ -49,49 +51,6 @@ static int use_len_max;
static int vary_offset;
static struct rnd_state rnd_state;
-static int erase_eraseblock(int ebnum)
-{
- int err;
- struct erase_info ei;
- loff_t addr = ebnum * mtd->erasesize;
-
- memset(&ei, 0, sizeof(struct erase_info));
- ei.mtd = mtd;
- ei.addr = addr;
- ei.len = mtd->erasesize;
-
- err = mtd_erase(mtd, &ei);
- if (err) {
- pr_err("error %d while erasing EB %d\n", err, ebnum);
- return err;
- }
-
- if (ei.state == MTD_ERASE_FAILED) {
- pr_err("some erase error occurred at EB %d\n", ebnum);
- return -EIO;
- }
-
- return 0;
-}
-
-static int erase_whole_device(void)
-{
- int err;
- unsigned int i;
-
- pr_info("erasing whole device\n");
- for (i = 0; i < ebcnt; ++i) {
- if (bbt[i])
- continue;
- err = erase_eraseblock(i);
- if (err)
- return err;
- cond_resched();
- }
- pr_info("erased %u eraseblocks\n", i);
- return 0;
-}
-
static void do_vary_offset(void)
{
use_len -= 1;
@@ -304,38 +263,6 @@ static int verify_all_eraseblocks(void)
return 0;
}
-static int is_block_bad(int ebnum)
-{
- int ret;
- loff_t addr = ebnum * mtd->erasesize;
-
- ret = mtd_block_isbad(mtd, addr);
- if (ret)
- pr_info("block %d is bad\n", ebnum);
- return ret;
-}
-
-static int scan_for_bad_eraseblocks(void)
-{
- int i, bad = 0;
-
- bbt = kmalloc(ebcnt, GFP_KERNEL);
- if (!bbt) {
- pr_err("error: cannot allocate memory\n");
- return -ENOMEM;
- }
-
- pr_info("scanning for bad eraseblocks\n");
- for (i = 0; i < ebcnt; ++i) {
- bbt[i] = is_block_bad(i) ? 1 : 0;
- if (bbt[i])
- bad += 1;
- cond_resched();
- }
- pr_info("scanned %d eraseblocks, %d are bad\n", i, bad);
- return 0;
-}
-
static int __init mtd_oobtest_init(void)
{
int err = 0;
@@ -380,17 +307,16 @@ static int __init mtd_oobtest_init(void)
err = -ENOMEM;
readbuf = kmalloc(mtd->erasesize, GFP_KERNEL);
- if (!readbuf) {
- pr_err("error: cannot allocate memory\n");
+ if (!readbuf)
goto out;
- }
writebuf = kmalloc(mtd->erasesize, GFP_KERNEL);
- if (!writebuf) {
- pr_err("error: cannot allocate memory\n");
+ if (!writebuf)
+ goto out;
+ bbt = kzalloc(ebcnt, GFP_KERNEL);
+ if (!bbt)
goto out;
- }
- err = scan_for_bad_eraseblocks();
+ err = mtdtest_scan_for_bad_eraseblocks(mtd, bbt, 0, ebcnt);
if (err)
goto out;
@@ -402,7 +328,7 @@ static int __init mtd_oobtest_init(void)
/* First test: write all OOB, read it back and verify */
pr_info("test 1 of 5\n");
- err = erase_whole_device();
+ err = mtdtest_erase_good_eraseblocks(mtd, bbt, 0, ebcnt);
if (err)
goto out;
@@ -422,7 +348,7 @@ static int __init mtd_oobtest_init(void)
*/
pr_info("test 2 of 5\n");
- err = erase_whole_device();
+ err = mtdtest_erase_good_eraseblocks(mtd, bbt, 0, ebcnt);
if (err)
goto out;
@@ -452,7 +378,7 @@ static int __init mtd_oobtest_init(void)
*/
pr_info("test 3 of 5\n");
- err = erase_whole_device();
+ err = mtdtest_erase_good_eraseblocks(mtd, bbt, 0, ebcnt);
if (err)
goto out;
@@ -485,7 +411,7 @@ static int __init mtd_oobtest_init(void)
/* Fourth test: try to write off end of device */
pr_info("test 4 of 5\n");
- err = erase_whole_device();
+ err = mtdtest_erase_good_eraseblocks(mtd, bbt, 0, ebcnt);
if (err)
goto out;
@@ -577,7 +503,7 @@ static int __init mtd_oobtest_init(void)
errcnt += 1;
}
- err = erase_eraseblock(ebcnt - 1);
+ err = mtdtest_erase_eraseblock(mtd, ebcnt - 1);
if (err)
goto out;
@@ -626,7 +552,7 @@ static int __init mtd_oobtest_init(void)
pr_info("test 5 of 5\n");
/* Erase all eraseblocks */
- err = erase_whole_device();
+ err = mtdtest_erase_good_eraseblocks(mtd, bbt, 0, ebcnt);
if (err)
goto out;
diff --git a/drivers/mtd/tests/mtd_pagetest.c b/drivers/mtd/tests/pagetest.c
index 0c1140b..44b96e9 100644
--- a/drivers/mtd/tests/mtd_pagetest.c
+++ b/drivers/mtd/tests/pagetest.c
@@ -31,6 +31,8 @@
#include <linux/sched.h>
#include <linux/random.h>
+#include "mtd_test.h"
+
static int dev = -EINVAL;
module_param(dev, int, S_IRUGO);
MODULE_PARM_DESC(dev, "MTD device number to use");
@@ -48,52 +50,18 @@ static int pgcnt;
static int errcnt;
static struct rnd_state rnd_state;
-static int erase_eraseblock(int ebnum)
-{
- int err;
- struct erase_info ei;
- loff_t addr = ebnum * mtd->erasesize;
-
- memset(&ei, 0, sizeof(struct erase_info));
- ei.mtd = mtd;
- ei.addr = addr;
- ei.len = mtd->erasesize;
-
- err = mtd_erase(mtd, &ei);
- if (err) {
- pr_err("error %d while erasing EB %d\n", err, ebnum);
- return err;
- }
-
- if (ei.state == MTD_ERASE_FAILED) {
- pr_err("some erase error occurred at EB %d\n",
- ebnum);
- return -EIO;
- }
-
- return 0;
-}
-
static int write_eraseblock(int ebnum)
{
- int err = 0;
- size_t written;
loff_t addr = ebnum * mtd->erasesize;
prandom_bytes_state(&rnd_state, writebuf, mtd->erasesize);
cond_resched();
- err = mtd_write(mtd, addr, mtd->erasesize, &written, writebuf);
- if (err || written != mtd->erasesize)
- pr_err("error: write failed at %#llx\n",
- (long long)addr);
-
- return err;
+ return mtdtest_write(mtd, addr, mtd->erasesize, writebuf);
}
static int verify_eraseblock(int ebnum)
{
uint32_t j;
- size_t read;
int err = 0, i;
loff_t addr0, addrn;
loff_t addr = ebnum * mtd->erasesize;
@@ -109,31 +77,16 @@ static int verify_eraseblock(int ebnum)
prandom_bytes_state(&rnd_state, writebuf, mtd->erasesize);
for (j = 0; j < pgcnt - 1; ++j, addr += pgsize) {
/* Do a read to set the internal dataRAMs to different data */
- err = mtd_read(mtd, addr0, bufsize, &read, twopages);
- if (mtd_is_bitflip(err))
- err = 0;
- if (err || read != bufsize) {
- pr_err("error: read failed at %#llx\n",
- (long long)addr0);
+ err = mtdtest_read(mtd, addr0, bufsize, twopages);
+ if (err)
return err;
- }
- err = mtd_read(mtd, addrn - bufsize, bufsize, &read, twopages);
- if (mtd_is_bitflip(err))
- err = 0;
- if (err || read != bufsize) {
- pr_err("error: read failed at %#llx\n",
- (long long)(addrn - bufsize));
+ err = mtdtest_read(mtd, addrn - bufsize, bufsize, twopages);
+ if (err)
return err;
- }
memset(twopages, 0, bufsize);
- err = mtd_read(mtd, addr, bufsize, &read, twopages);
- if (mtd_is_bitflip(err))
- err = 0;
- if (err || read != bufsize) {
- pr_err("error: read failed at %#llx\n",
- (long long)addr);
+ err = mtdtest_read(mtd, addr, bufsize, twopages);
+ if (err)
break;
- }
if (memcmp(twopages, writebuf + (j * pgsize), bufsize)) {
pr_err("error: verify failed at %#llx\n",
(long long)addr);
@@ -145,31 +98,16 @@ static int verify_eraseblock(int ebnum)
struct rnd_state old_state = rnd_state;
/* Do a read to set the internal dataRAMs to different data */
- err = mtd_read(mtd, addr0, bufsize, &read, twopages);
- if (mtd_is_bitflip(err))
- err = 0;
- if (err || read != bufsize) {
- pr_err("error: read failed at %#llx\n",
- (long long)addr0);
+ err = mtdtest_read(mtd, addr0, bufsize, twopages);
+ if (err)
return err;
- }
- err = mtd_read(mtd, addrn - bufsize, bufsize, &read, twopages);
- if (mtd_is_bitflip(err))
- err = 0;
- if (err || read != bufsize) {
- pr_err("error: read failed at %#llx\n",
- (long long)(addrn - bufsize));
+ err = mtdtest_read(mtd, addrn - bufsize, bufsize, twopages);
+ if (err)
return err;
- }
memset(twopages, 0, bufsize);
- err = mtd_read(mtd, addr, bufsize, &read, twopages);
- if (mtd_is_bitflip(err))
- err = 0;
- if (err || read != bufsize) {
- pr_err("error: read failed at %#llx\n",
- (long long)addr);
+ err = mtdtest_read(mtd, addr, bufsize, twopages);
+ if (err)
return err;
- }
memcpy(boundary, writebuf + mtd->erasesize - pgsize, pgsize);
prandom_bytes_state(&rnd_state, boundary + pgsize, pgsize);
if (memcmp(twopages, boundary, bufsize)) {
@@ -184,17 +122,14 @@ static int verify_eraseblock(int ebnum)
static int crosstest(void)
{
- size_t read;
int err = 0, i;
loff_t addr, addr0, addrn;
unsigned char *pp1, *pp2, *pp3, *pp4;
pr_info("crosstest\n");
pp1 = kmalloc(pgsize * 4, GFP_KERNEL);
- if (!pp1) {
- pr_err("error: cannot allocate memory\n");
+ if (!pp1)
return -ENOMEM;
- }
pp2 = pp1 + pgsize;
pp3 = pp2 + pgsize;
pp4 = pp3 + pgsize;
@@ -210,24 +145,16 @@ static int crosstest(void)
/* Read 2nd-to-last page to pp1 */
addr = addrn - pgsize - pgsize;
- err = mtd_read(mtd, addr, pgsize, &read, pp1);
- if (mtd_is_bitflip(err))
- err = 0;
- if (err || read != pgsize) {
- pr_err("error: read failed at %#llx\n",
- (long long)addr);
+ err = mtdtest_read(mtd, addr, pgsize, pp1);
+ if (err) {
kfree(pp1);
return err;
}
/* Read 3rd-to-last page to pp1 */
addr = addrn - pgsize - pgsize - pgsize;
- err = mtd_read(mtd, addr, pgsize, &read, pp1);
- if (mtd_is_bitflip(err))
- err = 0;
- if (err || read != pgsize) {
- pr_err("error: read failed at %#llx\n",
- (long long)addr);
+ err = mtdtest_read(mtd, addr, pgsize, pp1);
+ if (err) {
kfree(pp1);
return err;
}
@@ -235,12 +162,8 @@ static int crosstest(void)
/* Read first page to pp2 */
addr = addr0;
pr_info("reading page at %#llx\n", (long long)addr);
- err = mtd_read(mtd, addr, pgsize, &read, pp2);
- if (mtd_is_bitflip(err))
- err = 0;
- if (err || read != pgsize) {
- pr_err("error: read failed at %#llx\n",
- (long long)addr);
+ err = mtdtest_read(mtd, addr, pgsize, pp2);
+ if (err) {
kfree(pp1);
return err;
}
@@ -248,12 +171,8 @@ static int crosstest(void)
/* Read last page to pp3 */
addr = addrn - pgsize;
pr_info("reading page at %#llx\n", (long long)addr);
- err = mtd_read(mtd, addr, pgsize, &read, pp3);
- if (mtd_is_bitflip(err))
- err = 0;
- if (err || read != pgsize) {
- pr_err("error: read failed at %#llx\n",
- (long long)addr);
+ err = mtdtest_read(mtd, addr, pgsize, pp3);
+ if (err) {
kfree(pp1);
return err;
}
@@ -261,12 +180,8 @@ static int crosstest(void)
/* Read first page again to pp4 */
addr = addr0;
pr_info("reading page at %#llx\n", (long long)addr);
- err = mtd_read(mtd, addr, pgsize, &read, pp4);
- if (mtd_is_bitflip(err))
- err = 0;
- if (err || read != pgsize) {
- pr_err("error: read failed at %#llx\n",
- (long long)addr);
+ err = mtdtest_read(mtd, addr, pgsize, pp4);
+ if (err) {
kfree(pp1);
return err;
}
@@ -285,7 +200,6 @@ static int crosstest(void)
static int erasecrosstest(void)
{
- size_t read, written;
int err = 0, i, ebnum, ebnum2;
loff_t addr0;
char *readbuf = twopages;
@@ -304,30 +218,22 @@ static int erasecrosstest(void)
ebnum2 -= 1;
pr_info("erasing block %d\n", ebnum);
- err = erase_eraseblock(ebnum);
+ err = mtdtest_erase_eraseblock(mtd, ebnum);
if (err)
return err;
pr_info("writing 1st page of block %d\n", ebnum);
prandom_bytes_state(&rnd_state, writebuf, pgsize);
strcpy(writebuf, "There is no data like this!");
- err = mtd_write(mtd, addr0, pgsize, &written, writebuf);
- if (err || written != pgsize) {
- pr_info("error: write failed at %#llx\n",
- (long long)addr0);
- return err ? err : -1;
- }
+ err = mtdtest_write(mtd, addr0, pgsize, writebuf);
+ if (err)
+ return err;
pr_info("reading 1st page of block %d\n", ebnum);
memset(readbuf, 0, pgsize);
- err = mtd_read(mtd, addr0, pgsize, &read, readbuf);
- if (mtd_is_bitflip(err))
- err = 0;
- if (err || read != pgsize) {
- pr_err("error: read failed at %#llx\n",
- (long long)addr0);
- return err ? err : -1;
- }
+ err = mtdtest_read(mtd, addr0, pgsize, readbuf);
+ if (err)
+ return err;
pr_info("verifying 1st page of block %d\n", ebnum);
if (memcmp(writebuf, readbuf, pgsize)) {
@@ -337,35 +243,27 @@ static int erasecrosstest(void)
}
pr_info("erasing block %d\n", ebnum);
- err = erase_eraseblock(ebnum);
+ err = mtdtest_erase_eraseblock(mtd, ebnum);
if (err)
return err;
pr_info("writing 1st page of block %d\n", ebnum);
prandom_bytes_state(&rnd_state, writebuf, pgsize);
strcpy(writebuf, "There is no data like this!");
- err = mtd_write(mtd, addr0, pgsize, &written, writebuf);
- if (err || written != pgsize) {
- pr_err("error: write failed at %#llx\n",
- (long long)addr0);
- return err ? err : -1;
- }
+ err = mtdtest_write(mtd, addr0, pgsize, writebuf);
+ if (err)
+ return err;
pr_info("erasing block %d\n", ebnum2);
- err = erase_eraseblock(ebnum2);
+ err = mtdtest_erase_eraseblock(mtd, ebnum2);
if (err)
return err;
pr_info("reading 1st page of block %d\n", ebnum);
memset(readbuf, 0, pgsize);
- err = mtd_read(mtd, addr0, pgsize, &read, readbuf);
- if (mtd_is_bitflip(err))
- err = 0;
- if (err || read != pgsize) {
- pr_err("error: read failed at %#llx\n",
- (long long)addr0);
- return err ? err : -1;
- }
+ err = mtdtest_read(mtd, addr0, pgsize, readbuf);
+ if (err)
+ return err;
pr_info("verifying 1st page of block %d\n", ebnum);
if (memcmp(writebuf, readbuf, pgsize)) {
@@ -381,7 +279,6 @@ static int erasecrosstest(void)
static int erasetest(void)
{
- size_t read, written;
int err = 0, i, ebnum, ok = 1;
loff_t addr0;
@@ -395,33 +292,25 @@ static int erasetest(void)
}
pr_info("erasing block %d\n", ebnum);
- err = erase_eraseblock(ebnum);
+ err = mtdtest_erase_eraseblock(mtd, ebnum);
if (err)
return err;
pr_info("writing 1st page of block %d\n", ebnum);
prandom_bytes_state(&rnd_state, writebuf, pgsize);
- err = mtd_write(mtd, addr0, pgsize, &written, writebuf);
- if (err || written != pgsize) {
- pr_err("error: write failed at %#llx\n",
- (long long)addr0);
- return err ? err : -1;
- }
+ err = mtdtest_write(mtd, addr0, pgsize, writebuf);
+ if (err)
+ return err;
pr_info("erasing block %d\n", ebnum);
- err = erase_eraseblock(ebnum);
+ err = mtdtest_erase_eraseblock(mtd, ebnum);
if (err)
return err;
pr_info("reading 1st page of block %d\n", ebnum);
- err = mtd_read(mtd, addr0, pgsize, &read, twopages);
- if (mtd_is_bitflip(err))
- err = 0;
- if (err || read != pgsize) {
- pr_err("error: read failed at %#llx\n",
- (long long)addr0);
- return err ? err : -1;
- }
+ err = mtdtest_read(mtd, addr0, pgsize, twopages);
+ if (err)
+ return err;
pr_info("verifying 1st page of block %d is all 0xff\n",
ebnum);
@@ -440,38 +329,6 @@ static int erasetest(void)
return err;
}
-static int is_block_bad(int ebnum)
-{
- loff_t addr = ebnum * mtd->erasesize;
- int ret;
-
- ret = mtd_block_isbad(mtd, addr);
- if (ret)
- pr_info("block %d is bad\n", ebnum);
- return ret;
-}
-
-static int scan_for_bad_eraseblocks(void)
-{
- int i, bad = 0;
-
- bbt = kzalloc(ebcnt, GFP_KERNEL);
- if (!bbt) {
- pr_err("error: cannot allocate memory\n");
- return -ENOMEM;
- }
-
- pr_info("scanning for bad eraseblocks\n");
- for (i = 0; i < ebcnt; ++i) {
- bbt[i] = is_block_bad(i) ? 1 : 0;
- if (bbt[i])
- bad += 1;
- cond_resched();
- }
- pr_info("scanned %d eraseblocks, %d are bad\n", i, bad);
- return 0;
-}
-
static int __init mtd_pagetest_init(void)
{
int err = 0;
@@ -516,36 +373,28 @@ static int __init mtd_pagetest_init(void)
err = -ENOMEM;
bufsize = pgsize * 2;
writebuf = kmalloc(mtd->erasesize, GFP_KERNEL);
- if (!writebuf) {
- pr_err("error: cannot allocate memory\n");
+ if (!writebuf)
goto out;
- }
twopages = kmalloc(bufsize, GFP_KERNEL);
- if (!twopages) {
- pr_err("error: cannot allocate memory\n");
+ if (!twopages)
goto out;
- }
boundary = kmalloc(bufsize, GFP_KERNEL);
- if (!boundary) {
- pr_err("error: cannot allocate memory\n");
+ if (!boundary)
goto out;
- }
- err = scan_for_bad_eraseblocks();
+ bbt = kzalloc(ebcnt, GFP_KERNEL);
+ if (!bbt)
+ goto out;
+ err = mtdtest_scan_for_bad_eraseblocks(mtd, bbt, 0, ebcnt);
if (err)
goto out;
/* Erase all eraseblocks */
pr_info("erasing whole device\n");
- for (i = 0; i < ebcnt; ++i) {
- if (bbt[i])
- continue;
- err = erase_eraseblock(i);
- if (err)
- goto out;
- cond_resched();
- }
- pr_info("erased %u eraseblocks\n", i);
+ err = mtdtest_erase_good_eraseblocks(mtd, bbt, 0, ebcnt);
+ if (err)
+ goto out;
+ pr_info("erased %u eraseblocks\n", ebcnt);
/* Write all eraseblocks */
prandom_seed_state(&rnd_state, 1);
diff --git a/drivers/mtd/tests/mtd_readtest.c b/drivers/mtd/tests/readtest.c
index 266de04..626e66d 100644
--- a/drivers/mtd/tests/mtd_readtest.c
+++ b/drivers/mtd/tests/readtest.c
@@ -29,6 +29,8 @@
#include <linux/slab.h>
#include <linux/sched.h>
+#include "mtd_test.h"
+
static int dev = -EINVAL;
module_param(dev, int, S_IRUGO);
MODULE_PARM_DESC(dev, "MTD device number to use");
@@ -44,7 +46,6 @@ static int pgcnt;
static int read_eraseblock_by_page(int ebnum)
{
- size_t read;
int i, ret, err = 0;
loff_t addr = ebnum * mtd->erasesize;
void *buf = iobuf;
@@ -52,16 +53,10 @@ static int read_eraseblock_by_page(int ebnum)
for (i = 0; i < pgcnt; i++) {
memset(buf, 0 , pgsize);
- ret = mtd_read(mtd, addr, pgsize, &read, buf);
- if (ret == -EUCLEAN)
- ret = 0;
- if (ret || read != pgsize) {
- pr_err("error: read failed at %#llx\n",
- (long long)addr);
+ ret = mtdtest_read(mtd, addr, pgsize, buf);
+ if (ret) {
if (!err)
err = ret;
- if (!err)
- err = -EINVAL;
}
if (mtd->oobsize) {
struct mtd_oob_ops ops;
@@ -127,41 +122,6 @@ static void dump_eraseblock(int ebnum)
}
}
-static int is_block_bad(int ebnum)
-{
- loff_t addr = ebnum * mtd->erasesize;
- int ret;
-
- ret = mtd_block_isbad(mtd, addr);
- if (ret)
- pr_info("block %d is bad\n", ebnum);
- return ret;
-}
-
-static int scan_for_bad_eraseblocks(void)
-{
- int i, bad = 0;
-
- bbt = kzalloc(ebcnt, GFP_KERNEL);
- if (!bbt) {
- pr_err("error: cannot allocate memory\n");
- return -ENOMEM;
- }
-
- if (!mtd_can_have_bb(mtd))
- return 0;
-
- pr_info("scanning for bad eraseblocks\n");
- for (i = 0; i < ebcnt; ++i) {
- bbt[i] = is_block_bad(i) ? 1 : 0;
- if (bbt[i])
- bad += 1;
- cond_resched();
- }
- pr_info("scanned %d eraseblocks, %d are bad\n", i, bad);
- return 0;
-}
-
static int __init mtd_readtest_init(void)
{
uint64_t tmp;
@@ -204,17 +164,16 @@ static int __init mtd_readtest_init(void)
err = -ENOMEM;
iobuf = kmalloc(mtd->erasesize, GFP_KERNEL);
- if (!iobuf) {
- pr_err("error: cannot allocate memory\n");
+ if (!iobuf)
goto out;
- }
iobuf1 = kmalloc(mtd->erasesize, GFP_KERNEL);
- if (!iobuf1) {
- pr_err("error: cannot allocate memory\n");
+ if (!iobuf1)
goto out;
- }
- err = scan_for_bad_eraseblocks();
+ bbt = kzalloc(ebcnt, GFP_KERNEL);
+ if (!bbt)
+ goto out;
+ err = mtdtest_scan_for_bad_eraseblocks(mtd, bbt, 0, ebcnt);
if (err)
goto out;
diff --git a/drivers/mtd/tests/mtd_speedtest.c b/drivers/mtd/tests/speedtest.c
index a6ce9c1..87ff6a2 100644
--- a/drivers/mtd/tests/mtd_speedtest.c
+++ b/drivers/mtd/tests/speedtest.c
@@ -30,6 +30,8 @@
#include <linux/sched.h>
#include <linux/random.h>
+#include "mtd_test.h"
+
static int dev = -EINVAL;
module_param(dev, int, S_IRUGO);
MODULE_PARM_DESC(dev, "MTD device number to use");
@@ -49,33 +51,6 @@ static int pgcnt;
static int goodebcnt;
static struct timeval start, finish;
-
-static int erase_eraseblock(int ebnum)
-{
- int err;
- struct erase_info ei;
- loff_t addr = ebnum * mtd->erasesize;
-
- memset(&ei, 0, sizeof(struct erase_info));
- ei.mtd = mtd;
- ei.addr = addr;
- ei.len = mtd->erasesize;
-
- err = mtd_erase(mtd, &ei);
- if (err) {
- pr_err("error %d while erasing EB %d\n", err, ebnum);
- return err;
- }
-
- if (ei.state == MTD_ERASE_FAILED) {
- pr_err("some erase error occurred at EB %d\n",
- ebnum);
- return -EIO;
- }
-
- return 0;
-}
-
static int multiblock_erase(int ebnum, int blocks)
{
int err;
@@ -103,54 +78,23 @@ static int multiblock_erase(int ebnum, int blocks)
return 0;
}
-static int erase_whole_device(void)
-{
- int err;
- unsigned int i;
-
- for (i = 0; i < ebcnt; ++i) {
- if (bbt[i])
- continue;
- err = erase_eraseblock(i);
- if (err)
- return err;
- cond_resched();
- }
- return 0;
-}
-
static int write_eraseblock(int ebnum)
{
- size_t written;
- int err = 0;
loff_t addr = ebnum * mtd->erasesize;
- err = mtd_write(mtd, addr, mtd->erasesize, &written, iobuf);
- if (err || written != mtd->erasesize) {
- pr_err("error: write failed at %#llx\n", addr);
- if (!err)
- err = -EINVAL;
- }
-
- return err;
+ return mtdtest_write(mtd, addr, mtd->erasesize, iobuf);
}
static int write_eraseblock_by_page(int ebnum)
{
- size_t written;
int i, err = 0;
loff_t addr = ebnum * mtd->erasesize;
void *buf = iobuf;
for (i = 0; i < pgcnt; i++) {
- err = mtd_write(mtd, addr, pgsize, &written, buf);
- if (err || written != pgsize) {
- pr_err("error: write failed at %#llx\n",
- addr);
- if (!err)
- err = -EINVAL;
+ err = mtdtest_write(mtd, addr, pgsize, buf);
+ if (err)
break;
- }
addr += pgsize;
buf += pgsize;
}
@@ -160,74 +104,41 @@ static int write_eraseblock_by_page(int ebnum)
static int write_eraseblock_by_2pages(int ebnum)
{
- size_t written, sz = pgsize * 2;
+ size_t sz = pgsize * 2;
int i, n = pgcnt / 2, err = 0;
loff_t addr = ebnum * mtd->erasesize;
void *buf = iobuf;
for (i = 0; i < n; i++) {
- err = mtd_write(mtd, addr, sz, &written, buf);
- if (err || written != sz) {
- pr_err("error: write failed at %#llx\n",
- addr);
- if (!err)
- err = -EINVAL;
+ err = mtdtest_write(mtd, addr, sz, buf);
+ if (err)
return err;
- }
addr += sz;
buf += sz;
}
- if (pgcnt % 2) {
- err = mtd_write(mtd, addr, pgsize, &written, buf);
- if (err || written != pgsize) {
- pr_err("error: write failed at %#llx\n",
- addr);
- if (!err)
- err = -EINVAL;
- }
- }
+ if (pgcnt % 2)
+ err = mtdtest_write(mtd, addr, pgsize, buf);
return err;
}
static int read_eraseblock(int ebnum)
{
- size_t read;
- int err = 0;
loff_t addr = ebnum * mtd->erasesize;
- err = mtd_read(mtd, addr, mtd->erasesize, &read, iobuf);
- /* Ignore corrected ECC errors */
- if (mtd_is_bitflip(err))
- err = 0;
- if (err || read != mtd->erasesize) {
- pr_err("error: read failed at %#llx\n", addr);
- if (!err)
- err = -EINVAL;
- }
-
- return err;
+ return mtdtest_read(mtd, addr, mtd->erasesize, iobuf);
}
static int read_eraseblock_by_page(int ebnum)
{
- size_t read;
int i, err = 0;
loff_t addr = ebnum * mtd->erasesize;
void *buf = iobuf;
for (i = 0; i < pgcnt; i++) {
- err = mtd_read(mtd, addr, pgsize, &read, buf);
- /* Ignore corrected ECC errors */
- if (mtd_is_bitflip(err))
- err = 0;
- if (err || read != pgsize) {
- pr_err("error: read failed at %#llx\n",
- addr);
- if (!err)
- err = -EINVAL;
+ err = mtdtest_read(mtd, addr, pgsize, buf);
+ if (err)
break;
- }
addr += pgsize;
buf += pgsize;
}
@@ -237,53 +148,24 @@ static int read_eraseblock_by_page(int ebnum)
static int read_eraseblock_by_2pages(int ebnum)
{
- size_t read, sz = pgsize * 2;
+ size_t sz = pgsize * 2;
int i, n = pgcnt / 2, err = 0;
loff_t addr = ebnum * mtd->erasesize;
void *buf = iobuf;
for (i = 0; i < n; i++) {
- err = mtd_read(mtd, addr, sz, &read, buf);
- /* Ignore corrected ECC errors */
- if (mtd_is_bitflip(err))
- err = 0;
- if (err || read != sz) {
- pr_err("error: read failed at %#llx\n",
- addr);
- if (!err)
- err = -EINVAL;
+ err = mtdtest_read(mtd, addr, sz, buf);
+ if (err)
return err;
- }
addr += sz;
buf += sz;
}
- if (pgcnt % 2) {
- err = mtd_read(mtd, addr, pgsize, &read, buf);
- /* Ignore corrected ECC errors */
- if (mtd_is_bitflip(err))
- err = 0;
- if (err || read != pgsize) {
- pr_err("error: read failed at %#llx\n",
- addr);
- if (!err)
- err = -EINVAL;
- }
- }
+ if (pgcnt % 2)
+ err = mtdtest_read(mtd, addr, pgsize, buf);
return err;
}
-static int is_block_bad(int ebnum)
-{
- loff_t addr = ebnum * mtd->erasesize;
- int ret;
-
- ret = mtd_block_isbad(mtd, addr);
- if (ret)
- pr_info("block %d is bad\n", ebnum);
- return ret;
-}
-
static inline void start_timing(void)
{
do_gettimeofday(&start);
@@ -308,32 +190,6 @@ static long calc_speed(void)
return k;
}
-static int scan_for_bad_eraseblocks(void)
-{
- int i, bad = 0;
-
- bbt = kzalloc(ebcnt, GFP_KERNEL);
- if (!bbt) {
- pr_err("error: cannot allocate memory\n");
- return -ENOMEM;
- }
-
- if (!mtd_can_have_bb(mtd))
- goto out;
-
- pr_info("scanning for bad eraseblocks\n");
- for (i = 0; i < ebcnt; ++i) {
- bbt[i] = is_block_bad(i) ? 1 : 0;
- if (bbt[i])
- bad += 1;
- cond_resched();
- }
- pr_info("scanned %d eraseblocks, %d are bad\n", i, bad);
-out:
- goodebcnt = ebcnt - bad;
- return 0;
-}
-
static int __init mtd_speedtest_init(void)
{
int err, i, blocks, j, k;
@@ -384,18 +240,23 @@ static int __init mtd_speedtest_init(void)
err = -ENOMEM;
iobuf = kmalloc(mtd->erasesize, GFP_KERNEL);
- if (!iobuf) {
- pr_err("error: cannot allocate memory\n");
+ if (!iobuf)
goto out;
- }
prandom_bytes(iobuf, mtd->erasesize);
- err = scan_for_bad_eraseblocks();
+ bbt = kzalloc(ebcnt, GFP_KERNEL);
+ if (!bbt)
+ goto out;
+ err = mtdtest_scan_for_bad_eraseblocks(mtd, bbt, 0, ebcnt);
if (err)
goto out;
+ for (i = 0; i < ebcnt; i++) {
+ if (!bbt[i])
+ goodebcnt++;
+ }
- err = erase_whole_device();
+ err = mtdtest_erase_good_eraseblocks(mtd, bbt, 0, ebcnt);
if (err)
goto out;
@@ -429,7 +290,7 @@ static int __init mtd_speedtest_init(void)
speed = calc_speed();
pr_info("eraseblock read speed is %ld KiB/s\n", speed);
- err = erase_whole_device();
+ err = mtdtest_erase_good_eraseblocks(mtd, bbt, 0, ebcnt);
if (err)
goto out;
@@ -463,7 +324,7 @@ static int __init mtd_speedtest_init(void)
speed = calc_speed();
pr_info("page read speed is %ld KiB/s\n", speed);
- err = erase_whole_device();
+ err = mtdtest_erase_good_eraseblocks(mtd, bbt, 0, ebcnt);
if (err)
goto out;
@@ -500,14 +361,9 @@ static int __init mtd_speedtest_init(void)
/* Erase all eraseblocks */
pr_info("Testing erase speed\n");
start_timing();
- for (i = 0; i < ebcnt; ++i) {
- if (bbt[i])
- continue;
- err = erase_eraseblock(i);
- if (err)
- goto out;
- cond_resched();
- }
+ err = mtdtest_erase_good_eraseblocks(mtd, bbt, 0, ebcnt);
+ if (err)
+ goto out;
stop_timing();
speed = calc_speed();
pr_info("erase speed is %ld KiB/s\n", speed);
diff --git a/drivers/mtd/tests/mtd_stresstest.c b/drivers/mtd/tests/stresstest.c
index 787f539..c9d42cc 100644
--- a/drivers/mtd/tests/mtd_stresstest.c
+++ b/drivers/mtd/tests/stresstest.c
@@ -31,6 +31,8 @@
#include <linux/vmalloc.h>
#include <linux/random.h>
+#include "mtd_test.h"
+
static int dev = -EINVAL;
module_param(dev, int, S_IRUGO);
MODULE_PARM_DESC(dev, "MTD device number to use");
@@ -81,49 +83,11 @@ static int rand_len(int offs)
return len;
}
-static int erase_eraseblock(int ebnum)
-{
- int err;
- struct erase_info ei;
- loff_t addr = ebnum * mtd->erasesize;
-
- memset(&ei, 0, sizeof(struct erase_info));
- ei.mtd = mtd;
- ei.addr = addr;
- ei.len = mtd->erasesize;
-
- err = mtd_erase(mtd, &ei);
- if (unlikely(err)) {
- pr_err("error %d while erasing EB %d\n", err, ebnum);
- return err;
- }
-
- if (unlikely(ei.state == MTD_ERASE_FAILED)) {
- pr_err("some erase error occurred at EB %d\n",
- ebnum);
- return -EIO;
- }
-
- return 0;
-}
-
-static int is_block_bad(int ebnum)
-{
- loff_t addr = ebnum * mtd->erasesize;
- int ret;
-
- ret = mtd_block_isbad(mtd, addr);
- if (ret)
- pr_info("block %d is bad\n", ebnum);
- return ret;
-}
-
static int do_read(void)
{
- size_t read;
int eb = rand_eb();
int offs = rand_offs();
- int len = rand_len(offs), err;
+ int len = rand_len(offs);
loff_t addr;
if (bbt[eb + 1]) {
@@ -133,28 +97,17 @@ static int do_read(void)
len = mtd->erasesize - offs;
}
addr = eb * mtd->erasesize + offs;
- err = mtd_read(mtd, addr, len, &read, readbuf);
- if (mtd_is_bitflip(err))
- err = 0;
- if (unlikely(err || read != len)) {
- pr_err("error: read failed at 0x%llx\n",
- (long long)addr);
- if (!err)
- err = -EINVAL;
- return err;
- }
- return 0;
+ return mtdtest_read(mtd, addr, len, readbuf);
}
static int do_write(void)
{
int eb = rand_eb(), offs, err, len;
- size_t written;
loff_t addr;
offs = offsets[eb];
if (offs >= mtd->erasesize) {
- err = erase_eraseblock(eb);
+ err = mtdtest_erase_eraseblock(mtd, eb);
if (err)
return err;
offs = offsets[eb] = 0;
@@ -165,21 +118,16 @@ static int do_write(void)
if (bbt[eb + 1])
len = mtd->erasesize - offs;
else {
- err = erase_eraseblock(eb + 1);
+ err = mtdtest_erase_eraseblock(mtd, eb + 1);
if (err)
return err;
offsets[eb + 1] = 0;
}
}
addr = eb * mtd->erasesize + offs;
- err = mtd_write(mtd, addr, len, &written, writebuf);
- if (unlikely(err || written != len)) {
- pr_err("error: write failed at 0x%llx\n",
- (long long)addr);
- if (!err)
- err = -EINVAL;
+ err = mtdtest_write(mtd, addr, len, writebuf);
+ if (unlikely(err))
return err;
- }
offs += len;
while (offs > mtd->erasesize) {
offsets[eb++] = mtd->erasesize;
@@ -197,30 +145,6 @@ static int do_operation(void)
return do_write();
}
-static int scan_for_bad_eraseblocks(void)
-{
- int i, bad = 0;
-
- bbt = kzalloc(ebcnt, GFP_KERNEL);
- if (!bbt) {
- pr_err("error: cannot allocate memory\n");
- return -ENOMEM;
- }
-
- if (!mtd_can_have_bb(mtd))
- return 0;
-
- pr_info("scanning for bad eraseblocks\n");
- for (i = 0; i < ebcnt; ++i) {
- bbt[i] = is_block_bad(i) ? 1 : 0;
- if (bbt[i])
- bad += 1;
- cond_resched();
- }
- pr_info("scanned %d eraseblocks, %d are bad\n", i, bad);
- return 0;
-}
-
static int __init mtd_stresstest_init(void)
{
int err;
@@ -276,15 +200,16 @@ static int __init mtd_stresstest_init(void)
readbuf = vmalloc(bufsize);
writebuf = vmalloc(bufsize);
offsets = kmalloc(ebcnt * sizeof(int), GFP_KERNEL);
- if (!readbuf || !writebuf || !offsets) {
- pr_err("error: cannot allocate memory\n");
+ if (!readbuf || !writebuf || !offsets)
goto out;
- }
for (i = 0; i < ebcnt; i++)
offsets[i] = mtd->erasesize;
prandom_bytes(writebuf, bufsize);
- err = scan_for_bad_eraseblocks();
+ bbt = kzalloc(ebcnt, GFP_KERNEL);
+ if (!bbt)
+ goto out;
+ err = mtdtest_scan_for_bad_eraseblocks(mtd, bbt, 0, ebcnt);
if (err)
goto out;
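
Erasing is consolidated the same way. Reconstructed from the deleted erase_eraseblock()/erase_whole_device() copies (illustrative sketch; the shared implementation may differ):

	int mtdtest_erase_eraseblock(struct mtd_info *mtd, unsigned int ebnum)
	{
		struct erase_info ei;
		loff_t addr = ebnum * mtd->erasesize;
		int err;

		memset(&ei, 0, sizeof(struct erase_info));
		ei.mtd = mtd;
		ei.addr = addr;
		ei.len = mtd->erasesize;

		err = mtd_erase(mtd, &ei);
		if (err) {
			pr_err("error %d while erasing EB %d\n", err, ebnum);
			return err;
		}
		if (ei.state == MTD_ERASE_FAILED) {
			pr_err("some erase error occurred at EB %d\n", ebnum);
			return -EIO;
		}
		return 0;
	}

	int mtdtest_erase_good_eraseblocks(struct mtd_info *mtd,
					   unsigned char *bbt,
					   unsigned int eb, int ebcnt)
	{
		unsigned int i;
		int err;

		for (i = 0; i < ebcnt; ++i) {
			if (bbt[i])
				continue;
			err = mtdtest_erase_eraseblock(mtd, eb + i);
			if (err)
				return err;
			cond_resched();
		}
		return 0;
	}
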
diff --git a/drivers/mtd/tests/mtd_subpagetest.c b/drivers/mtd/tests/subpagetest.c
index aade56f..e2c0adf 100644
--- a/drivers/mtd/tests/mtd_subpagetest.c
+++ b/drivers/mtd/tests/subpagetest.c
@@ -30,6 +30,8 @@
#include <linux/sched.h>
#include <linux/random.h>
+#include "mtd_test.h"
+
static int dev = -EINVAL;
module_param(dev, int, S_IRUGO);
MODULE_PARM_DESC(dev, "MTD device number to use");
@@ -51,50 +53,6 @@ static inline void clear_data(unsigned char *buf, size_t len)
memset(buf, 0, len);
}
-static int erase_eraseblock(int ebnum)
-{
- int err;
- struct erase_info ei;
- loff_t addr = ebnum * mtd->erasesize;
-
- memset(&ei, 0, sizeof(struct erase_info));
- ei.mtd = mtd;
- ei.addr = addr;
- ei.len = mtd->erasesize;
-
- err = mtd_erase(mtd, &ei);
- if (err) {
- pr_err("error %d while erasing EB %d\n", err, ebnum);
- return err;
- }
-
- if (ei.state == MTD_ERASE_FAILED) {
- pr_err("some erase error occurred at EB %d\n",
- ebnum);
- return -EIO;
- }
-
- return 0;
-}
-
-static int erase_whole_device(void)
-{
- int err;
- unsigned int i;
-
- pr_info("erasing whole device\n");
- for (i = 0; i < ebcnt; ++i) {
- if (bbt[i])
- continue;
- err = erase_eraseblock(i);
- if (err)
- return err;
- cond_resched();
- }
- pr_info("erased %u eraseblocks\n", i);
- return 0;
-}
-
static int write_eraseblock(int ebnum)
{
size_t written;
@@ -317,38 +275,6 @@ static int verify_all_eraseblocks_ff(void)
return 0;
}
-static int is_block_bad(int ebnum)
-{
- loff_t addr = ebnum * mtd->erasesize;
- int ret;
-
- ret = mtd_block_isbad(mtd, addr);
- if (ret)
- pr_info("block %d is bad\n", ebnum);
- return ret;
-}
-
-static int scan_for_bad_eraseblocks(void)
-{
- int i, bad = 0;
-
- bbt = kzalloc(ebcnt, GFP_KERNEL);
- if (!bbt) {
- pr_err("error: cannot allocate memory\n");
- return -ENOMEM;
- }
-
- pr_info("scanning for bad eraseblocks\n");
- for (i = 0; i < ebcnt; ++i) {
- bbt[i] = is_block_bad(i) ? 1 : 0;
- if (bbt[i])
- bad += 1;
- cond_resched();
- }
- pr_info("scanned %d eraseblocks, %d are bad\n", i, bad);
- return 0;
-}
-
static int __init mtd_subpagetest_init(void)
{
int err = 0;
@@ -393,21 +319,20 @@ static int __init mtd_subpagetest_init(void)
err = -ENOMEM;
bufsize = subpgsize * 32;
writebuf = kmalloc(bufsize, GFP_KERNEL);
- if (!writebuf) {
- pr_info("error: cannot allocate memory\n");
+ if (!writebuf)
goto out;
- }
readbuf = kmalloc(bufsize, GFP_KERNEL);
- if (!readbuf) {
- pr_info("error: cannot allocate memory\n");
+ if (!readbuf)
+ goto out;
+ bbt = kzalloc(ebcnt, GFP_KERNEL);
+ if (!bbt)
goto out;
- }
- err = scan_for_bad_eraseblocks();
+ err = mtdtest_scan_for_bad_eraseblocks(mtd, bbt, 0, ebcnt);
if (err)
goto out;
- err = erase_whole_device();
+ err = mtdtest_erase_good_eraseblocks(mtd, bbt, 0, ebcnt);
if (err)
goto out;
@@ -439,7 +364,7 @@ static int __init mtd_subpagetest_init(void)
}
pr_info("verified %u eraseblocks\n", i);
- err = erase_whole_device();
+ err = mtdtest_erase_good_eraseblocks(mtd, bbt, 0, ebcnt);
if (err)
goto out;
@@ -477,7 +402,7 @@ static int __init mtd_subpagetest_init(void)
}
pr_info("verified %u eraseblocks\n", i);
- err = erase_whole_device();
+ err = mtdtest_erase_good_eraseblocks(mtd, bbt, 0, ebcnt);
if (err)
goto out;
diff --git a/drivers/mtd/tests/mtd_torturetest.c b/drivers/mtd/tests/torturetest.c
index 3a9f6a6..eeab969 100644
--- a/drivers/mtd/tests/mtd_torturetest.c
+++ b/drivers/mtd/tests/torturetest.c
@@ -32,6 +32,7 @@
#include <linux/mtd/mtd.h>
#include <linux/slab.h>
#include <linux/sched.h>
+#include "mtd_test.h"
#define RETRIES 3
@@ -93,35 +94,6 @@ static inline void stop_timing(void)
}
/*
- * Erase eraseblock number @ebnum.
- */
-static inline int erase_eraseblock(int ebnum)
-{
- int err;
- struct erase_info ei;
- loff_t addr = ebnum * mtd->erasesize;
-
- memset(&ei, 0, sizeof(struct erase_info));
- ei.mtd = mtd;
- ei.addr = addr;
- ei.len = mtd->erasesize;
-
- err = mtd_erase(mtd, &ei);
- if (err) {
- pr_err("error %d while erasing EB %d\n", err, ebnum);
- return err;
- }
-
- if (ei.state == MTD_ERASE_FAILED) {
- pr_err("some erase error occurred at EB %d\n",
- ebnum);
- return -EIO;
- }
-
- return 0;
-}
-
-/*
 * Check that the contents of eraseblock number @ebnum are equivalent to the
* @buf buffer.
*/
@@ -208,7 +180,7 @@ static inline int write_pattern(int ebnum, void *buf)
static int __init tort_init(void)
{
int err = 0, i, infinite = !cycles_count;
- int *bad_ebs;
+ unsigned char *bad_ebs;
printk(KERN_INFO "\n");
printk(KERN_INFO "=================================================\n");
@@ -265,7 +237,7 @@ static int __init tort_init(void)
if (!check_buf)
goto out_patt_FF;
- bad_ebs = kcalloc(ebcnt, sizeof(*bad_ebs), GFP_KERNEL);
+ bad_ebs = kzalloc(ebcnt, GFP_KERNEL);
if (!bad_ebs)
goto out_check_buf;
@@ -283,40 +255,16 @@ static int __init tort_init(void)
}
}
- /*
- * Check if there is a bad eraseblock among those we are going to test.
- */
- if (mtd_can_have_bb(mtd)) {
- for (i = eb; i < eb + ebcnt; i++) {
- err = mtd_block_isbad(mtd, (loff_t)i * mtd->erasesize);
-
- if (err < 0) {
- pr_info("block_isbad() returned %d "
- "for EB %d\n", err, i);
- goto out;
- }
-
- if (err) {
- pr_err("EB %d is bad. Skip it.\n", i);
- bad_ebs[i - eb] = 1;
- }
- }
- }
+ err = mtdtest_scan_for_bad_eraseblocks(mtd, bad_ebs, eb, ebcnt);
+ if (err)
+ goto out;
start_timing();
while (1) {
int i;
void *patt;
- /* Erase all eraseblocks */
- for (i = eb; i < eb + ebcnt; i++) {
- if (bad_ebs[i - eb])
- continue;
- err = erase_eraseblock(i);
- if (err)
- goto out;
- cond_resched();
- }
+ mtdtest_erase_good_eraseblocks(mtd, bad_ebs, eb, ebcnt);
/* Check if the eraseblocks contain only 0xFF bytes */
if (check) {
diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
index 1542751..f5aa4b0 100644
--- a/drivers/mtd/ubi/fastmap.c
+++ b/drivers/mtd/ubi/fastmap.c
@@ -1343,7 +1343,7 @@ out:
static int invalidate_fastmap(struct ubi_device *ubi,
struct ubi_fastmap_layout *fm)
{
- int ret, i;
+ int ret;
struct ubi_vid_hdr *vh;
ret = erase_block(ubi, fm->e[0]->pnum);
@@ -1360,9 +1360,6 @@ static int invalidate_fastmap(struct ubi_device *ubi,
vh->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
ret = ubi_io_write_vid_hdr(ubi, fm->e[0]->pnum, vh);
- for (i = 0; i < fm->used_blocks; i++)
- ubi_wl_put_fm_peb(ubi, fm->e[i], i, fm->to_be_tortured[i]);
-
return ret;
}
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 5df49d3..c95bfb1 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -1069,6 +1069,9 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) {
dbg_wl("no WL needed: min used EC %d, max free EC %d",
e1->ec, e2->ec);
+
+ /* Give the unused PEB back */
+ wl_tree_add(e2, &ubi->free);
goto out_cancel;
}
self_check_in_wl_tree(ubi, e1, &ubi->used);
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 91f179d..f428ef57 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -1472,7 +1472,7 @@ void bond_alb_monitor(struct work_struct *work)
bond_info->lp_counter++;
/* send learning packets */
- if (bond_info->lp_counter >= BOND_ALB_LP_TICKS) {
+ if (bond_info->lp_counter >= BOND_ALB_LP_TICKS(bond)) {
/* change of curr_active_slave involves swapping of mac addresses.
* in order to avoid this swapping from happening while
* sending the learning packets, the curr_slave_lock must be held for
diff --git a/drivers/net/bonding/bond_alb.h b/drivers/net/bonding/bond_alb.h
index 28d8e4c..c5eff5d 100644
--- a/drivers/net/bonding/bond_alb.h
+++ b/drivers/net/bonding/bond_alb.h
@@ -36,14 +36,15 @@ struct slave;
* Used for division - never set
* to zero !!!
*/
-#define BOND_ALB_LP_INTERVAL 1 /* In seconds, periodic send of
- * learning packets to the switch
- */
+#define BOND_ALB_DEFAULT_LP_INTERVAL 1
+#define BOND_ALB_LP_INTERVAL(bond) (bond->params.lp_interval) /* In seconds, periodic send of
+ * learning packets to the switch
+ */
#define BOND_TLB_REBALANCE_TICKS (BOND_TLB_REBALANCE_INTERVAL \
* ALB_TIMER_TICKS_PER_SEC)
-#define BOND_ALB_LP_TICKS (BOND_ALB_LP_INTERVAL \
+#define BOND_ALB_LP_TICKS(bond) (BOND_ALB_LP_INTERVAL(bond) \
* ALB_TIMER_TICKS_PER_SEC)
#define TLB_HASH_TABLE_SIZE 256 /* The size of the clients hash table.
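
With ALB_TIMER_TICKS_PER_SEC monitor ticks per second, this works out to BOND_ALB_LP_TICKS(bond) = bond->params.lp_interval * ALB_TIMER_TICKS_PER_SEC, so the lp_counter test in bond_alb_monitor() now fires once every lp_interval seconds rather than once per hard-coded second; with the default interval of 1 the behaviour is unchanged.
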
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 39e5b1c..55bbb8b 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -2404,8 +2404,8 @@ static void bond_validate_arp(struct bonding *bond, struct slave *slave, __be32
slave->target_last_arp_rx[i] = jiffies;
}
-static int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
- struct slave *slave)
+int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
+ struct slave *slave)
{
struct arphdr *arp = (struct arphdr *)skb->data;
unsigned char *arp_ptr;
@@ -4416,6 +4416,7 @@ static int bond_check_params(struct bond_params *params)
params->all_slaves_active = all_slaves_active;
params->resend_igmp = resend_igmp;
params->min_links = min_links;
+ params->lp_interval = BOND_ALB_DEFAULT_LP_INTERVAL;
if (primary) {
strncpy(params->primary, primary, IFNAMSIZ);
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index ce46776..c29b836 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -349,6 +349,8 @@ static ssize_t bonding_store_mode(struct device *d,
goto out;
}
+ /* don't cache arp_validate between modes */
+ bond->params.arp_validate = BOND_ARP_VALIDATE_NONE;
bond->params.mode = new_value;
bond_set_mode_ops(bond, bond->params.mode);
pr_info("%s: setting mode to %s (%d).\n",
@@ -419,27 +421,39 @@ static ssize_t bonding_store_arp_validate(struct device *d,
struct device_attribute *attr,
const char *buf, size_t count)
{
- int new_value;
struct bonding *bond = to_bond(d);
+ int new_value, ret = count;
+ if (!rtnl_trylock())
+ return restart_syscall();
new_value = bond_parse_parm(buf, arp_validate_tbl);
if (new_value < 0) {
pr_err("%s: Ignoring invalid arp_validate value %s\n",
bond->dev->name, buf);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
- if (new_value && (bond->params.mode != BOND_MODE_ACTIVEBACKUP)) {
+ if (bond->params.mode != BOND_MODE_ACTIVEBACKUP) {
pr_err("%s: arp_validate only supported in active-backup mode.\n",
bond->dev->name);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
pr_info("%s: setting arp_validate to %s (%d).\n",
bond->dev->name, arp_validate_tbl[new_value].modename,
new_value);
+ if (bond->dev->flags & IFF_UP) {
+ if (!new_value)
+ bond->recv_probe = NULL;
+ else if (bond->params.arp_interval)
+ bond->recv_probe = bond_arp_rcv;
+ }
bond->params.arp_validate = new_value;
+out:
+ rtnl_unlock();
- return count;
+ return ret;
}
static DEVICE_ATTR(arp_validate, S_IRUGO | S_IWUSR, bonding_show_arp_validate,
@@ -555,8 +569,8 @@ static ssize_t bonding_store_arp_interval(struct device *d,
struct device_attribute *attr,
const char *buf, size_t count)
{
- int new_value, ret = count;
struct bonding *bond = to_bond(d);
+ int new_value, ret = count;
if (!rtnl_trylock())
return restart_syscall();
@@ -599,8 +613,13 @@ static ssize_t bonding_store_arp_interval(struct device *d,
* is called.
*/
if (!new_value) {
+ if (bond->params.arp_validate)
+ bond->recv_probe = NULL;
cancel_delayed_work_sync(&bond->arp_work);
} else {
+ /* arp_validate can be set only in active-backup mode */
+ if (bond->params.arp_validate)
+ bond->recv_probe = bond_arp_rcv;
cancel_delayed_work_sync(&bond->mii_work);
queue_delayed_work(bond->wq, &bond->arp_work, 0);
}
@@ -1680,6 +1699,44 @@ out:
static DEVICE_ATTR(resend_igmp, S_IRUGO | S_IWUSR,
bonding_show_resend_igmp, bonding_store_resend_igmp);
+
+static ssize_t bonding_show_lp_interval(struct device *d,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct bonding *bond = to_bond(d);
+ return sprintf(buf, "%d\n", bond->params.lp_interval);
+}
+
+static ssize_t bonding_store_lp_interval(struct device *d,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct bonding *bond = to_bond(d);
+ int new_value, ret = count;
+
+ if (sscanf(buf, "%d", &new_value) != 1) {
+ pr_err("%s: no lp interval value specified.\n",
+ bond->dev->name);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (new_value <= 0) {
+ pr_err ("%s: lp_interval must be between 1 and %d\n",
+ bond->dev->name, INT_MAX);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ bond->params.lp_interval = new_value;
+out:
+ return ret;
+}
+
+static DEVICE_ATTR(lp_interval, S_IRUGO | S_IWUSR,
+ bonding_show_lp_interval, bonding_store_lp_interval);
+
static struct attribute *per_bond_attrs[] = {
&dev_attr_slaves.attr,
&dev_attr_mode.attr,
@@ -1710,6 +1767,7 @@ static struct attribute *per_bond_attrs[] = {
&dev_attr_all_slaves_active.attr,
&dev_attr_resend_igmp.attr,
&dev_attr_min_links.attr,
+ &dev_attr_lp_interval.attr,
NULL,
};
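
Once registered, the new attribute is tunable at runtime through the usual bonding sysfs directory (assuming a bond named bond0):

	cat /sys/class/net/bond0/bonding/lp_interval
	echo 5 > /sys/class/net/bond0/bonding/lp_interval

Values below 1 are rejected with -EINVAL by bonding_store_lp_interval() above.
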
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index f7ab161..03cf3fd 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -176,6 +176,7 @@ struct bond_params {
int tx_queues;
int all_slaves_active;
int resend_igmp;
+ int lp_interval;
};
struct bond_parm_tbl {
@@ -430,6 +431,7 @@ static inline bool slave_can_tx(struct slave *slave)
struct bond_net;
+int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond, struct slave *slave);
struct vlan_entry *bond_next_vlan(struct bonding *bond, struct vlan_entry *curr);
int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, struct net_device *slave_dev);
void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int slave_id);
diff --git a/drivers/net/ethernet/adi/bfin_mac.c b/drivers/net/ethernet/adi/bfin_mac.c
index e66684a..75fb1d2 100644
--- a/drivers/net/ethernet/adi/bfin_mac.c
+++ b/drivers/net/ethernet/adi/bfin_mac.c
@@ -530,7 +530,7 @@ static int bfin_mac_ethtool_setwol(struct net_device *dev,
if (lp->wol && !lp->irq_wake_requested) {
/* register wake irq handler */
rc = request_irq(IRQ_MAC_WAKEDET, bfin_mac_wake_interrupt,
- IRQF_DISABLED, "EMAC_WAKE", dev);
+ 0, "EMAC_WAKE", dev);
if (rc)
return rc;
lp->irq_wake_requested = true;
@@ -1686,7 +1686,7 @@ static int bfin_mac_probe(struct platform_device *pdev)
/* now, enable interrupts */
/* register irq handler */
rc = request_irq(IRQ_MAC_RX, bfin_mac_interrupt,
- IRQF_DISABLED, "EMAC_RX", ndev);
+ 0, "EMAC_RX", ndev);
if (rc) {
dev_err(&pdev->dev, "Cannot request Blackfin MAC RX IRQ!\n");
rc = -EBUSY;
diff --git a/drivers/net/ethernet/amd/declance.c b/drivers/net/ethernet/amd/declance.c
index 3d86ffe..94edc9c 100644
--- a/drivers/net/ethernet/amd/declance.c
+++ b/drivers/net/ethernet/amd/declance.c
@@ -725,6 +725,7 @@ static irqreturn_t lance_dma_merr_int(int irq, void *dev_id)
{
struct net_device *dev = dev_id;
+ clear_ioasic_dma_irq(irq);
printk(KERN_ERR "%s: DMA error\n", dev->name);
return IRQ_HANDLED;
}
diff --git a/drivers/net/ethernet/amd/sun3lance.c b/drivers/net/ethernet/amd/sun3lance.c
index d6b2029..3d8c6b2 100644
--- a/drivers/net/ethernet/amd/sun3lance.c
+++ b/drivers/net/ethernet/amd/sun3lance.c
@@ -358,7 +358,7 @@ static int __init lance_probe( struct net_device *dev)
REGA(CSR0) = CSR0_STOP;
- if (request_irq(LANCE_IRQ, lance_interrupt, IRQF_DISABLED, "SUN3 Lance", dev) < 0) {
+ if (request_irq(LANCE_IRQ, lance_interrupt, 0, "SUN3 Lance", dev) < 0) {
#ifdef CONFIG_SUN3
iounmap((void __iomem *)ioaddr);
#endif
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index 027398e..fc95b23 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -1188,7 +1188,7 @@ static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct alx_priv *alx;
struct alx_hw *hw;
bool phy_configured;
- int bars, pm_cap, err;
+ int bars, err;
err = pci_enable_device_mem(pdev);
if (err)
@@ -1225,18 +1225,13 @@ static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_enable_pcie_error_reporting(pdev);
pci_set_master(pdev);
- pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
- if (pm_cap == 0) {
+ if (!pdev->pm_cap) {
dev_err(&pdev->dev,
"Can't find power management capability, aborting\n");
err = -EIO;
goto out_pci_release;
}
- err = pci_set_power_state(pdev, PCI_D0);
- if (err)
- goto out_pci_release;
-
netdev = alloc_etherdev(sizeof(*alx));
if (!netdev) {
err = -ENOMEM;
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index 8ac48fb..b9a5fb6 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -926,13 +926,13 @@ static int bcm_enet_open(struct net_device *dev)
if (ret)
goto out_phy_disconnect;
- ret = request_irq(priv->irq_rx, bcm_enet_isr_dma, IRQF_DISABLED,
+ ret = request_irq(priv->irq_rx, bcm_enet_isr_dma, 0,
dev->name, dev);
if (ret)
goto out_freeirq;
ret = request_irq(priv->irq_tx, bcm_enet_isr_dma,
- IRQF_DISABLED, dev->name, dev);
+ 0, dev->name, dev);
if (ret)
goto out_freeirq_rx;
@@ -2156,13 +2156,13 @@ static int bcm_enetsw_open(struct net_device *dev)
enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);
ret = request_irq(priv->irq_rx, bcm_enet_isr_dma,
- IRQF_DISABLED, dev->name, dev);
+ 0, dev->name, dev);
if (ret)
goto out_freeirq;
if (priv->irq_tx != -1) {
ret = request_irq(priv->irq_tx, bcm_enet_isr_dma,
- IRQF_DISABLED, dev->name, dev);
+ 0, dev->name, dev);
if (ret)
goto out_freeirq_rx;
}
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index eec0af4..249468f 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -157,6 +157,7 @@ static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac,
if (++ring->end >= BGMAC_TX_RING_SLOTS)
ring->end = 0;
bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_INDEX,
+ ring->index_base +
ring->end * sizeof(struct bgmac_dma_desc));
/* Always keep one slot free to allow detecting bugged calls. */
@@ -181,6 +182,8 @@ static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
/* The last slot that hardware didn't consume yet */
empty_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
empty_slot &= BGMAC_DMA_TX_STATDPTR;
+ empty_slot -= ring->index_base;
+ empty_slot &= BGMAC_DMA_TX_STATDPTR;
empty_slot /= sizeof(struct bgmac_dma_desc);
while (ring->start != empty_slot) {
@@ -274,6 +277,8 @@ static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
end_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_STATUS);
end_slot &= BGMAC_DMA_RX_STATDPTR;
+ end_slot -= ring->index_base;
+ end_slot &= BGMAC_DMA_RX_STATDPTR;
end_slot /= sizeof(struct bgmac_dma_desc);
ring->end = end_slot;
@@ -418,9 +423,6 @@ static int bgmac_dma_alloc(struct bgmac *bgmac)
ring = &bgmac->tx_ring[i];
ring->num_slots = BGMAC_TX_RING_SLOTS;
ring->mmio_base = ring_base[i];
- if (bgmac_dma_unaligned(bgmac, ring, BGMAC_DMA_RING_TX))
- bgmac_warn(bgmac, "TX on ring 0x%X supports unaligned addressing but this feature is not implemented\n",
- ring->mmio_base);
/* Alloc ring of descriptors */
size = ring->num_slots * sizeof(struct bgmac_dma_desc);
@@ -435,6 +437,13 @@ static int bgmac_dma_alloc(struct bgmac *bgmac)
if (ring->dma_base & 0xC0000000)
bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");
+ ring->unaligned = bgmac_dma_unaligned(bgmac, ring,
+ BGMAC_DMA_RING_TX);
+ if (ring->unaligned)
+ ring->index_base = lower_32_bits(ring->dma_base);
+ else
+ ring->index_base = 0;
+
/* No need to alloc TX slots yet */
}
@@ -444,9 +453,6 @@ static int bgmac_dma_alloc(struct bgmac *bgmac)
ring = &bgmac->rx_ring[i];
ring->num_slots = BGMAC_RX_RING_SLOTS;
ring->mmio_base = ring_base[i];
- if (bgmac_dma_unaligned(bgmac, ring, BGMAC_DMA_RING_RX))
- bgmac_warn(bgmac, "RX on ring 0x%X supports unaligned addressing but this feature is not implemented\n",
- ring->mmio_base);
/* Alloc ring of descriptors */
size = ring->num_slots * sizeof(struct bgmac_dma_desc);
@@ -462,6 +468,13 @@ static int bgmac_dma_alloc(struct bgmac *bgmac)
if (ring->dma_base & 0xC0000000)
bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");
+ ring->unaligned = bgmac_dma_unaligned(bgmac, ring,
+ BGMAC_DMA_RING_RX);
+ if (ring->unaligned)
+ ring->index_base = lower_32_bits(ring->dma_base);
+ else
+ ring->index_base = 0;
+
/* Alloc RX slots */
for (j = 0; j < ring->num_slots; j++) {
err = bgmac_dma_rx_skb_for_slot(bgmac, &ring->slots[j]);
@@ -489,12 +502,14 @@ static void bgmac_dma_init(struct bgmac *bgmac)
for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) {
ring = &bgmac->tx_ring[i];
- /* We don't implement unaligned addressing, so enable first */
- bgmac_dma_tx_enable(bgmac, ring);
+ if (!ring->unaligned)
+ bgmac_dma_tx_enable(bgmac, ring);
bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO,
lower_32_bits(ring->dma_base));
bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGHI,
upper_32_bits(ring->dma_base));
+ if (ring->unaligned)
+ bgmac_dma_tx_enable(bgmac, ring);
ring->start = 0;
ring->end = 0; /* Points the slot that should *not* be read */
@@ -505,12 +520,14 @@ static void bgmac_dma_init(struct bgmac *bgmac)
ring = &bgmac->rx_ring[i];
- /* We don't implement unaligned addressing, so enable first */
- bgmac_dma_rx_enable(bgmac, ring);
+ if (!ring->unaligned)
+ bgmac_dma_rx_enable(bgmac, ring);
bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO,
lower_32_bits(ring->dma_base));
bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGHI,
upper_32_bits(ring->dma_base));
+ if (ring->unaligned)
+ bgmac_dma_rx_enable(bgmac, ring);
for (j = 0, dma_desc = ring->cpu_base; j < ring->num_slots;
j++, dma_desc++) {
@@ -531,6 +548,7 @@ static void bgmac_dma_init(struct bgmac *bgmac)
}
bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_INDEX,
+ ring->index_base +
ring->num_slots * sizeof(struct bgmac_dma_desc));
ring->start = 0;
@@ -908,10 +926,10 @@ static void bgmac_chip_reset(struct bgmac *bgmac)
struct bcma_drv_cc *cc = &bgmac->core->bus->drv_cc;
u8 et_swtype = 0;
u8 sw_type = BGMAC_CHIPCTL_1_SW_TYPE_EPHY |
- BGMAC_CHIPCTL_1_IF_TYPE_RMII;
- char buf[2];
+ BGMAC_CHIPCTL_1_IF_TYPE_MII;
+ char buf[4];
- if (bcm47xx_nvram_getenv("et_swtype", buf, 1) > 0) {
+ if (bcm47xx_nvram_getenv("et_swtype", buf, sizeof(buf)) > 0) {
if (kstrtou8(buf, 0, &et_swtype))
bgmac_err(bgmac, "Failed to parse et_swtype (%s)\n",
buf);
diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h
index 98d4b5f..66c8afb 100644
--- a/drivers/net/ethernet/broadcom/bgmac.h
+++ b/drivers/net/ethernet/broadcom/bgmac.h
@@ -333,7 +333,7 @@
#define BGMAC_CHIPCTL_1_IF_TYPE_MASK 0x00000030
#define BGMAC_CHIPCTL_1_IF_TYPE_RMII 0x00000000
-#define BGMAC_CHIPCTL_1_IF_TYPE_MI 0x00000010
+#define BGMAC_CHIPCTL_1_IF_TYPE_MII 0x00000010
#define BGMAC_CHIPCTL_1_IF_TYPE_RGMII 0x00000020
#define BGMAC_CHIPCTL_1_SW_TYPE_MASK 0x000000C0
#define BGMAC_CHIPCTL_1_SW_TYPE_EPHY 0x00000000
@@ -384,6 +384,8 @@ struct bgmac_dma_ring {
u16 mmio_base;
struct bgmac_dma_desc *cpu_base;
dma_addr_t dma_base;
+ u32 index_base; /* Used for unaligned rings only, otherwise 0 */
+ bool unaligned;
struct bgmac_slot_info slots[BGMAC_RX_RING_SLOTS];
};
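
On cores whose DMA engines report unaligned addressing, the ring index and status-pointer registers are relative to the ring's base address rather than to zero. The driver therefore latches lower_32_bits(ring->dma_base) into index_base, adds it whenever it writes BGMAC_DMA_TX_INDEX/BGMAC_DMA_RX_INDEX, and subtracts it (masking back into BGMAC_DMA_TX_STATDPTR so the arithmetic stays modular across the wrap) when turning a hardware status pointer into a slot number. A hypothetical helper showing just that conversion (bgmac has no such function; the logic is open-coded in bgmac_dma_tx_free() and bgmac_dma_rx_read() above):

	static int bgmac_status_to_slot(struct bgmac_dma_ring *ring, u32 status)
	{
		u32 ptr = status & BGMAC_DMA_TX_STATDPTR;

		ptr -= ring->index_base;
		ptr &= BGMAC_DMA_TX_STATDPTR;	/* modular on ring wrap */
		return ptr / sizeof(struct bgmac_dma_desc);
	}

This is also why bgmac_dma_init() now programs the ring base address before enabling an unaligned ring: the enable path consumes index-relative values.
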
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 0c33802..97b3d32 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -246,8 +246,37 @@ enum {
BNX2X_MAX_CNIC_ETH_CL_ID_IDX,
};
-#define BNX2X_CNIC_START_ETH_CID(bp) (BNX2X_NUM_NON_CNIC_QUEUES(bp) *\
+/* Use a value high enough to be above all the PFs, with a least significant
+ * nibble of 8, so that when cnic needs to come up with a CID for UIO to use
+ * to calculate a doorbell address according to the old doorbell configuration
+ * scheme (db_msg_sz 1 << 7 * cid + 0x40 DPM offset), it can come up with a
+ * valid number. We must avoid cid 8 for iSCSI, since with this method the
+ * designated UIO cid would come out 0, which has special handling that does
+ * not suit us. Therefore we ceil to the closest cid whose least significant
+ * nibble is 8; if that is 8 itself, we move forward to 0x18.
+ */
+
+#define BNX2X_1st_NON_L2_ETH_CID(bp) (BNX2X_NUM_NON_CNIC_QUEUES(bp) * \
(bp)->max_cos)
+/* number of cids traversed by UIO's DPM addition to doorbell */
+#define UIO_DPM 8
+/* roundup to DPM offset */
+#define UIO_ROUNDUP(bp) (roundup(BNX2X_1st_NON_L2_ETH_CID(bp), \
+ UIO_DPM))
+/* offset to nearest value which has lsb nibble matching DPM */
+#define UIO_CID_OFFSET(bp) ((UIO_ROUNDUP(bp) + UIO_DPM) % \
+ (UIO_DPM * 2))
+/* add offset to rounded-up cid to get a value which could be used with UIO */
+#define UIO_DPM_ALIGN(bp) (UIO_ROUNDUP(bp) + UIO_CID_OFFSET(bp))
+/* but wait - avoid UIO special case for cid 0 */
+#define UIO_DPM_CID0_OFFSET(bp) ((UIO_DPM * 2) * \
+ (UIO_DPM_ALIGN(bp) == UIO_DPM))
+/* Properly DPM-aligned CID, adjusted for the cid 0 special case */
+#define BNX2X_CNIC_START_ETH_CID(bp) (UIO_DPM_ALIGN(bp) + \
+ (UIO_DPM_CID0_OFFSET(bp)))
+/* how many cids were wasted - need this value for cid allocation */
+#define UIO_CID_PAD(bp) (BNX2X_CNIC_START_ETH_CID(bp) - \
+ BNX2X_1st_NON_L2_ETH_CID(bp))
/* iSCSI L2 */
#define BNX2X_ISCSI_ETH_CID(bp) (BNX2X_CNIC_START_ETH_CID(bp))
/* FCoE L2 */
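
A worked example of the arithmetic above (numbers illustrative): if BNX2X_1st_NON_L2_ETH_CID(bp) is 34, UIO_ROUNDUP gives roundup(34, 8) = 40; UIO_CID_OFFSET gives (40 + 8) % 16 = 0, so UIO_DPM_ALIGN is 40 (0x28, least significant nibble 8). Since 40 != 8, UIO_DPM_CID0_OFFSET is 0 and BNX2X_CNIC_START_ETH_CID comes out 40, with UIO_CID_PAD = 40 - 34 = 6 cids wasted. Had the first non-L2 cid been 1, the roundup would land on 8 itself, and the cid-0 special case would push the start to 8 + 16 = 0x18.
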
@@ -1542,7 +1571,6 @@ struct bnx2x {
*/
bool fcoe_init;
- int pm_cap;
int mrrs;
struct delayed_work sp_task;
@@ -1681,10 +1709,11 @@ struct bnx2x {
* Maximum CID count that might be required by the bnx2x:
* Max RSS * Max_Tx_Multi_Cos + FCoE + iSCSI
*/
+
#define BNX2X_L2_CID_COUNT(bp) (BNX2X_NUM_ETH_QUEUES(bp) * BNX2X_MULTI_TX_COS \
- + 2 * CNIC_SUPPORT(bp))
+ + CNIC_SUPPORT(bp) * (2 + UIO_CID_PAD(bp)))
#define BNX2X_L2_MAX_CID(bp) (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS \
- + 2 * CNIC_SUPPORT(bp))
+ + CNIC_SUPPORT(bp) * (2 + UIO_CID_PAD(bp)))
#define L2_ILT_LINES(bp) (DIV_ROUND_UP(BNX2X_L2_CID_COUNT(bp),\
ILT_PAGE_CIDS))
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 2361bf2..61726af 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -490,10 +490,10 @@ static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
NAPI_GRO_CB(skb)->count = num_of_coalesced_segs;
}
-static int bnx2x_alloc_rx_sge(struct bnx2x *bp,
- struct bnx2x_fastpath *fp, u16 index)
+static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+ u16 index, gfp_t gfp_mask)
{
- struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
+ struct page *page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT);
struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
dma_addr_t mapping;
@@ -572,7 +572,7 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
/* If we fail to allocate a substitute page, we simply stop
where we are and drop the whole packet */
- err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
+ err = bnx2x_alloc_rx_sge(bp, fp, sge_idx, GFP_ATOMIC);
if (unlikely(err)) {
bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
return err;
@@ -616,12 +616,17 @@ static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
kfree(data);
}
-static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp)
+static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp, gfp_t gfp_mask)
{
- if (fp->rx_frag_size)
+ if (fp->rx_frag_size) {
+ /* GFP_KERNEL allocations are used only during initialization */
+ if (unlikely(gfp_mask & __GFP_WAIT))
+ return (void *)__get_free_page(gfp_mask);
+
return netdev_alloc_frag(fp->rx_frag_size);
+ }
- return kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
+ return kmalloc(fp->rx_buf_size + NET_SKB_PAD, gfp_mask);
}
#ifdef CONFIG_INET
@@ -701,7 +706,7 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
goto drop;
/* Try to allocate the new data */
- new_data = bnx2x_frag_alloc(fp);
+ new_data = bnx2x_frag_alloc(fp, GFP_ATOMIC);
/* Unmap skb in the pool anyway, as we are going to change
pool entry status to BNX2X_TPA_STOP even if new skb allocation
fails. */
@@ -752,15 +757,15 @@ drop:
bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
}
-static int bnx2x_alloc_rx_data(struct bnx2x *bp,
- struct bnx2x_fastpath *fp, u16 index)
+static int bnx2x_alloc_rx_data(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+ u16 index, gfp_t gfp_mask)
{
u8 *data;
struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
dma_addr_t mapping;
- data = bnx2x_frag_alloc(fp);
+ data = bnx2x_frag_alloc(fp, gfp_mask);
if (unlikely(data == NULL))
return -ENOMEM;
@@ -953,7 +958,8 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
memcpy(skb->data, data + pad, len);
bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
} else {
- if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod) == 0)) {
+ if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod,
+ GFP_ATOMIC) == 0)) {
dma_unmap_single(&bp->pdev->dev,
dma_unmap_addr(rx_buf, mapping),
fp->rx_buf_size,
@@ -1313,7 +1319,8 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
struct sw_rx_bd *first_buf =
&tpa_info->first_buf;
- first_buf->data = bnx2x_frag_alloc(fp);
+ first_buf->data =
+ bnx2x_frag_alloc(fp, GFP_KERNEL);
if (!first_buf->data) {
BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
j);
@@ -1335,7 +1342,8 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
for (i = 0, ring_prod = 0;
i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
- if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
+ if (bnx2x_alloc_rx_sge(bp, fp, ring_prod,
+ GFP_KERNEL) < 0) {
BNX2X_ERR("was only able to allocate %d rx sges\n",
i);
BNX2X_ERR("disabling TPA for queue[%d]\n",
@@ -3000,16 +3008,16 @@ int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
u16 pmcsr;
/* If there is no power capability, silently succeed */
- if (!bp->pm_cap) {
+ if (!bp->pdev->pm_cap) {
BNX2X_DEV_INFO("No power capability. Breaking.\n");
return 0;
}
- pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
+ pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL, &pmcsr);
switch (state) {
case PCI_D0:
- pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
+ pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
PCI_PM_CTRL_PME_STATUS));
@@ -3033,7 +3041,7 @@ int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
if (bp->wol)
pmcsr |= PCI_PM_CTRL_PME_ENABLE;
- pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
+ pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
pmcsr);
/* No more memory access after this point until
@@ -4221,7 +4229,7 @@ static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
* fp->eth_q_stats.rx_skb_alloc_failed = 0
*/
for (i = 0; i < rx_ring_size; i++) {
- if (bnx2x_alloc_rx_data(bp, fp, ring_prod) < 0) {
+ if (bnx2x_alloc_rx_data(bp, fp, ring_prod, GFP_KERNEL) < 0) {
failure_cnt++;
continue;
}
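
The gfp_t plumbing above separates the two allocation contexts: the initial ring fill at ifup time may sleep and now passes GFP_KERNEL, while the refill paths that run from NAPI poll keep GFP_ATOMIC. netdev_alloc_frag() only serves atomic allocations, which is why bnx2x_frag_alloc() falls back to __get_free_page() whenever the caller's mask contains __GFP_WAIT:

	/* illustrative call sites, mirroring the hunks above */
	first_buf->data = bnx2x_frag_alloc(fp, GFP_KERNEL); /* ifup, may sleep */
	new_data = bnx2x_frag_alloc(fp, GFP_ATOMIC);        /* NAPI, atomic */
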
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index 2612e3c..324de5f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -1387,9 +1387,9 @@ static bool bnx2x_is_nvm_accessible(struct bnx2x *bp)
u16 pm = 0;
struct net_device *dev = pci_get_drvdata(bp->pdev);
- if (bp->pm_cap)
+ if (bp->pdev->pm_cap)
rc = pci_read_config_word(bp->pdev,
- bp->pm_cap + PCI_PM_CTRL, &pm);
+ bp->pdev->pm_cap + PCI_PM_CTRL, &pm);
if ((rc && !netif_running(dev)) ||
(!rc && ((pm & PCI_PM_CTRL_STATE_MASK) != (__force u16)PCI_D0)))
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 634a793..a6704b5 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -7645,6 +7645,7 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
bnx2x_init_block(bp, BLOCK_TM, init_phase);
bnx2x_init_block(bp, BLOCK_DORQ, init_phase);
+ REG_WR(bp, DORQ_REG_MODE_ACT, 1); /* no dpm */
bnx2x_iov_init_dq(bp);
@@ -8651,6 +8652,7 @@ u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode)
else if (bp->wol) {
u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
u8 *mac_addr = bp->dev->dev_addr;
+ struct pci_dev *pdev = bp->pdev;
u32 val;
u16 pmc;
@@ -8667,9 +8669,9 @@ u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode)
EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
/* Enable the PME and clear the status */
- pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmc);
+ pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &pmc);
pmc |= PCI_PM_CTRL_PME_ENABLE | PCI_PM_CTRL_PME_STATUS;
- pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, pmc);
+ pci_write_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, pmc);
reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
@@ -10398,7 +10400,7 @@ static void bnx2x_get_common_hwinfo(struct bnx2x *bp)
break;
}
- pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
+ pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_PMC, &pmc);
bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
BNX2X_DEV_INFO("%sWoL capable\n",
@@ -12140,8 +12142,7 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
}
if (IS_PF(bp)) {
- bp->pm_cap = pdev->pm_cap;
- if (bp->pm_cap == 0) {
+ if (!pdev->pm_cap) {
dev_err(&bp->pdev->dev,
"Cannot find power management capability, aborting\n");
rc = -EIO;
@@ -13631,6 +13632,10 @@ void bnx2x_setup_cnic_info(struct bnx2x *bp)
cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp);
cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID(bp);
+ DP(NETIF_MSG_IFUP, "BNX2X_1st_NON_L2_ETH_CID(bp) %x, cp->starting_cid %x, cp->fcoe_init_cid %x, cp->iscsi_l2_cid %x\n",
+ BNX2X_1st_NON_L2_ETH_CID(bp), cp->starting_cid, cp->fcoe_init_cid,
+ cp->iscsi_l2_cid);
+
if (NO_ISCSI_OOO(bp))
cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index b26eb83..2604b62 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -1756,9 +1756,6 @@ void bnx2x_iov_init_dq(struct bnx2x *bp)
REG_WR(bp, DORQ_REG_VF_TYPE_MIN_MCID_0, 0);
REG_WR(bp, DORQ_REG_VF_TYPE_MAX_MCID_0, 0x1ffff);
- /* set the number of VF allowed doorbells to the full DQ range */
- REG_WR(bp, DORQ_REG_VF_NORM_MAX_CID_COUNT, 0x20000);
-
/* set the VF doorbell threshold */
REG_WR(bp, DORQ_REG_VF_USAGE_CT_LIMIT, 4);
}
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index 8142480..99394bd 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -3135,6 +3135,7 @@ static void cnic_service_bnx2x_bh(unsigned long data)
{
struct cnic_dev *dev = (struct cnic_dev *) data;
struct cnic_local *cp = dev->cnic_priv;
+ struct bnx2x *bp = netdev_priv(dev->netdev);
u32 status_idx, new_status_idx;
if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags)))
@@ -3146,7 +3147,7 @@ static void cnic_service_bnx2x_bh(unsigned long data)
CNIC_WR16(dev, cp->kcq1.io_addr,
cp->kcq1.sw_prod_idx + MAX_KCQ_IDX);
- if (cp->ethdev->drv_state & CNIC_DRV_STATE_NO_FCOE) {
+ if (!CNIC_SUPPORTS_FCOE(bp)) {
cp->arm_int(dev, status_idx);
break;
}
@@ -5217,7 +5218,8 @@ static void cnic_init_rings(struct cnic_dev *dev)
"iSCSI CLIENT_SETUP did not complete\n");
cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1);
cnic_ring_ctl(dev, cid, cli, 1);
- *cid_ptr = cid;
+ *cid_ptr = cid >> 4;
+ *(cid_ptr + 1) = cid * bp->db_size;
}
}
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 5701f3d..12d961c 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -3034,6 +3034,7 @@ static bool tg3_phy_led_bug(struct tg3 *tp)
{
switch (tg3_asic_rev(tp)) {
case ASIC_REV_5719:
+ case ASIC_REV_5720:
if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
!tp->pci_fn)
return true;
@@ -16192,12 +16193,12 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
* So explicitly force the chip into D0 here.
*/
pci_read_config_dword(tp->pdev,
- tp->pm_cap + PCI_PM_CTRL,
+ tp->pdev->pm_cap + PCI_PM_CTRL,
&pm_reg);
pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
pci_write_config_dword(tp->pdev,
- tp->pm_cap + PCI_PM_CTRL,
+ tp->pdev->pm_cap + PCI_PM_CTRL,
pm_reg);
/* Also, force SERR#/PERR# in PCI command. */
@@ -17346,7 +17347,6 @@ static int tg3_init_one(struct pci_dev *pdev,
tp = netdev_priv(dev);
tp->pdev = pdev;
tp->dev = dev;
- tp->pm_cap = pdev->pm_cap;
tp->rx_mode = TG3_DEF_RX_MODE;
tp->tx_mode = TG3_DEF_TX_MODE;
tp->irq_sync = 1;
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index ddb8be1..7025780 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -3234,7 +3234,6 @@ struct tg3 {
u8 pci_lat_timer;
int pci_fn;
- int pm_cap;
int msi_cap;
int pcix_cap;
int pcie_readrq;
diff --git a/drivers/net/ethernet/cadence/Kconfig b/drivers/net/ethernet/cadence/Kconfig
index 8030cc0..751d5c7 100644
--- a/drivers/net/ethernet/cadence/Kconfig
+++ b/drivers/net/ethernet/cadence/Kconfig
@@ -22,7 +22,7 @@ if NET_CADENCE
config ARM_AT91_ETHER
tristate "AT91RM9200 Ethernet support"
- depends on GENERIC_HARDIRQS && HAS_DMA
+ depends on HAS_DMA
select MACB
---help---
If you wish to compile a kernel for the AT91RM9200 and enable
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 0d0665c..c73cabd 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -6149,8 +6149,10 @@ static int __init cxgb4_init_module(void)
pr_warn("could not create debugfs entry, continuing\n");
ret = pci_register_driver(&cxgb4_driver);
- if (ret < 0)
+ if (ret < 0) {
debugfs_remove(cxgb4_debugfs_root);
+ destroy_workqueue(workq);
+ }
register_inet6addr_notifier(&cxgb4_inet6addr_notifier);
diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
index 2db6c57..263b92c 100644
--- a/drivers/net/ethernet/dec/tulip/de4x5.c
+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
@@ -1321,7 +1321,7 @@ de4x5_open(struct net_device *dev)
if (request_irq(dev->irq, de4x5_interrupt, IRQF_SHARED,
lp->adapter_name, dev)) {
printk("de4x5_open(): Requested IRQ%d is busy - attemping FAST/SHARE...", dev->irq);
- if (request_irq(dev->irq, de4x5_interrupt, IRQF_DISABLED | IRQF_SHARED,
+ if (request_irq(dev->irq, de4x5_interrupt, IRQF_SHARED,
lp->adapter_name, dev)) {
printk("\n Cannot get IRQ- reconfigure your hardware.\n");
disable_ast(dev);
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 3224d28..100b528 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -2802,7 +2802,7 @@ static int be_vfs_if_create(struct be_adapter *adapter)
struct be_resources res = {0};
struct be_vf_cfg *vf_cfg;
u32 cap_flags, en_flags, vf;
- int status;
+ int status = 0;
cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
BE_IF_FLAGS_MULTICAST;
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index f9aacf5..b2793b9 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -2199,7 +2199,7 @@ fec_probe(struct platform_device *pdev)
goto failed_irq;
}
ret = devm_request_irq(&pdev->dev, irq, fec_enet_interrupt,
- IRQF_DISABLED, pdev->name, ndev);
+ 0, pdev->name, ndev);
if (ret)
goto failed_irq;
}
diff --git a/drivers/net/ethernet/hp/hp100.c b/drivers/net/ethernet/hp/hp100.c
index e3c7c69..91227d0 100644
--- a/drivers/net/ethernet/hp/hp100.c
+++ b/drivers/net/ethernet/hp/hp100.c
@@ -1097,7 +1097,7 @@ static int hp100_open(struct net_device *dev)
/* New: if bus is PCI or EISA, interrupts might be shared interrupts */
if (request_irq(dev->irq, hp100_interrupt,
lp->bus == HP100_BUS_PCI || lp->bus ==
- HP100_BUS_EISA ? IRQF_SHARED : IRQF_DISABLED,
+ HP100_BUS_EISA ? IRQF_SHARED : 0,
"hp100", dev)) {
printk("hp100: %s: unable to get IRQ %d\n", dev->name, dev->irq);
return -EAGAIN;
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index 35853b4..2d1c6bd 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -102,6 +102,19 @@ static int ehea_probe_adapter(struct platform_device *dev);
static int ehea_remove(struct platform_device *dev);
+static struct of_device_id ehea_module_device_table[] = {
+ {
+ .name = "lhea",
+ .compatible = "IBM,lhea",
+ },
+ {
+ .type = "network",
+ .compatible = "IBM,lhea-ethernet",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ehea_module_device_table);
+
static struct of_device_id ehea_device_table[] = {
{
.name = "lhea",
@@ -109,7 +122,6 @@ static struct of_device_id ehea_device_table[] = {
},
{},
};
-MODULE_DEVICE_TABLE(of, ehea_device_table);
static struct platform_driver ehea_driver = {
.driver = {
@@ -1285,7 +1297,7 @@ static int ehea_reg_interrupts(struct net_device *dev)
ret = ibmebus_request_irq(port->qp_eq->attr.ist1,
ehea_qp_aff_irq_handler,
- IRQF_DISABLED, port->int_aff_name, port);
+ 0, port->int_aff_name, port);
if (ret) {
netdev_err(dev, "failed registering irq for qp_aff_irq_handler:ist=%X\n",
port->qp_eq->attr.ist1);
@@ -1303,8 +1315,7 @@ static int ehea_reg_interrupts(struct net_device *dev)
"%s-queue%d", dev->name, i);
ret = ibmebus_request_irq(pr->eq->attr.ist1,
ehea_recv_irq_handler,
- IRQF_DISABLED, pr->int_send_name,
- pr);
+ 0, pr->int_send_name, pr);
if (ret) {
netdev_err(dev, "failed registering irq for ehea_queue port_res_nr:%d, ist=%X\n",
i, pr->eq->attr.ist1);
@@ -3320,7 +3331,7 @@ static int ehea_probe_adapter(struct platform_device *dev)
}
ret = ibmebus_request_irq(adapter->neq->attr.ist1,
- ehea_interrupt_neq, IRQF_DISABLED,
+ ehea_interrupt_neq, 0,
"ehea_neq", adapter);
if (ret) {
dev_err(&dev->dev, "requesting NEQ IRQ failed\n");
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index f0e7ed2..149ac85 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -241,4 +241,22 @@ config IXGBEVF
will be called ixgbevf. MSI-X interrupt support is required
for this driver to work correctly.
+config I40E
+ tristate "Intel(R) Ethernet Controller XL710 Family support"
+ depends on PCI
+ ---help---
+ This driver supports Intel(R) Ethernet Controller XL710 Family of
+ devices. For more information on how to identify your adapter, go
+ to the Adapter & Driver ID Guide at:
+
+ <http://support.intel.com/support/network/adapter/pro100/21397.htm>
+
+ For general information and support, go to the Intel support
+ website at:
+
+ <http://support.intel.com>
+
+ To compile this driver as a module, choose M here. The module
+ will be called i40e.
+
endif # NET_VENDOR_INTEL
diff --git a/drivers/net/ethernet/intel/Makefile b/drivers/net/ethernet/intel/Makefile
index c8210e6..5bae933 100644
--- a/drivers/net/ethernet/intel/Makefile
+++ b/drivers/net/ethernet/intel/Makefile
@@ -9,4 +9,5 @@ obj-$(CONFIG_IGB) += igb/
obj-$(CONFIG_IGBVF) += igbvf/
obj-$(CONFIG_IXGBE) += ixgbe/
obj-$(CONFIG_IXGBEVF) += ixgbevf/
+obj-$(CONFIG_I40E) += i40e/
obj-$(CONFIG_IXGB) += ixgb/
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index a8633b8..d14c8f5 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -922,6 +922,14 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
else
mask &= ~(1 << 30);
}
+ if (mac->type == e1000_pch2lan) {
+ /* SHRAH[0,1,2] different than previous */
+ if (i == 7)
+ mask &= 0xFFF4FFFF;
+ /* SHRAH[3] different than SHRAH[0,1,2] */
+ if (i == 10)
+ mask |= (1 << 30);
+ }
REG_PATTERN_TEST_ARRAY(E1000_RA, ((i << 1) + 1), mask,
0xFFFFFFFF);
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index af08188..42f0f67 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -1371,7 +1371,10 @@ static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
return;
}
- if (index < hw->mac.rar_entry_count) {
+ /* RAR[1-6] are owned by manageability. Skip those and program the
+ * next address into the SHRA register array.
+ */
+ if (index < (u32)(hw->mac.rar_entry_count - 6)) {
s32 ret_val;
ret_val = e1000_acquire_swflag_ich8lan(hw);
@@ -1962,8 +1965,8 @@ void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw)
if (ret_val)
goto release;
- /* Copy both RAL/H (rar_entry_count) and SHRAL/H (+4) to PHY */
- for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) {
+ /* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
+ for (i = 0; i < (hw->mac.rar_entry_count); i++) {
mac_reg = er32(RAL(i));
hw->phy.ops.write_reg_page(hw, BM_RAR_L(i),
(u16)(mac_reg & 0xFFFF));
@@ -2007,10 +2010,10 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
return ret_val;
if (enable) {
- /* Write Rx addresses (rar_entry_count for RAL/H, +4 for
+ /* Write Rx addresses (rar_entry_count for RAL/H, and
* SHRAL/H) and initial CRC values to the MAC
*/
- for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) {
+ for (i = 0; i < hw->mac.rar_entry_count; i++) {
u8 mac_addr[ETH_ALEN] = { 0 };
u32 addr_high, addr_low;
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h
index 5986569..217090d 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.h
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h
@@ -98,7 +98,7 @@
#define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL
#define E1000_ICH_RAR_ENTRIES 7
-#define E1000_PCH2_RAR_ENTRIES 5 /* RAR[0], SHRA[0-3] */
+#define E1000_PCH2_RAR_ENTRIES 11 /* RAR[0-6], SHRA[0-3] */
#define E1000_PCH_LPT_RAR_ENTRIES 12 /* RAR[0], SHRA[0-10] */
#define PHY_PAGE_SHIFT 5
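
The arithmetic behind the new count: on pch2lan parts the receive address array is RAR[0] plus the manageability-owned RAR[1-6] plus SHRA[0-3], i.e. 7 + 4 = 11 entries. With rar_entry_count now covering the whole array, the copy loops above drop their "+ 4" fixups, and e1000_rar_set_pch2lan() subtracts 6 so that software-programmed addresses skip the manageability range and land in the SHRA registers.
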
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index e87e9b0..4ef7867 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -4868,7 +4868,7 @@ static void e1000_watchdog_task(struct work_struct *work)
*/
if ((hw->phy.type == e1000_phy_igp_3 ||
hw->phy.type == e1000_phy_bm) &&
- (hw->mac.autoneg == true) &&
+ hw->mac.autoneg &&
(adapter->link_speed == SPEED_10 ||
adapter->link_speed == SPEED_100) &&
(adapter->link_duplex == HALF_DUPLEX)) {
diff --git a/drivers/net/ethernet/intel/i40e/Makefile b/drivers/net/ethernet/intel/i40e/Makefile
new file mode 100644
index 0000000..479b2c4
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/Makefile
@@ -0,0 +1,44 @@
+################################################################################
+#
+# Intel Ethernet Controller XL710 Family Linux Driver
+# Copyright(c) 2013 Intel Corporation.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+# more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# The full GNU General Public License is included in this distribution in
+# the file called "COPYING".
+#
+# Contact Information:
+# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+#
+################################################################################
+
+#
+# Makefile for the Intel(R) Ethernet Connection XL710 (i40e.ko) driver
+#
+
+obj-$(CONFIG_I40E) += i40e.o
+
+i40e-objs := i40e_main.o \
+ i40e_ethtool.o \
+ i40e_adminq.o \
+ i40e_common.o \
+ i40e_hmc.o \
+ i40e_lan_hmc.o \
+ i40e_nvm.o \
+ i40e_debugfs.o \
+ i40e_diag.o \
+ i40e_txrx.o \
+ i40e_virtchnl_pf.o
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
new file mode 100644
index 0000000..b5252eb
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -0,0 +1,558 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_H_
+#define _I40E_H_
+
+#include <net/tcp.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/aer.h>
+#include <linux/netdevice.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/string.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/sctp.h>
+#include <linux/pkt_sched.h>
+#include <linux/ipv6.h>
+#include <linux/version.h>
+#include <net/checksum.h>
+#include <net/ip6_checksum.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+#include "i40e_type.h"
+#include "i40e_prototype.h"
+#include "i40e_virtchnl.h"
+#include "i40e_virtchnl_pf.h"
+#include "i40e_txrx.h"
+
+/* Useful i40e defaults */
+#define I40E_BASE_PF_SEID 16
+#define I40E_BASE_VSI_SEID 512
+#define I40E_BASE_VEB_SEID 288
+#define I40E_MAX_VEB 16
+
+#define I40E_MAX_NUM_DESCRIPTORS 4096
+#define I40E_MAX_REGISTER 0x0038FFFF
+#define I40E_DEFAULT_NUM_DESCRIPTORS 512
+#define I40E_REQ_DESCRIPTOR_MULTIPLE 32
+#define I40E_MIN_NUM_DESCRIPTORS 64
+#define I40E_MIN_MSIX 2
+#define I40E_DEFAULT_NUM_VMDQ_VSI 8 /* max 256 VSIs */
+#define I40E_DEFAULT_QUEUES_PER_VMDQ 2 /* max 16 qps */
+#define I40E_DEFAULT_QUEUES_PER_VF 4
+#define I40E_DEFAULT_QUEUES_PER_TC 1 /* should be a power of 2 */
+#define I40E_FDIR_RING 0
+#define I40E_FDIR_RING_COUNT 32
+#define I40E_MAX_AQ_BUF_SIZE 4096
+#define I40E_AQ_LEN 32
+#define I40E_AQ_WORK_LIMIT 16
+#define I40E_MAX_USER_PRIORITY 8
+#define I40E_DEFAULT_MSG_ENABLE 4
+
+#define I40E_NVM_VERSION_LO_SHIFT 0
+#define I40E_NVM_VERSION_LO_MASK (0xf << I40E_NVM_VERSION_LO_SHIFT)
+#define I40E_NVM_VERSION_MID_SHIFT 4
+#define I40E_NVM_VERSION_MID_MASK (0xff << I40E_NVM_VERSION_MID_SHIFT)
+#define I40E_NVM_VERSION_HI_SHIFT 12
+#define I40E_NVM_VERSION_HI_MASK (0xf << I40E_NVM_VERSION_HI_SHIFT)
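+/* Example: with this 4/8/4-bit packing an NVM word of 0x1234 decodes to
+ * hi=1, mid=0x23 (35), lo=4, which i40e_fw_version_str() prints as
+ * "n01.35.04"
+ */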
+
+/* magic for getting defines into strings */
+#define STRINGIFY(foo) #foo
+#define XSTRINGIFY(bar) STRINGIFY(bar)
+
+#ifndef ARCH_HAS_PREFETCH
+#define prefetch(X)
+#endif
+
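+/* When 16-byte Rx descriptors are enabled, the ring is indexed in 16-byte
+ * strides and the entry is cast to the 32-byte union, so only fields in
+ * the first 16 bytes are meaningful in that mode
+ */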
+#define I40E_RX_DESC(R, i) \
+ ((ring_is_16byte_desc_enabled(R)) \
+ ? (union i40e_32byte_rx_desc *) \
+ (&(((union i40e_16byte_rx_desc *)((R)->desc))[i])) \
+ : (&(((union i40e_32byte_rx_desc *)((R)->desc))[i])))
+#define I40E_TX_DESC(R, i) \
+ (&(((struct i40e_tx_desc *)((R)->desc))[i]))
+#define I40E_TX_CTXTDESC(R, i) \
+ (&(((struct i40e_tx_context_desc *)((R)->desc))[i]))
+#define I40E_TX_FDIRDESC(R, i) \
+ (&(((struct i40e_filter_program_desc *)((R)->desc))[i]))
+
+/* default to trying for four seconds */
+#define I40E_TRY_LINK_TIMEOUT (4 * HZ)
+
+/* driver state bits, for use with the atomic {set,test,clear}_bit() helpers */
+enum i40e_state_t {
+ __I40E_TESTING,
+ __I40E_CONFIG_BUSY,
+ __I40E_CONFIG_DONE,
+ __I40E_DOWN,
+ __I40E_NEEDS_RESTART,
+ __I40E_SERVICE_SCHED,
+ __I40E_ADMINQ_EVENT_PENDING,
+ __I40E_MDD_EVENT_PENDING,
+ __I40E_VFLR_EVENT_PENDING,
+ __I40E_RESET_RECOVERY_PENDING,
+ __I40E_RESET_INTR_RECEIVED,
+ __I40E_REINIT_REQUESTED,
+ __I40E_PF_RESET_REQUESTED,
+ __I40E_CORE_RESET_REQUESTED,
+ __I40E_GLOBAL_RESET_REQUESTED,
+ __I40E_FILTER_OVERFLOW_PROMISC,
+};
+
+enum i40e_interrupt_policy {
+ I40E_INTERRUPT_BEST_CASE,
+ I40E_INTERRUPT_MEDIUM,
+ I40E_INTERRUPT_LOWEST
+};
+
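+/* Tracks allocations ("lumps") handed out of a shared pile of queue pairs
+ * or interrupt vectors; allocated list entries carry their owner id with
+ * I40E_PILE_VALID_BIT set
+ */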
+struct i40e_lump_tracking {
+ u16 num_entries;
+ u16 search_hint;
+ u16 list[0];
+#define I40E_PILE_VALID_BIT 0x8000
+};
+
+#define I40E_DEFAULT_ATR_SAMPLE_RATE 20
+#define I40E_FDIR_MAX_RAW_PACKET_LOOKUP 512
+struct i40e_fdir_data {
+ u16 q_index;
+ u8 flex_off;
+ u8 pctype;
+ u16 dest_vsi;
+ u8 dest_ctl;
+ u8 fd_status;
+ u16 cnt_index;
+ u32 fd_id;
+ u8 *raw_packet;
+};
+
+#define I40E_DCB_PRIO_TYPE_STRICT 0
+#define I40E_DCB_PRIO_TYPE_ETS 1
+#define I40E_DCB_STRICT_PRIO_CREDITS 127
+/* DCB per TC information data structure */
+struct i40e_tc_info {
+ u16 qoffset; /* Queue offset from base queue */
+ u16 qcount; /* Total Queues */
+ u8 netdev_tc; /* Netdev TC index if netdev associated */
+};
+
+/* TC configuration data structure */
+struct i40e_tc_configuration {
+ u8 numtc; /* Total number of enabled TCs */
+ u8 enabled_tc; /* TC map */
+ struct i40e_tc_info tc_info[I40E_MAX_TRAFFIC_CLASS];
+};
+
+/* struct that defines the Ethernet device */
+struct i40e_pf {
+ struct pci_dev *pdev;
+ struct i40e_hw hw;
+ unsigned long state;
+ unsigned long link_check_timeout;
+ struct msix_entry *msix_entries;
+ u16 num_msix_entries;
+ bool fc_autoneg_status;
+
+ u16 eeprom_version;
+ u16 num_vmdq_vsis; /* num vmdq pools this pf has set up */
+ u16 num_vmdq_qps; /* num queue pairs per vmdq pool */
+ u16 num_vmdq_msix; /* num queue vectors per vmdq pool */
+	u16 num_req_vfs;	   /* num VFs requested for this PF */
+ u16 num_vf_qps; /* num queue pairs per vf */
+ u16 num_tc_qps; /* num queue pairs per TC */
+ u16 num_lan_qps; /* num lan queues this pf has set up */
+ u16 num_lan_msix; /* num queue vectors for the base pf vsi */
+ u16 rss_size; /* num queues in the RSS array */
+ u16 rss_size_max; /* HW defined max RSS queues */
+ u16 fdir_pf_filter_count; /* num of guaranteed filters for this PF */
+ u8 atr_sample_rate;
+
+ enum i40e_interrupt_policy int_policy;
+ u16 rx_itr_default;
+ u16 tx_itr_default;
+ u16 msg_enable;
+ char misc_int_name[IFNAMSIZ + 9];
+ u16 adminq_work_limit; /* num of admin receive queue desc to process */
+ int service_timer_period;
+ struct timer_list service_timer;
+ struct work_struct service_task;
+
+ u64 flags;
+#define I40E_FLAG_RX_CSUM_ENABLED (u64)(1 << 1)
+#define I40E_FLAG_MSI_ENABLED (u64)(1 << 2)
+#define I40E_FLAG_MSIX_ENABLED (u64)(1 << 3)
+#define I40E_FLAG_RX_1BUF_ENABLED (u64)(1 << 4)
+#define I40E_FLAG_RX_PS_ENABLED (u64)(1 << 5)
+#define I40E_FLAG_RSS_ENABLED (u64)(1 << 6)
+#define I40E_FLAG_MQ_ENABLED (u64)(1 << 7)
+#define I40E_FLAG_VMDQ_ENABLED (u64)(1 << 8)
+#define I40E_FLAG_FDIR_REQUIRES_REINIT (u64)(1 << 9)
+#define I40E_FLAG_NEED_LINK_UPDATE (u64)(1 << 10)
+#define I40E_FLAG_IN_NETPOLL (u64)(1 << 13)
+#define I40E_FLAG_16BYTE_RX_DESC_ENABLED (u64)(1 << 14)
+#define I40E_FLAG_CLEAN_ADMINQ (u64)(1 << 15)
+#define I40E_FLAG_FILTER_SYNC (u64)(1 << 16)
+#define I40E_FLAG_PROCESS_MDD_EVENT (u64)(1 << 18)
+#define I40E_FLAG_PROCESS_VFLR_EVENT (u64)(1 << 19)
+#define I40E_FLAG_SRIOV_ENABLED (u64)(1 << 20)
+#define I40E_FLAG_DCB_ENABLED (u64)(1 << 21)
+#define I40E_FLAG_FDIR_ENABLED (u64)(1 << 22)
+#define I40E_FLAG_FDIR_ATR_ENABLED (u64)(1 << 23)
+#define I40E_FLAG_MFP_ENABLED (u64)(1 << 27)
+
+ u16 num_tx_queues;
+ u16 num_rx_queues;
+
+ bool stat_offsets_loaded;
+ struct i40e_hw_port_stats stats;
+ struct i40e_hw_port_stats stats_offsets;
+ u32 tx_timeout_count;
+ u32 tx_timeout_recovery_level;
+ unsigned long tx_timeout_last_recovery;
+ u32 hw_csum_rx_error;
+ u32 led_status;
+ u16 corer_count; /* Core reset count */
+ u16 globr_count; /* Global reset count */
+ u16 empr_count; /* EMP reset count */
+ u16 pfr_count; /* PF reset count */
+
+ struct mutex switch_mutex;
+ u16 lan_vsi; /* our default LAN VSI */
+ u16 lan_veb; /* initial relay, if exists */
+#define I40E_NO_VEB 0xffff
+#define I40E_NO_VSI 0xffff
+ u16 next_vsi; /* Next unallocated VSI - 0-based! */
+ struct i40e_vsi **vsi;
+ struct i40e_veb *veb[I40E_MAX_VEB];
+
+ struct i40e_lump_tracking *qp_pile;
+ struct i40e_lump_tracking *irq_pile;
+
+ /* switch config info */
+ u16 pf_seid;
+ u16 main_vsi_seid;
+ u16 mac_seid;
+ struct i40e_aqc_get_switch_config_data *sw_config;
+ struct kobject *switch_kobj;
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *i40e_dbg_pf;
+#endif /* CONFIG_DEBUG_FS */
+
+ /* sr-iov config info */
+ struct i40e_vf *vf;
+ int num_alloc_vfs; /* actual number of VFs allocated */
+ u32 vf_aq_requests;
+
+ /* DCBx/DCBNL capability for PF that indicates
+ * whether DCBx is managed by firmware or host
+ * based agent (LLDPAD). Also, indicates what
+ * flavor of DCBx protocol (IEEE/CEE) is supported
+ * by the device. For now we're supporting IEEE
+ * mode only.
+ */
+ u16 dcbx_cap;
+
+ u32 fcoe_hmc_filt_num;
+ u32 fcoe_hmc_cntx_num;
+ struct i40e_filter_control_settings filter_settings;
+};
+
+struct i40e_mac_filter {
+ struct list_head list;
+ u8 macaddr[ETH_ALEN];
+#define I40E_VLAN_ANY -1
+ s16 vlan;
+ u8 counter; /* number of instances of this filter */
+ bool is_vf; /* filter belongs to a VF */
+ bool is_netdev; /* filter belongs to a netdev */
+ bool changed; /* filter needs to be sync'd to the HW */
+};
+
+struct i40e_veb {
+ struct i40e_pf *pf;
+ u16 idx;
+ u16 veb_idx; /* index of VEB parent */
+ u16 seid;
+ u16 uplink_seid;
+	u16 stats_idx;		   /* index of this VEB's stats block */
+ u8 enabled_tc;
+ u16 flags;
+ u16 bw_limit;
+ u8 bw_max_quanta;
+ bool is_abs_credits;
+ u8 bw_tc_share_credits[I40E_MAX_TRAFFIC_CLASS];
+ u16 bw_tc_limit_credits[I40E_MAX_TRAFFIC_CLASS];
+ u8 bw_tc_max_quanta[I40E_MAX_TRAFFIC_CLASS];
+ struct kobject *kobj;
+ bool stat_offsets_loaded;
+ struct i40e_eth_stats stats;
+ struct i40e_eth_stats stats_offsets;
+};
+
+/* struct that defines a VSI, associated with a dev */
+struct i40e_vsi {
+ struct net_device *netdev;
+ unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+ bool netdev_registered;
+ bool stat_offsets_loaded;
+
+ u32 current_netdev_flags;
+ unsigned long state;
+#define I40E_VSI_FLAG_FILTER_CHANGED (1<<0)
+#define I40E_VSI_FLAG_VEB_OWNER (1<<1)
+ unsigned long flags;
+
+ struct list_head mac_filter_list;
+
+ /* VSI stats */
+ struct rtnl_link_stats64 net_stats;
+ struct rtnl_link_stats64 net_stats_offsets;
+ struct i40e_eth_stats eth_stats;
+ struct i40e_eth_stats eth_stats_offsets;
+ u32 tx_restart;
+ u32 tx_busy;
+ u32 rx_buf_failed;
+ u32 rx_page_failed;
+
+ /* These are arrays of rings, allocated at run-time */
+ struct i40e_ring *rx_rings;
+ struct i40e_ring *tx_rings;
+
+ u16 work_limit;
+ /* high bit set means dynamic, use accessor routines to read/write.
+ * hardware only supports 2us resolution for the ITR registers.
+ * these values always store the USER setting, and must be converted
+ * before programming to a register.
+ */
+ u16 rx_itr_setting;
+ u16 tx_itr_setting;
+
+ u16 max_frame;
+ u16 rx_hdr_len;
+ u16 rx_buf_len;
+ u8 dtype;
+
+ /* List of q_vectors allocated to this VSI */
+ struct i40e_q_vector *q_vectors;
+ int num_q_vectors;
+ int base_vector;
+
+ u16 seid; /* HW index of this VSI (absolute index) */
+ u16 id; /* VSI number */
+ u16 uplink_seid;
+
+ u16 base_queue; /* vsi's first queue in hw array */
+ u16 alloc_queue_pairs; /* Allocated Tx/Rx queues */
+ u16 num_queue_pairs; /* Used tx and rx pairs */
+ u16 num_desc;
+ enum i40e_vsi_type type; /* VSI type, e.g., LAN, FCoE, etc */
+ u16 vf_id; /* Virtual function ID for SRIOV VSIs */
+
+ struct i40e_tc_configuration tc_config;
+ struct i40e_aqc_vsi_properties_data info;
+
+ /* VSI BW limit (absolute across all TCs) */
+ u16 bw_limit; /* VSI BW Limit (0 = disabled) */
+ u8 bw_max_quanta; /* Max Quanta when BW limit is enabled */
+
+ /* Relative TC credits across VSIs */
+ u8 bw_ets_share_credits[I40E_MAX_TRAFFIC_CLASS];
+ /* TC BW limit credits within VSI */
+ u16 bw_ets_limit_credits[I40E_MAX_TRAFFIC_CLASS];
+ /* TC BW limit max quanta within VSI */
+ u8 bw_ets_max_quanta[I40E_MAX_TRAFFIC_CLASS];
+
+ struct i40e_pf *back; /* Backreference to associated PF */
+ u16 idx; /* index in pf->vsi[] */
+ u16 veb_idx; /* index of VEB parent */
+ struct kobject *kobj; /* sysfs object */
+
+ /* VSI specific handlers */
+ irqreturn_t (*irq_handler)(int irq, void *data);
+} ____cacheline_internodealigned_in_smp;
+
+struct i40e_netdev_priv {
+ struct i40e_vsi *vsi;
+};
+
+/* struct that defines an interrupt vector */
+struct i40e_q_vector {
+ struct i40e_vsi *vsi;
+
+ u16 v_idx; /* index in the vsi->q_vector array. */
+ u16 reg_idx; /* register index of the interrupt */
+
+ struct napi_struct napi;
+
+ struct i40e_ring_container rx;
+ struct i40e_ring_container tx;
+
+ u8 num_ringpairs; /* total number of ring pairs in vector */
+
+ char name[IFNAMSIZ + 9];
+ cpumask_t affinity_mask;
+} ____cacheline_internodealigned_in_smp;
+
+/* lan device */
+struct i40e_device {
+ struct list_head list;
+ struct i40e_pf *pf;
+};
+
+/**
+ * i40e_fw_version_str - format the FW and NVM versions into one string
+ * @hw: ptr to the hardware info
+ **/
+static inline char *i40e_fw_version_str(struct i40e_hw *hw)
+{
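+	/* note: the static buffer below makes this helper non-reentrant */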
+ static char buf[32];
+
+ snprintf(buf, sizeof(buf),
+ "f%d.%d a%d.%d n%02d.%02d.%02d e%08x",
+ hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
+ hw->aq.api_maj_ver, hw->aq.api_min_ver,
+ (hw->nvm.version & I40E_NVM_VERSION_HI_MASK)
+ >> I40E_NVM_VERSION_HI_SHIFT,
+ (hw->nvm.version & I40E_NVM_VERSION_MID_MASK)
+ >> I40E_NVM_VERSION_MID_SHIFT,
+ (hw->nvm.version & I40E_NVM_VERSION_LO_MASK)
+ >> I40E_NVM_VERSION_LO_SHIFT,
+ hw->nvm.eetrack);
+
+ return buf;
+}
+
+/**
+ * i40e_netdev_to_pf - Retrieve the PF struct for given netdev
+ * @netdev: the corresponding netdev
+ *
+ * Return the PF struct for the given netdev
+ **/
+static inline struct i40e_pf *i40e_netdev_to_pf(struct net_device *netdev)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+
+ return vsi->back;
+}
+
+static inline void i40e_vsi_setup_irqhandler(struct i40e_vsi *vsi,
+ irqreturn_t (*irq_handler)(int, void *))
+{
+ vsi->irq_handler = irq_handler;
+}
+
+/**
+ * i40e_rx_is_programming_status - check for programming status descriptor
+ * @qw: the first quad word of the program status descriptor
+ *
+ * The descriptor length field indicates what kind of descriptor this is:
+ * if it holds I40E_RX_PROG_STATUS_DESC_LENGTH, this is a programming
+ * status descriptor for flow director or FCoE; otherwise it is a packet
+ * descriptor.
+ **/
+static inline bool i40e_rx_is_programming_status(u64 qw)
+{
+ return I40E_RX_PROG_STATUS_DESC_LENGTH ==
+ (qw >> I40E_RX_PROG_STATUS_DESC_LENGTH_SHIFT);
+}
+
+/* needed by i40e_ethtool.c */
+int i40e_up(struct i40e_vsi *vsi);
+void i40e_down(struct i40e_vsi *vsi);
+extern const char i40e_driver_name[];
+extern const char i40e_driver_version_str[];
+void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags);
+void i40e_update_stats(struct i40e_vsi *vsi);
+void i40e_update_eth_stats(struct i40e_vsi *vsi);
+struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi);
+int i40e_fetch_switch_configuration(struct i40e_pf *pf,
+ bool printconfig);
+
+/* needed by i40e_main.c */
+void i40e_add_fdir_filter(struct i40e_fdir_data fdir_data,
+ struct i40e_ring *tx_ring);
+void i40e_add_remove_filter(struct i40e_fdir_data fdir_data,
+ struct i40e_ring *tx_ring);
+void i40e_update_fdir_filter(struct i40e_fdir_data fdir_data,
+ struct i40e_ring *tx_ring);
+int i40e_program_fdir_filter(struct i40e_fdir_data *fdir_data,
+ struct i40e_pf *pf, bool add);
+
+void i40e_set_ethtool_ops(struct net_device *netdev);
+struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
+ u8 *macaddr, s16 vlan,
+ bool is_vf, bool is_netdev);
+void i40e_del_filter(struct i40e_vsi *vsi, u8 *macaddr, s16 vlan,
+ bool is_vf, bool is_netdev);
+int i40e_sync_vsi_filters(struct i40e_vsi *vsi);
+struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
+ u16 uplink, u32 param1);
+int i40e_vsi_release(struct i40e_vsi *vsi);
+struct i40e_vsi *i40e_vsi_lookup(struct i40e_pf *pf, enum i40e_vsi_type type,
+ struct i40e_vsi *start_vsi);
+struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags, u16 uplink_seid,
+ u16 downlink_seid, u8 enabled_tc);
+void i40e_veb_release(struct i40e_veb *veb);
+
+i40e_status i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid);
+void i40e_vsi_remove_pvid(struct i40e_vsi *vsi);
+void i40e_vsi_reset_stats(struct i40e_vsi *vsi);
+void i40e_pf_reset_stats(struct i40e_pf *pf);
+#ifdef CONFIG_DEBUG_FS
+void i40e_dbg_pf_init(struct i40e_pf *pf);
+void i40e_dbg_pf_exit(struct i40e_pf *pf);
+void i40e_dbg_init(void);
+void i40e_dbg_exit(void);
+#else
+static inline void i40e_dbg_pf_init(struct i40e_pf *pf) {}
+static inline void i40e_dbg_pf_exit(struct i40e_pf *pf) {}
+static inline void i40e_dbg_init(void) {}
+static inline void i40e_dbg_exit(void) {}
+#endif /* CONFIG_DEBUG_FS*/
+void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector);
+int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
+void i40e_vlan_stripping_disable(struct i40e_vsi *vsi);
+int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid);
+int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid);
+struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr,
+ bool is_vf, bool is_netdev);
+bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi);
+struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr,
+ bool is_vf, bool is_netdev);
+void i40e_vlan_stripping_enable(struct i40e_vsi *vsi);
+
+#endif /* _I40E_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
new file mode 100644
index 0000000..0c524fa
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -0,0 +1,983 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#include "i40e_status.h"
+#include "i40e_type.h"
+#include "i40e_register.h"
+#include "i40e_adminq.h"
+#include "i40e_prototype.h"
+
+/**
+ * i40e_adminq_init_regs - Initialize AdminQ registers
+ * @hw: pointer to the hardware structure
+ *
+ * Sets up the head and tail register offsets in the local hw->aq structure
+ **/
+static void i40e_adminq_init_regs(struct i40e_hw *hw)
+{
+ /* set head and tail registers in our local struct */
+ if (hw->mac.type == I40E_MAC_VF) {
+ hw->aq.asq.tail = I40E_VF_ATQT1;
+ hw->aq.asq.head = I40E_VF_ATQH1;
+ hw->aq.arq.tail = I40E_VF_ARQT1;
+ hw->aq.arq.head = I40E_VF_ARQH1;
+ } else {
+ hw->aq.asq.tail = I40E_PF_ATQT;
+ hw->aq.asq.head = I40E_PF_ATQH;
+ hw->aq.arq.tail = I40E_PF_ARQT;
+ hw->aq.arq.head = I40E_PF_ARQH;
+ }
+}
+
+/**
+ * i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings
+ * @hw: pointer to the hardware structure
+ **/
+static i40e_status i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
+{
+ i40e_status ret_code;
+ struct i40e_virt_mem mem;
+
+ ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq_mem,
+ i40e_mem_atq_ring,
+ (hw->aq.num_asq_entries *
+ sizeof(struct i40e_aq_desc)),
+ I40E_ADMINQ_DESC_ALIGNMENT);
+ if (ret_code)
+ return ret_code;
+
+ hw->aq.asq.desc = hw->aq.asq_mem.va;
+ hw->aq.asq.dma_addr = hw->aq.asq_mem.pa;
+
+ ret_code = i40e_allocate_virt_mem(hw, &mem,
+ (hw->aq.num_asq_entries *
+ sizeof(struct i40e_asq_cmd_details)));
+ if (ret_code) {
+ i40e_free_dma_mem(hw, &hw->aq.asq_mem);
+ hw->aq.asq_mem.va = NULL;
+ hw->aq.asq_mem.pa = 0;
+ return ret_code;
+ }
+
+ hw->aq.asq.details = mem.va;
+
+ return ret_code;
+}
+
+/**
+ * i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
+ * @hw: pointer to the hardware structure
+ **/
+static i40e_status i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
+{
+ i40e_status ret_code;
+
+ ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq_mem,
+ i40e_mem_arq_ring,
+ (hw->aq.num_arq_entries *
+ sizeof(struct i40e_aq_desc)),
+ I40E_ADMINQ_DESC_ALIGNMENT);
+ if (ret_code)
+ return ret_code;
+
+ hw->aq.arq.desc = hw->aq.arq_mem.va;
+ hw->aq.arq.dma_addr = hw->aq.arq_mem.pa;
+
+ return ret_code;
+}
+
+/**
+ * i40e_free_adminq_asq - Free Admin Queue send rings
+ * @hw: pointer to the hardware structure
+ *
+ * This assumes the posted send buffers have already been cleaned
+ * and de-allocated
+ **/
+static void i40e_free_adminq_asq(struct i40e_hw *hw)
+{
+ struct i40e_virt_mem mem;
+
+ i40e_free_dma_mem(hw, &hw->aq.asq_mem);
+ hw->aq.asq_mem.va = NULL;
+ hw->aq.asq_mem.pa = 0;
+ mem.va = hw->aq.asq.details;
+ i40e_free_virt_mem(hw, &mem);
+ hw->aq.asq.details = NULL;
+}
+
+/**
+ * i40e_free_adminq_arq - Free Admin Queue receive rings
+ * @hw: pointer to the hardware structure
+ *
+ * This assumes the posted receive buffers have already been cleaned
+ * and de-allocated
+ **/
+static void i40e_free_adminq_arq(struct i40e_hw *hw)
+{
+ i40e_free_dma_mem(hw, &hw->aq.arq_mem);
+ hw->aq.arq_mem.va = NULL;
+ hw->aq.arq_mem.pa = 0;
+}
+
+/**
+ * i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
+ * @hw: pointer to the hardware structure
+ **/
+static i40e_status i40e_alloc_arq_bufs(struct i40e_hw *hw)
+{
+ i40e_status ret_code;
+ struct i40e_aq_desc *desc;
+ struct i40e_virt_mem mem;
+ struct i40e_dma_mem *bi;
+ int i;
+
+ /* We'll be allocating the buffer info memory first, then we can
+ * allocate the mapped buffers for the event processing
+ */
+
+ /* buffer_info structures do not need alignment */
+ ret_code = i40e_allocate_virt_mem(hw, &mem, (hw->aq.num_arq_entries *
+ sizeof(struct i40e_dma_mem)));
+ if (ret_code)
+ goto alloc_arq_bufs;
+ hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)mem.va;
+
+ /* allocate the mapped buffers */
+ for (i = 0; i < hw->aq.num_arq_entries; i++) {
+ bi = &hw->aq.arq.r.arq_bi[i];
+ ret_code = i40e_allocate_dma_mem(hw, bi,
+ i40e_mem_arq_buf,
+ hw->aq.arq_buf_size,
+ I40E_ADMINQ_DESC_ALIGNMENT);
+ if (ret_code)
+ goto unwind_alloc_arq_bufs;
+
+ /* now configure the descriptors for use */
+ desc = I40E_ADMINQ_DESC(hw->aq.arq, i);
+
+ desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF);
+ if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
+ desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB);
+ desc->opcode = 0;
+		/* This is in accordance with the Admin Queue design: there is
+		 * no register for buffer size configuration
+		 */
+ desc->datalen = cpu_to_le16((u16)bi->size);
+ desc->retval = 0;
+ desc->cookie_high = 0;
+ desc->cookie_low = 0;
+ desc->params.external.addr_high =
+ cpu_to_le32(upper_32_bits(bi->pa));
+ desc->params.external.addr_low =
+ cpu_to_le32(lower_32_bits(bi->pa));
+ desc->params.external.param0 = 0;
+ desc->params.external.param1 = 0;
+ }
+
+alloc_arq_bufs:
+ return ret_code;
+
+unwind_alloc_arq_bufs:
+ /* don't try to free the one that failed... */
+ i--;
+ for (; i >= 0; i--)
+ i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
+ mem.va = hw->aq.arq.r.arq_bi;
+ i40e_free_virt_mem(hw, &mem);
+
+ return ret_code;
+}
+
+/**
+ * i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue
+ * @hw: pointer to the hardware structure
+ **/
+static i40e_status i40e_alloc_asq_bufs(struct i40e_hw *hw)
+{
+ i40e_status ret_code;
+ struct i40e_virt_mem mem;
+ struct i40e_dma_mem *bi;
+ int i;
+
+ /* No mapped memory needed yet, just the buffer info structures */
+ ret_code = i40e_allocate_virt_mem(hw, &mem, (hw->aq.num_asq_entries *
+ sizeof(struct i40e_dma_mem)));
+ if (ret_code)
+ goto alloc_asq_bufs;
+ hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)mem.va;
+
+ /* allocate the mapped buffers */
+ for (i = 0; i < hw->aq.num_asq_entries; i++) {
+ bi = &hw->aq.asq.r.asq_bi[i];
+ ret_code = i40e_allocate_dma_mem(hw, bi,
+ i40e_mem_asq_buf,
+ hw->aq.asq_buf_size,
+ I40E_ADMINQ_DESC_ALIGNMENT);
+ if (ret_code)
+ goto unwind_alloc_asq_bufs;
+ }
+alloc_asq_bufs:
+ return ret_code;
+
+unwind_alloc_asq_bufs:
+ /* don't try to free the one that failed... */
+ i--;
+ for (; i >= 0; i--)
+ i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
+ mem.va = hw->aq.asq.r.asq_bi;
+ i40e_free_virt_mem(hw, &mem);
+
+ return ret_code;
+}
+
+/**
+ * i40e_free_arq_bufs - Free receive queue buffer info elements
+ * @hw: pointer to the hardware structure
+ **/
+static void i40e_free_arq_bufs(struct i40e_hw *hw)
+{
+ struct i40e_virt_mem mem;
+ int i;
+
+ for (i = 0; i < hw->aq.num_arq_entries; i++)
+ i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
+
+ mem.va = hw->aq.arq.r.arq_bi;
+ i40e_free_virt_mem(hw, &mem);
+}
+
+/**
+ * i40e_free_asq_bufs - Free send queue buffer info elements
+ * @hw: pointer to the hardware structure
+ **/
+static void i40e_free_asq_bufs(struct i40e_hw *hw)
+{
+ struct i40e_virt_mem mem;
+ int i;
+
+ /* only unmap if the address is non-NULL */
+ for (i = 0; i < hw->aq.num_asq_entries; i++)
+ if (hw->aq.asq.r.asq_bi[i].pa)
+ i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
+
+ /* now free the buffer info list */
+ mem.va = hw->aq.asq.r.asq_bi;
+ i40e_free_virt_mem(hw, &mem);
+}
+
+/**
+ * i40e_config_asq_regs - configure ASQ registers
+ * @hw: pointer to the hardware structure
+ *
+ * Configure base address and length registers for the transmit queue
+ **/
+static void i40e_config_asq_regs(struct i40e_hw *hw)
+{
+ if (hw->mac.type == I40E_MAC_VF) {
+ /* configure the transmit queue */
+ wr32(hw, I40E_VF_ATQBAH1, upper_32_bits(hw->aq.asq.dma_addr));
+ wr32(hw, I40E_VF_ATQBAL1, lower_32_bits(hw->aq.asq.dma_addr));
+ wr32(hw, I40E_VF_ATQLEN1, (hw->aq.num_asq_entries |
+ I40E_VF_ATQLEN1_ATQENABLE_MASK));
+ } else {
+ /* configure the transmit queue */
+ wr32(hw, I40E_PF_ATQBAH, upper_32_bits(hw->aq.asq.dma_addr));
+ wr32(hw, I40E_PF_ATQBAL, lower_32_bits(hw->aq.asq.dma_addr));
+ wr32(hw, I40E_PF_ATQLEN, (hw->aq.num_asq_entries |
+ I40E_PF_ATQLEN_ATQENABLE_MASK));
+ }
+}
+
+/**
+ * i40e_config_arq_regs - ARQ register configuration
+ * @hw: pointer to the hardware structure
+ *
+ * Configure base address and length registers for the receive (event queue)
+ **/
+static void i40e_config_arq_regs(struct i40e_hw *hw)
+{
+ if (hw->mac.type == I40E_MAC_VF) {
+ /* configure the receive queue */
+ wr32(hw, I40E_VF_ARQBAH1, upper_32_bits(hw->aq.arq.dma_addr));
+ wr32(hw, I40E_VF_ARQBAL1, lower_32_bits(hw->aq.arq.dma_addr));
+ wr32(hw, I40E_VF_ARQLEN1, (hw->aq.num_arq_entries |
+ I40E_VF_ARQLEN1_ARQENABLE_MASK));
+ } else {
+ /* configure the receive queue */
+ wr32(hw, I40E_PF_ARQBAH, upper_32_bits(hw->aq.arq.dma_addr));
+ wr32(hw, I40E_PF_ARQBAL, lower_32_bits(hw->aq.arq.dma_addr));
+ wr32(hw, I40E_PF_ARQLEN, (hw->aq.num_arq_entries |
+ I40E_PF_ARQLEN_ARQENABLE_MASK));
+ }
+
+ /* Update tail in the HW to post pre-allocated buffers */
+ wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);
+}
+
+/**
+ * i40e_init_asq - main initialization routine for ASQ
+ * @hw: pointer to the hardware structure
+ *
+ * This is the main initialization routine for the Admin Send Queue
+ * Prior to calling this function, drivers *MUST* set the following fields
+ * in the hw->aq structure:
+ * - hw->aq.num_asq_entries
+ * - hw->aq.asq_buf_size
+ *
+ * Do *NOT* hold the lock when calling this as the memory allocation routines
+ * called are not going to be atomic context safe
+ **/
+static i40e_status i40e_init_asq(struct i40e_hw *hw)
+{
+ i40e_status ret_code = 0;
+
+ if (hw->aq.asq.count > 0) {
+ /* queue already initialized */
+ ret_code = I40E_ERR_NOT_READY;
+ goto init_adminq_exit;
+ }
+
+ /* verify input for valid configuration */
+ if ((hw->aq.num_asq_entries == 0) ||
+ (hw->aq.asq_buf_size == 0)) {
+ ret_code = I40E_ERR_CONFIG;
+ goto init_adminq_exit;
+ }
+
+ hw->aq.asq.next_to_use = 0;
+ hw->aq.asq.next_to_clean = 0;
+ hw->aq.asq.count = hw->aq.num_asq_entries;
+
+ /* allocate the ring memory */
+ ret_code = i40e_alloc_adminq_asq_ring(hw);
+ if (ret_code)
+ goto init_adminq_exit;
+
+ /* allocate buffers in the rings */
+ ret_code = i40e_alloc_asq_bufs(hw);
+ if (ret_code)
+ goto init_adminq_free_rings;
+
+ /* initialize base registers */
+ i40e_config_asq_regs(hw);
+
+ /* success! */
+ goto init_adminq_exit;
+
+init_adminq_free_rings:
+ i40e_free_adminq_asq(hw);
+
+init_adminq_exit:
+ return ret_code;
+}
+
+/**
+ * i40e_init_arq - initialize ARQ
+ * @hw: pointer to the hardware structure
+ *
+ * The main initialization routine for the Admin Receive (Event) Queue.
+ * Prior to calling this function, drivers *MUST* set the following fields
+ * in the hw->aq structure:
+ * - hw->aq.num_arq_entries
+ * - hw->aq.arq_buf_size
+ *
+ * Do *NOT* hold the lock when calling this as the memory allocation routines
+ * called are not going to be atomic context safe
+ **/
+static i40e_status i40e_init_arq(struct i40e_hw *hw)
+{
+ i40e_status ret_code = 0;
+
+ if (hw->aq.arq.count > 0) {
+ /* queue already initialized */
+ ret_code = I40E_ERR_NOT_READY;
+ goto init_adminq_exit;
+ }
+
+ /* verify input for valid configuration */
+ if ((hw->aq.num_arq_entries == 0) ||
+ (hw->aq.arq_buf_size == 0)) {
+ ret_code = I40E_ERR_CONFIG;
+ goto init_adminq_exit;
+ }
+
+ hw->aq.arq.next_to_use = 0;
+ hw->aq.arq.next_to_clean = 0;
+ hw->aq.arq.count = hw->aq.num_arq_entries;
+
+ /* allocate the ring memory */
+ ret_code = i40e_alloc_adminq_arq_ring(hw);
+ if (ret_code)
+ goto init_adminq_exit;
+
+ /* allocate buffers in the rings */
+ ret_code = i40e_alloc_arq_bufs(hw);
+ if (ret_code)
+ goto init_adminq_free_rings;
+
+ /* initialize base registers */
+ i40e_config_arq_regs(hw);
+
+ /* success! */
+ goto init_adminq_exit;
+
+init_adminq_free_rings:
+ i40e_free_adminq_arq(hw);
+
+init_adminq_exit:
+ return ret_code;
+}
+
+/**
+ * i40e_shutdown_asq - shutdown the ASQ
+ * @hw: pointer to the hardware structure
+ *
+ * The main shutdown routine for the Admin Send Queue
+ **/
+static i40e_status i40e_shutdown_asq(struct i40e_hw *hw)
+{
+ i40e_status ret_code = 0;
+
+ if (hw->aq.asq.count == 0)
+ return I40E_ERR_NOT_READY;
+
+ /* Stop firmware AdminQ processing */
+ if (hw->mac.type == I40E_MAC_VF)
+ wr32(hw, I40E_VF_ATQLEN1, 0);
+ else
+ wr32(hw, I40E_PF_ATQLEN, 0);
+
+ /* make sure lock is available */
+ mutex_lock(&hw->aq.asq_mutex);
+
+ hw->aq.asq.count = 0; /* to indicate uninitialized queue */
+
+ /* free ring buffers */
+ i40e_free_asq_bufs(hw);
+ /* free the ring descriptors */
+ i40e_free_adminq_asq(hw);
+
+ mutex_unlock(&hw->aq.asq_mutex);
+
+ return ret_code;
+}
+
+/**
+ * i40e_shutdown_arq - shutdown ARQ
+ * @hw: pointer to the hardware structure
+ *
+ * The main shutdown routine for the Admin Receive Queue
+ **/
+static i40e_status i40e_shutdown_arq(struct i40e_hw *hw)
+{
+ i40e_status ret_code = 0;
+
+ if (hw->aq.arq.count == 0)
+ return I40E_ERR_NOT_READY;
+
+ /* Stop firmware AdminQ processing */
+ if (hw->mac.type == I40E_MAC_VF)
+ wr32(hw, I40E_VF_ARQLEN1, 0);
+ else
+ wr32(hw, I40E_PF_ARQLEN, 0);
+
+ /* make sure lock is available */
+ mutex_lock(&hw->aq.arq_mutex);
+
+ hw->aq.arq.count = 0; /* to indicate uninitialized queue */
+
+ /* free ring buffers */
+ i40e_free_arq_bufs(hw);
+ /* free the ring descriptors */
+ i40e_free_adminq_arq(hw);
+
+ mutex_unlock(&hw->aq.arq_mutex);
+
+ return ret_code;
+}
+
+/**
+ * i40e_init_adminq - main initialization routine for Admin Queue
+ * @hw: pointer to the hardware structure
+ *
+ * Prior to calling this function, drivers *MUST* set the following fields
+ * in the hw->aq structure:
+ * - hw->aq.num_asq_entries
+ * - hw->aq.num_arq_entries
+ * - hw->aq.arq_buf_size
+ * - hw->aq.asq_buf_size
+ **/
+i40e_status i40e_init_adminq(struct i40e_hw *hw)
+{
+ u16 eetrack_lo, eetrack_hi;
+ i40e_status ret_code;
+
+ /* verify input for valid configuration */
+ if ((hw->aq.num_arq_entries == 0) ||
+ (hw->aq.num_asq_entries == 0) ||
+ (hw->aq.arq_buf_size == 0) ||
+ (hw->aq.asq_buf_size == 0)) {
+ ret_code = I40E_ERR_CONFIG;
+ goto init_adminq_exit;
+ }
+
+ /* initialize locks */
+ mutex_init(&hw->aq.asq_mutex);
+ mutex_init(&hw->aq.arq_mutex);
+
+ /* Set up register offsets */
+ i40e_adminq_init_regs(hw);
+
+ /* allocate the ASQ */
+ ret_code = i40e_init_asq(hw);
+ if (ret_code)
+ goto init_adminq_destroy_locks;
+
+ /* allocate the ARQ */
+ ret_code = i40e_init_arq(hw);
+ if (ret_code)
+ goto init_adminq_free_asq;
+
+ ret_code = i40e_aq_get_firmware_version(hw,
+ &hw->aq.fw_maj_ver, &hw->aq.fw_min_ver,
+ &hw->aq.api_maj_ver, &hw->aq.api_min_ver,
+ NULL);
+ if (ret_code)
+ goto init_adminq_free_arq;
+
+ if (hw->aq.api_maj_ver != I40E_FW_API_VERSION_MAJOR ||
+ hw->aq.api_min_ver != I40E_FW_API_VERSION_MINOR) {
+ ret_code = I40E_ERR_FIRMWARE_API_VERSION;
+ goto init_adminq_free_arq;
+ }
+ i40e_read_nvm_word(hw, I40E_SR_NVM_IMAGE_VERSION, &hw->nvm.version);
+ i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_LO, &eetrack_lo);
+ i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
+ hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo;
+
+ ret_code = i40e_aq_set_hmc_resource_profile(hw,
+ I40E_HMC_PROFILE_DEFAULT,
+ 0,
+ NULL);
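+	/* setting the HMC resource profile is best effort, so any failure
+	 * of the request above is intentionally discarded
+	 */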
+ ret_code = 0;
+
+ /* success! */
+ goto init_adminq_exit;
+
+init_adminq_free_arq:
+ i40e_shutdown_arq(hw);
+init_adminq_free_asq:
+ i40e_shutdown_asq(hw);
+init_adminq_destroy_locks:
+	mutex_destroy(&hw->aq.asq_mutex);
+	mutex_destroy(&hw->aq.arq_mutex);
+
+init_adminq_exit:
+ return ret_code;
+}
+
+/**
+ * i40e_shutdown_adminq - shutdown routine for the Admin Queue
+ * @hw: pointer to the hardware structure
+ **/
+i40e_status i40e_shutdown_adminq(struct i40e_hw *hw)
+{
+ i40e_status ret_code = 0;
+
+ i40e_shutdown_asq(hw);
+ i40e_shutdown_arq(hw);
+
+	/* destroy the locks */
+	mutex_destroy(&hw->aq.asq_mutex);
+	mutex_destroy(&hw->aq.arq_mutex);
+
+ return ret_code;
+}
+
+/**
+ * i40e_clean_asq - cleans Admin send queue
+ * @hw: pointer to the hw struct
+ *
+ * returns the number of free desc
+ **/
+static u16 i40e_clean_asq(struct i40e_hw *hw)
+{
+ struct i40e_adminq_ring *asq = &(hw->aq.asq);
+ struct i40e_asq_cmd_details *details;
+ u16 ntc = asq->next_to_clean;
+ struct i40e_aq_desc desc_cb;
+ struct i40e_aq_desc *desc;
+
+ desc = I40E_ADMINQ_DESC(*asq, ntc);
+ details = I40E_ADMINQ_DETAILS(*asq, ntc);
+ while (rd32(hw, hw->aq.asq.head) != ntc) {
+ if (details->callback) {
+ I40E_ADMINQ_CALLBACK cb_func =
+ (I40E_ADMINQ_CALLBACK)details->callback;
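+			/* hand the callback a copy of the descriptor, since
+			 * the ring entry is zeroed out immediately below
+			 */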
+ desc_cb = *desc;
+ cb_func(hw, &desc_cb);
+ }
+ memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
+ memset((void *)details, 0,
+ sizeof(struct i40e_asq_cmd_details));
+ ntc++;
+ if (ntc == asq->count)
+ ntc = 0;
+ desc = I40E_ADMINQ_DESC(*asq, ntc);
+ details = I40E_ADMINQ_DETAILS(*asq, ntc);
+ }
+
+ asq->next_to_clean = ntc;
+
+ return I40E_DESC_UNUSED(asq);
+}
+
+/**
+ * i40e_asq_done - check if FW has processed the Admin Send Queue
+ * @hw: pointer to the hw struct
+ *
+ * Returns true if the firmware has processed all descriptors on the
+ * admin send queue. Returns false if there are still requests pending.
+ **/
+bool i40e_asq_done(struct i40e_hw *hw)
+{
+ /* AQ designers suggest use of head for better
+ * timing reliability than DD bit
+ */
+ return (rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use);
+}
+
+/**
+ * i40e_asq_send_command - send command to Admin Queue
+ * @hw: pointer to the hw struct
+ * @desc: prefilled descriptor describing the command (non DMA mem)
+ * @buff: buffer to use for indirect commands
+ * @buff_size: size of buffer for indirect commands
+ * @cmd_details: pointer to command details structure (used for async cleanup)
+ *
+ * This is the main send command driver routine for the Admin Queue send
+ * queue. It runs the queue, cleans the queue, etc
+ **/
+i40e_status i40e_asq_send_command(struct i40e_hw *hw,
+ struct i40e_aq_desc *desc,
+ void *buff, /* can be NULL */
+ u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ i40e_status status = 0;
+ struct i40e_dma_mem *dma_buff = NULL;
+ struct i40e_asq_cmd_details *details;
+ struct i40e_aq_desc *desc_on_ring;
+ bool cmd_completed = false;
+ u16 retval = 0;
+
+ if (hw->aq.asq.count == 0) {
+ i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: Admin queue not initialized.\n");
+ status = I40E_ERR_QUEUE_EMPTY;
+ goto asq_send_command_exit;
+ }
+
+ details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
+ if (cmd_details) {
+ memcpy(details, cmd_details,
+ sizeof(struct i40e_asq_cmd_details));
+
+ /* If the cmd_details are defined copy the cookie. The
+ * cpu_to_le32 is not needed here because the data is ignored
+ * by the FW, only used by the driver
+ */
+ if (details->cookie) {
+ desc->cookie_high =
+ cpu_to_le32(upper_32_bits(details->cookie));
+ desc->cookie_low =
+ cpu_to_le32(lower_32_bits(details->cookie));
+ }
+ } else {
+ memset(details, 0, sizeof(struct i40e_asq_cmd_details));
+ }
+
+ /* clear requested flags and then set additional flags if defined */
+ desc->flags &= ~cpu_to_le16(details->flags_dis);
+ desc->flags |= cpu_to_le16(details->flags_ena);
+
+ mutex_lock(&hw->aq.asq_mutex);
+
+ if (buff_size > hw->aq.asq_buf_size) {
+ i40e_debug(hw,
+ I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: Invalid buffer size: %d.\n",
+ buff_size);
+ status = I40E_ERR_INVALID_SIZE;
+ goto asq_send_command_error;
+ }
+
+ if (details->postpone && !details->async) {
+ i40e_debug(hw,
+ I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: Async flag not set along with postpone flag");
+ status = I40E_ERR_PARAM;
+ goto asq_send_command_error;
+ }
+
+ /* call clean and check queue available function to reclaim the
+ * descriptors that were processed by FW, the function returns the
+ * number of desc available
+ */
+ /* the clean function called here could be called in a separate thread
+ * in case of asynchronous completions
+ */
+ if (i40e_clean_asq(hw) == 0) {
+ i40e_debug(hw,
+ I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: Error queue is full.\n");
+ status = I40E_ERR_ADMIN_QUEUE_FULL;
+ goto asq_send_command_error;
+ }
+
+ /* initialize the temp desc pointer with the right desc */
+ desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);
+
+ /* if the desc is available copy the temp desc to the right place */
+ memcpy(desc_on_ring, desc, sizeof(struct i40e_aq_desc));
+
+ /* if buff is not NULL assume indirect command */
+ if (buff != NULL) {
+ dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
+ /* copy the user buff into the respective DMA buff */
+ memcpy(dma_buff->va, buff, buff_size);
+ desc_on_ring->datalen = cpu_to_le16(buff_size);
+
+ /* Update the address values in the desc with the pa value
+ * for respective buffer
+ */
+ desc_on_ring->params.external.addr_high =
+ cpu_to_le32(upper_32_bits(dma_buff->pa));
+ desc_on_ring->params.external.addr_low =
+ cpu_to_le32(lower_32_bits(dma_buff->pa));
+ }
+
+ /* bump the tail */
+ i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring, buff);
+ (hw->aq.asq.next_to_use)++;
+ if (hw->aq.asq.next_to_use == hw->aq.asq.count)
+ hw->aq.asq.next_to_use = 0;
+ if (!details->postpone)
+ wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);
+
+ /* if cmd_details are not defined or async flag is not set,
+ * we need to wait for desc write back
+ */
+ if (!details->async && !details->postpone) {
+ u32 total_delay = 0;
+ u32 delay_len = 10;
+
+ do {
+ /* AQ designers suggest use of head for better
+ * timing reliability than DD bit
+ */
+ if (i40e_asq_done(hw))
+ break;
+			/* ugh! delay while holding the asq mutex */
+ udelay(delay_len);
+ total_delay += delay_len;
+ } while (total_delay < I40E_ASQ_CMD_TIMEOUT);
+ }
+
+ /* if ready, copy the desc back to temp */
+ if (i40e_asq_done(hw)) {
+ memcpy(desc, desc_on_ring, sizeof(struct i40e_aq_desc));
+ if (buff != NULL)
+ memcpy(buff, dma_buff->va, buff_size);
+ retval = le16_to_cpu(desc->retval);
+ if (retval != 0) {
+ i40e_debug(hw,
+ I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: Command completed with error 0x%X.\n",
+ retval);
+ /* strip off FW internal code */
+ retval &= 0xff;
+ }
+ cmd_completed = true;
+ if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK)
+ status = 0;
+ else
+ status = I40E_ERR_ADMIN_QUEUE_ERROR;
+ hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
+ }
+
+ /* update the error if time out occurred */
+ if ((!cmd_completed) &&
+ (!details->async && !details->postpone)) {
+ i40e_debug(hw,
+ I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: Writeback timeout.\n");
+ status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
+ }
+
+asq_send_command_error:
+ mutex_unlock(&hw->aq.asq_mutex);
+asq_send_command_exit:
+ return status;
+}
+
+/**
+ * i40e_fill_default_direct_cmd_desc - AQ descriptor helper function
+ * @desc: pointer to the temp descriptor (non DMA mem)
+ * @opcode: the opcode can be used to decide which flags to turn off or on
+ *
+ * Fill the desc with default values
+ **/
+void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
+ u16 opcode)
+{
+ /* zero out the desc */
+ memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
+ desc->opcode = cpu_to_le16(opcode);
+ desc->flags = cpu_to_le16(I40E_AQ_FLAG_EI | I40E_AQ_FLAG_SI);
+}
+
+/**
+ * i40e_clean_arq_element
+ * @hw: pointer to the hw struct
+ * @e: event info from the receive descriptor, includes any buffers
+ * @pending: number of events that could be left to process
+ *
+ * This function cleans one Admin Receive Queue element and returns
+ * the contents through e. It can also return how many events are
+ * left to process through 'pending'
+ **/
+i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
+ struct i40e_arq_event_info *e,
+ u16 *pending)
+{
+ i40e_status ret_code = 0;
+ u16 ntc = hw->aq.arq.next_to_clean;
+ struct i40e_aq_desc *desc;
+ struct i40e_dma_mem *bi;
+ u16 desc_idx;
+ u16 datalen;
+ u16 flags;
+ u16 ntu;
+
+ /* take the lock before we start messing with the ring */
+ mutex_lock(&hw->aq.arq_mutex);
+
+ /* set next_to_use to head */
+ ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
+ if (ntu == ntc) {
+ /* nothing to do - shouldn't need to update ring's values */
+ i40e_debug(hw,
+ I40E_DEBUG_AQ_MESSAGE,
+ "AQRX: Queue is empty.\n");
+ ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
+ goto clean_arq_element_out;
+ }
+
+ /* now clean the next descriptor */
+ desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
+ desc_idx = ntc;
+ i40e_debug_aq(hw,
+ I40E_DEBUG_AQ_COMMAND,
+ (void *)desc,
+ hw->aq.arq.r.arq_bi[desc_idx].va);
+
+ flags = le16_to_cpu(desc->flags);
+ if (flags & I40E_AQ_FLAG_ERR) {
+ ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
+ hw->aq.arq_last_status =
+ (enum i40e_admin_queue_err)le16_to_cpu(desc->retval);
+ i40e_debug(hw,
+ I40E_DEBUG_AQ_MESSAGE,
+ "AQRX: Event received with error 0x%X.\n",
+ hw->aq.arq_last_status);
+ } else {
+ memcpy(&e->desc, desc, sizeof(struct i40e_aq_desc));
+ datalen = le16_to_cpu(desc->datalen);
+ e->msg_size = min(datalen, e->msg_size);
+ if (e->msg_buf != NULL && (e->msg_size != 0))
+ memcpy(e->msg_buf, hw->aq.arq.r.arq_bi[desc_idx].va,
+ e->msg_size);
+ }
+
+ /* Restore the original datalen and buffer address in the desc,
+ * FW updates datalen to indicate the event message
+ * size
+ */
+ bi = &hw->aq.arq.r.arq_bi[ntc];
+ desc->datalen = cpu_to_le16((u16)bi->size);
+ desc->params.external.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
+ desc->params.external.addr_low = cpu_to_le32(lower_32_bits(bi->pa));
+
+ /* set tail = the last cleaned desc index. */
+ wr32(hw, hw->aq.arq.tail, ntc);
+ /* ntc is updated to tail + 1 */
+ ntc++;
+ if (ntc == hw->aq.num_arq_entries)
+ ntc = 0;
+ hw->aq.arq.next_to_clean = ntc;
+ hw->aq.arq.next_to_use = ntu;
+
+clean_arq_element_out:
+	/* Set pending if needed (the computation handles ring wrap),
+	 * unlock and return
+	 */
+ if (pending != NULL)
+ *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
+ mutex_unlock(&hw->aq.arq_mutex);
+
+ return ret_code;
+}
+
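+/**
+ * i40e_resume_aq - resume AQ processing from idle
+ * @hw: pointer to the hardware structure
+ **/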
+void i40e_resume_aq(struct i40e_hw *hw)
+{
+ u32 reg = 0;
+
+ /* Registers are reset after PF reset */
+ hw->aq.asq.next_to_use = 0;
+ hw->aq.asq.next_to_clean = 0;
+
+ i40e_config_asq_regs(hw);
+ reg = hw->aq.num_asq_entries;
+
+ if (hw->mac.type == I40E_MAC_VF) {
+		reg |= I40E_VF_ATQLEN1_ATQENABLE_MASK;
+ wr32(hw, I40E_VF_ATQLEN1, reg);
+ } else {
+ reg |= I40E_PF_ATQLEN_ATQENABLE_MASK;
+ wr32(hw, I40E_PF_ATQLEN, reg);
+ }
+
+ hw->aq.arq.next_to_use = 0;
+ hw->aq.arq.next_to_clean = 0;
+
+ i40e_config_arq_regs(hw);
+ reg = hw->aq.num_arq_entries;
+
+ if (hw->mac.type == I40E_MAC_VF) {
+		reg |= I40E_VF_ARQLEN1_ARQENABLE_MASK;
+ wr32(hw, I40E_VF_ARQLEN1, reg);
+ } else {
+		reg |= I40E_PF_ARQLEN_ARQENABLE_MASK;
+ wr32(hw, I40E_PF_ARQLEN, reg);
+ }
+}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.h b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
new file mode 100644
index 0000000..22e5ed6
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
@@ -0,0 +1,112 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_ADMINQ_H_
+#define _I40E_ADMINQ_H_
+
+#include "i40e_osdep.h"
+#include "i40e_adminq_cmd.h"
+
+#define I40E_ADMINQ_DESC(R, i) \
+ (&(((struct i40e_aq_desc *)((R).desc))[i]))
+
+#define I40E_ADMINQ_DESC_ALIGNMENT 4096
+
+struct i40e_adminq_ring {
+ void *desc; /* Descriptor ring memory */
+ void *details; /* ASQ details */
+
+ union {
+ struct i40e_dma_mem *asq_bi;
+ struct i40e_dma_mem *arq_bi;
+ } r;
+
+ u64 dma_addr; /* Physical address of the ring */
+ u16 count; /* Number of descriptors */
+ u16 rx_buf_len; /* Admin Receive Queue buffer length */
+
+ /* used for interrupt processing */
+ u16 next_to_use;
+ u16 next_to_clean;
+
+ /* used for queue tracking */
+ u32 head;
+ u32 tail;
+};
+
+/* ASQ transaction details */
+struct i40e_asq_cmd_details {
+ void *callback; /* cast from type I40E_ADMINQ_CALLBACK */
+ u64 cookie;
+ u16 flags_ena;
+ u16 flags_dis;
+ bool async;
+ bool postpone;
+};
+
+#define I40E_ADMINQ_DETAILS(R, i) \
+ (&(((struct i40e_asq_cmd_details *)((R).details))[i]))
+
+/* ARQ event information */
+struct i40e_arq_event_info {
+ struct i40e_aq_desc desc;
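+	/* in/out: the caller sets msg_size to the msg_buf capacity and
+	 * i40e_clean_arq_element() overwrites it with the received length
+	 */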
+ u16 msg_size;
+ u8 *msg_buf;
+};
+
+/* Admin Queue information */
+struct i40e_adminq_info {
+ struct i40e_adminq_ring arq; /* receive queue */
+ struct i40e_adminq_ring asq; /* send queue */
+ u16 num_arq_entries; /* receive queue depth */
+ u16 num_asq_entries; /* send queue depth */
+ u16 arq_buf_size; /* receive queue buffer size */
+ u16 asq_buf_size; /* send queue buffer size */
+ u16 fw_maj_ver; /* firmware major version */
+ u16 fw_min_ver; /* firmware minor version */
+ u16 api_maj_ver; /* api major version */
+ u16 api_min_ver; /* api minor version */
+
+ struct mutex asq_mutex; /* Send queue lock */
+ struct mutex arq_mutex; /* Receive queue lock */
+
+ struct i40e_dma_mem asq_mem; /* send queue dynamic memory */
+ struct i40e_dma_mem arq_mem; /* receive queue dynamic memory */
+
+ /* last status values on send and receive queues */
+ enum i40e_admin_queue_err asq_last_status;
+ enum i40e_admin_queue_err arq_last_status;
+};
+
+/* general information */
+#define I40E_AQ_LARGE_BUF 512
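+/* how long i40e_asq_send_command() polls for completion, in 10 usec steps */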
+#define I40E_ASQ_CMD_TIMEOUT 100000 /* usecs */
+
+void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
+ u16 opcode);
+
+#endif /* _I40E_ADMINQ_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
new file mode 100644
index 0000000..e61ebdd
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -0,0 +1,2076 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_ADMINQ_CMD_H_
+#define _I40E_ADMINQ_CMD_H_
+
+/* This header file defines the i40e Admin Queue commands and is shared between
+ * i40e Firmware and Software.
+ *
+ * This file needs to comply with the Linux Kernel coding style.
+ */
+
+#define I40E_FW_API_VERSION_MAJOR 0x0001
+#define I40E_FW_API_VERSION_MINOR 0x0000
+
+struct i40e_aq_desc {
+ __le16 flags;
+ __le16 opcode;
+ __le16 datalen;
+ __le16 retval;
+ __le32 cookie_high;
+ __le32 cookie_low;
+ union {
+ struct {
+ __le32 param0;
+ __le32 param1;
+ __le32 param2;
+ __le32 param3;
+ } internal;
+ struct {
+ __le32 param0;
+ __le32 param1;
+ __le32 addr_high;
+ __le32 addr_low;
+ } external;
+ u8 raw[16];
+ } params;
+};
+
+/* Flags sub-structure
+ * |0 |1 |2 |3 |4 |5 |6 |7 |8 |9 |10 |11 |12 |13 |14 |15 |
+ * |DD |CMP|ERR|VFE| * * RESERVED * * |LB |RD |VFC|BUF|SI |EI |FE |
+ */
+
+/* command flags and offsets*/
+#define I40E_AQ_FLAG_DD_SHIFT 0
+#define I40E_AQ_FLAG_CMP_SHIFT 1
+#define I40E_AQ_FLAG_ERR_SHIFT 2
+#define I40E_AQ_FLAG_VFE_SHIFT 3
+#define I40E_AQ_FLAG_LB_SHIFT 9
+#define I40E_AQ_FLAG_RD_SHIFT 10
+#define I40E_AQ_FLAG_VFC_SHIFT 11
+#define I40E_AQ_FLAG_BUF_SHIFT 12
+#define I40E_AQ_FLAG_SI_SHIFT 13
+#define I40E_AQ_FLAG_EI_SHIFT 14
+#define I40E_AQ_FLAG_FE_SHIFT 15
+
+#define I40E_AQ_FLAG_DD (1 << I40E_AQ_FLAG_DD_SHIFT) /* 0x1 */
+#define I40E_AQ_FLAG_CMP (1 << I40E_AQ_FLAG_CMP_SHIFT) /* 0x2 */
+#define I40E_AQ_FLAG_ERR (1 << I40E_AQ_FLAG_ERR_SHIFT) /* 0x4 */
+#define I40E_AQ_FLAG_VFE (1 << I40E_AQ_FLAG_VFE_SHIFT) /* 0x8 */
+#define I40E_AQ_FLAG_LB (1 << I40E_AQ_FLAG_LB_SHIFT) /* 0x200 */
+#define I40E_AQ_FLAG_RD (1 << I40E_AQ_FLAG_RD_SHIFT) /* 0x400 */
+#define I40E_AQ_FLAG_VFC (1 << I40E_AQ_FLAG_VFC_SHIFT) /* 0x800 */
+#define I40E_AQ_FLAG_BUF (1 << I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */
+#define I40E_AQ_FLAG_SI (1 << I40E_AQ_FLAG_SI_SHIFT) /* 0x2000 */
+#define I40E_AQ_FLAG_EI (1 << I40E_AQ_FLAG_EI_SHIFT) /* 0x4000 */
+#define I40E_AQ_FLAG_FE (1 << I40E_AQ_FLAG_FE_SHIFT) /* 0x8000 */
+
+/* error codes */
+enum i40e_admin_queue_err {
+ I40E_AQ_RC_OK = 0, /* success */
+ I40E_AQ_RC_EPERM = 1, /* Operation not permitted */
+ I40E_AQ_RC_ENOENT = 2, /* No such element */
+ I40E_AQ_RC_ESRCH = 3, /* Bad opcode */
+ I40E_AQ_RC_EINTR = 4, /* operation interrupted */
+ I40E_AQ_RC_EIO = 5, /* I/O error */
+ I40E_AQ_RC_ENXIO = 6, /* No such resource */
+ I40E_AQ_RC_E2BIG = 7, /* Arg too long */
+ I40E_AQ_RC_EAGAIN = 8, /* Try again */
+ I40E_AQ_RC_ENOMEM = 9, /* Out of memory */
+ I40E_AQ_RC_EACCES = 10, /* Permission denied */
+ I40E_AQ_RC_EFAULT = 11, /* Bad address */
+ I40E_AQ_RC_EBUSY = 12, /* Device or resource busy */
+ I40E_AQ_RC_EEXIST = 13, /* object already exists */
+ I40E_AQ_RC_EINVAL = 14, /* Invalid argument */
+ I40E_AQ_RC_ENOTTY = 15, /* Not a typewriter */
+ I40E_AQ_RC_ENOSPC = 16, /* No space left or alloc failure */
+ I40E_AQ_RC_ENOSYS = 17, /* Function not implemented */
+ I40E_AQ_RC_ERANGE = 18, /* Parameter out of range */
+ I40E_AQ_RC_EFLUSHED = 19, /* Cmd flushed because of prev cmd error */
+ I40E_AQ_RC_BAD_ADDR = 20, /* Descriptor contains a bad pointer */
+ I40E_AQ_RC_EMODE = 21, /* Op not allowed in current dev mode */
+ I40E_AQ_RC_EFBIG = 22, /* File too large */
+};
+
+/* Admin Queue command opcodes */
+enum i40e_admin_queue_opc {
+ /* aq commands */
+ i40e_aqc_opc_get_version = 0x0001,
+ i40e_aqc_opc_driver_version = 0x0002,
+ i40e_aqc_opc_queue_shutdown = 0x0003,
+
+ /* resource ownership */
+ i40e_aqc_opc_request_resource = 0x0008,
+ i40e_aqc_opc_release_resource = 0x0009,
+
+ i40e_aqc_opc_list_func_capabilities = 0x000A,
+ i40e_aqc_opc_list_dev_capabilities = 0x000B,
+
+ i40e_aqc_opc_set_cppm_configuration = 0x0103,
+ i40e_aqc_opc_set_arp_proxy_entry = 0x0104,
+ i40e_aqc_opc_set_ns_proxy_entry = 0x0105,
+
+ /* LAA */
+ i40e_aqc_opc_mng_laa = 0x0106,
+ i40e_aqc_opc_mac_address_read = 0x0107,
+ i40e_aqc_opc_mac_address_write = 0x0108,
+
+ /* internal switch commands */
+ i40e_aqc_opc_get_switch_config = 0x0200,
+ i40e_aqc_opc_add_statistics = 0x0201,
+ i40e_aqc_opc_remove_statistics = 0x0202,
+ i40e_aqc_opc_set_port_parameters = 0x0203,
+ i40e_aqc_opc_get_switch_resource_alloc = 0x0204,
+
+ i40e_aqc_opc_add_vsi = 0x0210,
+ i40e_aqc_opc_update_vsi_parameters = 0x0211,
+ i40e_aqc_opc_get_vsi_parameters = 0x0212,
+
+ i40e_aqc_opc_add_pv = 0x0220,
+ i40e_aqc_opc_update_pv_parameters = 0x0221,
+ i40e_aqc_opc_get_pv_parameters = 0x0222,
+
+ i40e_aqc_opc_add_veb = 0x0230,
+ i40e_aqc_opc_update_veb_parameters = 0x0231,
+ i40e_aqc_opc_get_veb_parameters = 0x0232,
+
+ i40e_aqc_opc_delete_element = 0x0243,
+
+ i40e_aqc_opc_add_macvlan = 0x0250,
+ i40e_aqc_opc_remove_macvlan = 0x0251,
+ i40e_aqc_opc_add_vlan = 0x0252,
+ i40e_aqc_opc_remove_vlan = 0x0253,
+ i40e_aqc_opc_set_vsi_promiscuous_modes = 0x0254,
+ i40e_aqc_opc_add_tag = 0x0255,
+ i40e_aqc_opc_remove_tag = 0x0256,
+ i40e_aqc_opc_add_multicast_etag = 0x0257,
+ i40e_aqc_opc_remove_multicast_etag = 0x0258,
+ i40e_aqc_opc_update_tag = 0x0259,
+ i40e_aqc_opc_add_control_packet_filter = 0x025A,
+ i40e_aqc_opc_remove_control_packet_filter = 0x025B,
+ i40e_aqc_opc_add_cloud_filters = 0x025C,
+ i40e_aqc_opc_remove_cloud_filters = 0x025D,
+
+ i40e_aqc_opc_add_mirror_rule = 0x0260,
+ i40e_aqc_opc_delete_mirror_rule = 0x0261,
+
+ i40e_aqc_opc_set_storm_control_config = 0x0280,
+ i40e_aqc_opc_get_storm_control_config = 0x0281,
+
+ /* DCB commands */
+ i40e_aqc_opc_dcb_ignore_pfc = 0x0301,
+ i40e_aqc_opc_dcb_updated = 0x0302,
+
+ /* TX scheduler */
+ i40e_aqc_opc_configure_vsi_bw_limit = 0x0400,
+ i40e_aqc_opc_configure_vsi_ets_sla_bw_limit = 0x0406,
+ i40e_aqc_opc_configure_vsi_tc_bw = 0x0407,
+ i40e_aqc_opc_query_vsi_bw_config = 0x0408,
+ i40e_aqc_opc_query_vsi_ets_sla_config = 0x040A,
+ i40e_aqc_opc_configure_switching_comp_bw_limit = 0x0410,
+
+ i40e_aqc_opc_enable_switching_comp_ets = 0x0413,
+ i40e_aqc_opc_modify_switching_comp_ets = 0x0414,
+ i40e_aqc_opc_disable_switching_comp_ets = 0x0415,
+ i40e_aqc_opc_configure_switching_comp_ets_bw_limit = 0x0416,
+ i40e_aqc_opc_configure_switching_comp_bw_config = 0x0417,
+ i40e_aqc_opc_query_switching_comp_ets_config = 0x0418,
+ i40e_aqc_opc_query_port_ets_config = 0x0419,
+ i40e_aqc_opc_query_switching_comp_bw_config = 0x041A,
+ i40e_aqc_opc_suspend_port_tx = 0x041B,
+ i40e_aqc_opc_resume_port_tx = 0x041C,
+
+ /* hmc */
+ i40e_aqc_opc_query_hmc_resource_profile = 0x0500,
+ i40e_aqc_opc_set_hmc_resource_profile = 0x0501,
+
+ /* phy commands*/
+ i40e_aqc_opc_get_phy_abilities = 0x0600,
+ i40e_aqc_opc_set_phy_config = 0x0601,
+ i40e_aqc_opc_set_mac_config = 0x0603,
+ i40e_aqc_opc_set_link_restart_an = 0x0605,
+ i40e_aqc_opc_get_link_status = 0x0607,
+ i40e_aqc_opc_set_phy_int_mask = 0x0613,
+ i40e_aqc_opc_get_local_advt_reg = 0x0614,
+ i40e_aqc_opc_set_local_advt_reg = 0x0615,
+ i40e_aqc_opc_get_partner_advt = 0x0616,
+ i40e_aqc_opc_set_lb_modes = 0x0618,
+ i40e_aqc_opc_get_phy_wol_caps = 0x0621,
+ i40e_aqc_opc_set_phy_reset = 0x0622,
+ i40e_aqc_opc_upload_ext_phy_fm = 0x0625,
+
+ /* NVM commands */
+ i40e_aqc_opc_nvm_read = 0x0701,
+ i40e_aqc_opc_nvm_erase = 0x0702,
+ i40e_aqc_opc_nvm_update = 0x0703,
+
+ /* virtualization commands */
+ i40e_aqc_opc_send_msg_to_pf = 0x0801,
+ i40e_aqc_opc_send_msg_to_vf = 0x0802,
+ i40e_aqc_opc_send_msg_to_peer = 0x0803,
+
+ /* alternate structure */
+ i40e_aqc_opc_alternate_write = 0x0900,
+ i40e_aqc_opc_alternate_write_indirect = 0x0901,
+ i40e_aqc_opc_alternate_read = 0x0902,
+ i40e_aqc_opc_alternate_read_indirect = 0x0903,
+ i40e_aqc_opc_alternate_write_done = 0x0904,
+ i40e_aqc_opc_alternate_set_mode = 0x0905,
+ i40e_aqc_opc_alternate_clear_port = 0x0906,
+
+ /* LLDP commands */
+ i40e_aqc_opc_lldp_get_mib = 0x0A00,
+ i40e_aqc_opc_lldp_update_mib = 0x0A01,
+ i40e_aqc_opc_lldp_add_tlv = 0x0A02,
+ i40e_aqc_opc_lldp_update_tlv = 0x0A03,
+ i40e_aqc_opc_lldp_delete_tlv = 0x0A04,
+ i40e_aqc_opc_lldp_stop = 0x0A05,
+ i40e_aqc_opc_lldp_start = 0x0A06,
+
+ /* Tunnel commands */
+ i40e_aqc_opc_add_udp_tunnel = 0x0B00,
+ i40e_aqc_opc_del_udp_tunnel = 0x0B01,
+ i40e_aqc_opc_tunnel_key_structure = 0x0B10,
+
+ /* Async Events */
+ i40e_aqc_opc_event_lan_overflow = 0x1001,
+
+ /* OEM commands */
+ i40e_aqc_opc_oem_parameter_change = 0xFE00,
+ i40e_aqc_opc_oem_device_status_change = 0xFE01,
+
+ /* debug commands */
+ i40e_aqc_opc_debug_get_deviceid = 0xFF00,
+ i40e_aqc_opc_debug_set_mode = 0xFF01,
+ i40e_aqc_opc_debug_read_reg = 0xFF03,
+ i40e_aqc_opc_debug_write_reg = 0xFF04,
+ i40e_aqc_opc_debug_read_reg_sg = 0xFF05,
+ i40e_aqc_opc_debug_write_reg_sg = 0xFF06,
+ i40e_aqc_opc_debug_modify_reg = 0xFF07,
+ i40e_aqc_opc_debug_dump_internals = 0xFF08,
+ i40e_aqc_opc_debug_modify_internals = 0xFF09,
+};
+
+/* command structures and indirect data structures */
+
+/* Structure naming conventions:
+ * - no suffix for direct command descriptor structures
+ * - _data for indirect sent data
+ * - _resp for indirect return data (data which is both will use _data)
+ * - _completion for direct return data
+ * - _element_ for repeated elements (may also be _data or _resp)
+ *
+ * Command structures are expected to overlay the params.raw member of the basic
+ * descriptor, and as such cannot exceed 16 bytes in length.
+ */
+
+/* This macro is used to generate a compilation error if a structure
+ * is not exactly the correct length. It gives a divide by zero error if the
+ * structure is not of the correct size, otherwise it creates an enum that is
+ * never used.
+ */
+#define I40E_CHECK_STRUCT_LEN(n, X) enum i40e_static_assert_enum_##X \
+ { i40e_static_assert_##X = (n)/((sizeof(struct X) == (n)) ? 1 : 0) }
+
+/* This macro is used extensively to ensure that command structures are 16
+ * bytes in length as they have to map to the raw array of that size.
+ */
+#define I40E_CHECK_CMD_LENGTH(X) I40E_CHECK_STRUCT_LEN(16, X)
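+
+/* For illustration (derived from the macro above, not part of the
+ * interface): I40E_CHECK_CMD_LENGTH(i40e_aqc_get_version), for the Get
+ * Version structure defined below, expands to
+ *
+ *	enum i40e_static_assert_enum_i40e_aqc_get_version {
+ *		i40e_static_assert_i40e_aqc_get_version =
+ *			16 / ((sizeof(struct i40e_aqc_get_version) == 16) ? 1 : 0)
+ *	};
+ *
+ * so a mis-sized command structure fails to compile instead of silently
+ * overflowing the 16-byte params.raw overlay described above.
+ */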
+
+/* internal (0x00XX) commands */
+
+/* Get version (direct 0x0001) */
+struct i40e_aqc_get_version {
+ __le32 rom_ver;
+ __le32 fw_build;
+ __le16 fw_major;
+ __le16 fw_minor;
+ __le16 api_major;
+ __le16 api_minor;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_version);
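+
+/* Minimal usage sketch (an illustration, not the driver's code; it assumes
+ * the descriptor helpers i40e_fill_default_direct_cmd_desc() and
+ * i40e_asq_send_command() declared elsewhere in this series):
+ *
+ *	struct i40e_aq_desc desc;
+ *	struct i40e_aqc_get_version *resp =
+ *		(struct i40e_aqc_get_version *)&desc.params.raw;
+ *
+ *	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_version);
+ *
+ * Once the command completes, the firmware revision can be read as
+ * le16_to_cpu(resp->fw_major) and le16_to_cpu(resp->fw_minor).
+ */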
+
+/* Send driver version (direct 0x0002) */
+struct i40e_aqc_driver_version {
+ u8 driver_major_ver;
+ u8 driver_minor_ver;
+ u8 driver_build_ver;
+ u8 driver_subbuild_ver;
+ u8 reserved[12];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_driver_version);
+
+/* Queue Shutdown (direct 0x0003) */
+struct i40e_aqc_queue_shutdown {
+ __le32 driver_unloading;
+#define I40E_AQ_DRIVER_UNLOADING 0x1
+ u8 reserved[12];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_queue_shutdown);
+
+/* Request resource ownership (direct 0x0008)
+ * Release resource ownership (direct 0x0009)
+ */
+#define I40E_AQ_RESOURCE_NVM 1
+#define I40E_AQ_RESOURCE_SDP 2
+#define I40E_AQ_RESOURCE_ACCESS_READ 1
+#define I40E_AQ_RESOURCE_ACCESS_WRITE 2
+#define I40E_AQ_RESOURCE_NVM_READ_TIMEOUT 3000
+#define I40E_AQ_RESOURCE_NVM_WRITE_TIMEOUT 180000
+
+struct i40e_aqc_request_resource {
+ __le16 resource_id;
+ __le16 access_type;
+ __le32 timeout;
+ __le32 resource_number;
+ u8 reserved[4];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_request_resource);
+
+/* Get function capabilities (indirect 0x000A)
+ * Get device capabilities (indirect 0x000B)
+ */
+struct i40e_aqc_list_capabilites {
+ u8 command_flags;
+#define I40E_AQ_LIST_CAP_PF_INDEX_EN 1
+ u8 pf_index;
+ u8 reserved[2];
+ __le32 count;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_list_capabilites);
+
+struct i40e_aqc_list_capabilities_element_resp {
+ __le16 id;
+ u8 major_rev;
+ u8 minor_rev;
+ __le32 number;
+ __le32 logical_id;
+ __le32 phys_id;
+ u8 reserved[16];
+};
+
+/* list of caps */
+
+#define I40E_AQ_CAP_ID_SWITCH_MODE 0x0001
+#define I40E_AQ_CAP_ID_MNG_MODE 0x0002
+#define I40E_AQ_CAP_ID_NPAR_ACTIVE 0x0003
+#define I40E_AQ_CAP_ID_OS2BMC_CAP 0x0004
+#define I40E_AQ_CAP_ID_FUNCTIONS_VALID 0x0005
+#define I40E_AQ_CAP_ID_ALTERNATE_RAM 0x0006
+#define I40E_AQ_CAP_ID_SRIOV 0x0012
+#define I40E_AQ_CAP_ID_VF 0x0013
+#define I40E_AQ_CAP_ID_VMDQ 0x0014
+#define I40E_AQ_CAP_ID_8021QBG 0x0015
+#define I40E_AQ_CAP_ID_8021QBR 0x0016
+#define I40E_AQ_CAP_ID_VSI 0x0017
+#define I40E_AQ_CAP_ID_DCB 0x0018
+#define I40E_AQ_CAP_ID_FCOE 0x0021
+#define I40E_AQ_CAP_ID_RSS 0x0040
+#define I40E_AQ_CAP_ID_RXQ 0x0041
+#define I40E_AQ_CAP_ID_TXQ 0x0042
+#define I40E_AQ_CAP_ID_MSIX 0x0043
+#define I40E_AQ_CAP_ID_VF_MSIX 0x0044
+#define I40E_AQ_CAP_ID_FLOW_DIRECTOR 0x0045
+#define I40E_AQ_CAP_ID_1588 0x0046
+#define I40E_AQ_CAP_ID_IWARP 0x0051
+#define I40E_AQ_CAP_ID_LED 0x0061
+#define I40E_AQ_CAP_ID_SDP 0x0062
+#define I40E_AQ_CAP_ID_MDIO 0x0063
+#define I40E_AQ_CAP_ID_FLEX10 0x00F1
+#define I40E_AQ_CAP_ID_CEM 0x00F2
+
+/* Set CPPM Configuration (direct 0x0103) */
+struct i40e_aqc_cppm_configuration {
+ __le16 command_flags;
+#define I40E_AQ_CPPM_EN_LTRC 0x0800
+#define I40E_AQ_CPPM_EN_DMCTH 0x1000
+#define I40E_AQ_CPPM_EN_DMCTLX 0x2000
+#define I40E_AQ_CPPM_EN_HPTC 0x4000
+#define I40E_AQ_CPPM_EN_DMARC 0x8000
+ __le16 ttlx;
+ __le32 dmacr;
+ __le16 dmcth;
+ u8 hptc;
+ u8 reserved;
+ __le32 pfltrc;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_cppm_configuration);
+
+/* Set ARP Proxy command / response (indirect 0x0104) */
+struct i40e_aqc_arp_proxy_data {
+ __le16 command_flags;
+#define I40E_AQ_ARP_INIT_IPV4 0x0008
+#define I40E_AQ_ARP_UNSUP_CTL 0x0010
+#define I40E_AQ_ARP_ENA 0x0020
+#define I40E_AQ_ARP_ADD_IPV4 0x0040
+#define I40E_AQ_ARP_DEL_IPV4 0x0080
+ __le16 table_id;
+ __le32 pfpm_proxyfc;
+ __le32 ip_addr;
+ u8 mac_addr[6];
+};
+
+/* Set NS Proxy Table Entry Command (indirect 0x0105) */
+struct i40e_aqc_ns_proxy_data {
+ __le16 table_idx_mac_addr_0;
+ __le16 table_idx_mac_addr_1;
+ __le16 table_idx_ipv6_0;
+ __le16 table_idx_ipv6_1;
+ __le16 control;
+#define I40E_AQ_NS_PROXY_ADD_0 0x0100
+#define I40E_AQ_NS_PROXY_DEL_0 0x0200
+#define I40E_AQ_NS_PROXY_ADD_1 0x0400
+#define I40E_AQ_NS_PROXY_DEL_1 0x0800
+#define I40E_AQ_NS_PROXY_ADD_IPV6_0 0x1000
+#define I40E_AQ_NS_PROXY_DEL_IPV6_0 0x2000
+#define I40E_AQ_NS_PROXY_ADD_IPV6_1 0x4000
+#define I40E_AQ_NS_PROXY_DEL_IPV6_1 0x8000
+#define I40E_AQ_NS_PROXY_COMMAND_SEQ 0x0001
+#define I40E_AQ_NS_PROXY_INIT_IPV6_TBL 0x0002
+#define I40E_AQ_NS_PROXY_INIT_MAC_TBL 0x0004
+ u8 mac_addr_0[6];
+ u8 mac_addr_1[6];
+ u8 local_mac_addr[6];
+ u8 ipv6_addr_0[16]; /* Warning! spec specifies BE byte order */
+ u8 ipv6_addr_1[16];
+};
+
+/* Manage LAA Command (0x0106) - obsolete */
+struct i40e_aqc_mng_laa {
+ __le16 command_flags;
+#define I40E_AQ_LAA_FLAG_WR 0x8000
+ u8 reserved[2];
+ __le32 sal;
+ __le16 sah;
+ u8 reserved2[6];
+};
+
+/* Manage MAC Address Read Command (0x0107) */
+struct i40e_aqc_mac_address_read {
+ __le16 command_flags;
+#define I40E_AQC_LAN_ADDR_VALID 0x10
+#define I40E_AQC_SAN_ADDR_VALID 0x20
+#define I40E_AQC_PORT_ADDR_VALID 0x40
+#define I40E_AQC_WOL_ADDR_VALID 0x80
+#define I40E_AQC_ADDR_VALID_MASK 0xf0
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_read);
+
+struct i40e_aqc_mac_address_read_data {
+ u8 pf_lan_mac[6];
+ u8 pf_san_mac[6];
+ u8 port_mac[6];
+ u8 pf_wol_mac[6];
+};
+
+I40E_CHECK_STRUCT_LEN(24, i40e_aqc_mac_address_read_data);
+
+/* Manage MAC Address Write Command (0x0108) */
+struct i40e_aqc_mac_address_write {
+ __le16 command_flags;
+#define I40E_AQC_WRITE_TYPE_LAA_ONLY 0x0000
+#define I40E_AQC_WRITE_TYPE_LAA_WOL 0x4000
+#define I40E_AQC_WRITE_TYPE_PORT 0x8000
+#define I40E_AQC_WRITE_TYPE_MASK 0xc000
+ __le16 mac_sah;
+ __le32 mac_sal;
+ u8 reserved[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_write);
+
+/* Switch configuration commands (0x02xx) */
+
+/* Used by many indirect commands that only pass an SEID and a buffer in the
+ * command
+ */
+struct i40e_aqc_switch_seid {
+ __le16 seid;
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_switch_seid);
+
+/* Get Switch Configuration command (indirect 0x0200)
+ * uses i40e_aqc_switch_seid for the descriptor
+ */
+struct i40e_aqc_get_switch_config_header_resp {
+ __le16 num_reported;
+ __le16 num_total;
+ u8 reserved[12];
+};
+
+struct i40e_aqc_switch_config_element_resp {
+ u8 element_type;
+#define I40E_AQ_SW_ELEM_TYPE_MAC 1
+#define I40E_AQ_SW_ELEM_TYPE_PF 2
+#define I40E_AQ_SW_ELEM_TYPE_VF 3
+#define I40E_AQ_SW_ELEM_TYPE_EMP 4
+#define I40E_AQ_SW_ELEM_TYPE_BMC 5
+#define I40E_AQ_SW_ELEM_TYPE_PV 16
+#define I40E_AQ_SW_ELEM_TYPE_VEB 17
+#define I40E_AQ_SW_ELEM_TYPE_PA 18
+#define I40E_AQ_SW_ELEM_TYPE_VSI 19
+ u8 revision;
+#define I40E_AQ_SW_ELEM_REV_1 1
+ __le16 seid;
+ __le16 uplink_seid;
+ __le16 downlink_seid;
+ u8 reserved[3];
+ u8 connection_type;
+#define I40E_AQ_CONN_TYPE_REGULAR 0x1
+#define I40E_AQ_CONN_TYPE_DEFAULT 0x2
+#define I40E_AQ_CONN_TYPE_CASCADED 0x3
+ __le16 scheduler_id;
+ __le16 element_info;
+};
+
+/* Get Switch Configuration (indirect 0x0200)
+ * an array of elements is returned in the response buffer;
+ * the first entry in the array is the header, the remainder are elements
+ */
+struct i40e_aqc_get_switch_config_resp {
+ struct i40e_aqc_get_switch_config_header_resp header;
+ struct i40e_aqc_switch_config_element_resp element[1];
+};
+
+/* Add Statistics (direct 0x0201)
+ * Remove Statistics (direct 0x0202)
+ */
+struct i40e_aqc_add_remove_statistics {
+ __le16 seid;
+ __le16 vlan;
+ __le16 stat_index;
+ u8 reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_statistics);
+
+/* Set Port Parameters command (direct 0x0203) */
+struct i40e_aqc_set_port_parameters {
+ __le16 command_flags;
+#define I40E_AQ_SET_P_PARAMS_SAVE_BAD_PACKETS 1
+#define I40E_AQ_SET_P_PARAMS_PAD_SHORT_PACKETS 2 /* must set! */
+#define I40E_AQ_SET_P_PARAMS_DOUBLE_VLAN_ENA 4
+ __le16 bad_frame_vsi;
+ __le16 default_seid; /* reserved for command */
+ u8 reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_port_parameters);
+
+/* Get Switch Resource Allocation (indirect 0x0204) */
+struct i40e_aqc_get_switch_resource_alloc {
+ u8 num_entries; /* reserved for command */
+ u8 reserved[7];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_switch_resource_alloc);
+
+/* expect an array of these structs in the response buffer */
+struct i40e_aqc_switch_resource_alloc_element_resp {
+ u8 resource_type;
+#define I40E_AQ_RESOURCE_TYPE_VEB 0x0
+#define I40E_AQ_RESOURCE_TYPE_VSI 0x1
+#define I40E_AQ_RESOURCE_TYPE_MACADDR 0x2
+#define I40E_AQ_RESOURCE_TYPE_STAG 0x3
+#define I40E_AQ_RESOURCE_TYPE_ETAG 0x4
+#define I40E_AQ_RESOURCE_TYPE_MULTICAST_HASH 0x5
+#define I40E_AQ_RESOURCE_TYPE_UNICAST_HASH 0x6
+#define I40E_AQ_RESOURCE_TYPE_VLAN 0x7
+#define I40E_AQ_RESOURCE_TYPE_VSI_LIST_ENTRY 0x8
+#define I40E_AQ_RESOURCE_TYPE_ETAG_LIST_ENTRY 0x9
+#define I40E_AQ_RESOURCE_TYPE_VLAN_STAT_POOL 0xA
+#define I40E_AQ_RESOURCE_TYPE_MIRROR_RULE 0xB
+#define I40E_AQ_RESOURCE_TYPE_QUEUE_SETS 0xC
+#define I40E_AQ_RESOURCE_TYPE_VLAN_FILTERS 0xD
+#define I40E_AQ_RESOURCE_TYPE_INNER_MAC_FILTERS 0xF
+#define I40E_AQ_RESOURCE_TYPE_IP_FILTERS 0x10
+#define I40E_AQ_RESOURCE_TYPE_GRE_VN_KEYS 0x11
+#define I40E_AQ_RESOURCE_TYPE_VN2_KEYS 0x12
+#define I40E_AQ_RESOURCE_TYPE_TUNNEL_PORTS 0x13
+ u8 reserved1;
+ __le16 guaranteed;
+ __le16 total;
+ __le16 used;
+ __le16 total_unalloced;
+ u8 reserved2[6];
+};
+
+/* Add VSI (indirect 0x0210)
+ * this indirect command uses struct i40e_aqc_vsi_properties_data
+ * as the indirect buffer (128 bytes)
+ *
+ * Update VSI (indirect 0x0211) and Get VSI (indirect 0x0212)
+ * use the generic i40e_aqc_switch_seid descriptor format
+ * use the same completion and data structure as Add VSI
+ */
+struct i40e_aqc_add_get_update_vsi {
+ __le16 uplink_seid;
+ u8 connection_type;
+#define I40E_AQ_VSI_CONN_TYPE_NORMAL 0x1
+#define I40E_AQ_VSI_CONN_TYPE_DEFAULT 0x2
+#define I40E_AQ_VSI_CONN_TYPE_CASCADED 0x3
+ u8 reserved1;
+ u8 vf_id;
+ u8 reserved2;
+ __le16 vsi_flags;
+#define I40E_AQ_VSI_TYPE_SHIFT 0x0
+#define I40E_AQ_VSI_TYPE_MASK (0x3 << I40E_AQ_VSI_TYPE_SHIFT)
+#define I40E_AQ_VSI_TYPE_VF 0x0
+#define I40E_AQ_VSI_TYPE_VMDQ2 0x1
+#define I40E_AQ_VSI_TYPE_PF 0x2
+#define I40E_AQ_VSI_TYPE_EMP_MNG 0x3
+#define I40E_AQ_VSI_FLAG_CASCADED_PV 0x4
+#define I40E_AQ_VSI_FLAG_CLOUD_VSI 0x8
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_get_update_vsi);
+
+struct i40e_aqc_add_get_update_vsi_completion {
+ __le16 seid;
+ __le16 vsi_number;
+ __le16 vsi_used;
+ __le16 vsi_free;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_get_update_vsi_completion);
+
+struct i40e_aqc_vsi_properties_data {
+ /* first 96 bytes are written by SW */
+ __le16 valid_sections;
+#define I40E_AQ_VSI_PROP_SWITCH_VALID 0x0001
+#define I40E_AQ_VSI_PROP_SECURITY_VALID 0x0002
+#define I40E_AQ_VSI_PROP_VLAN_VALID 0x0004
+#define I40E_AQ_VSI_PROP_CAS_PV_VALID 0x0008
+#define I40E_AQ_VSI_PROP_INGRESS_UP_VALID 0x0010
+#define I40E_AQ_VSI_PROP_EGRESS_UP_VALID 0x0020
+#define I40E_AQ_VSI_PROP_QUEUE_MAP_VALID 0x0040
+#define I40E_AQ_VSI_PROP_QUEUE_OPT_VALID 0x0080
+#define I40E_AQ_VSI_PROP_OUTER_UP_VALID 0x0100
+#define I40E_AQ_VSI_PROP_SCHED_VALID 0x0200
+ /* switch section */
+ __le16 switch_id; /* 12bit id combined with flags below */
+#define I40E_AQ_VSI_SW_ID_SHIFT 0x0000
+#define I40E_AQ_VSI_SW_ID_MASK (0xFFF << I40E_AQ_VSI_SW_ID_SHIFT)
+#define I40E_AQ_VSI_SW_ID_FLAG_NOT_STAG 0x1000
+#define I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB 0x2000
+#define I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB 0x4000
+ u8 sw_reserved[2];
+ /* security section */
+ u8 sec_flags;
+#define I40E_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD 0x01
+#define I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK 0x02
+#define I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK 0x04
+ u8 sec_reserved;
+ /* VLAN section */
+ __le16 pvid; /* VLANS include priority bits */
+ __le16 fcoe_pvid;
+ u8 port_vlan_flags;
+#define I40E_AQ_VSI_PVLAN_MODE_SHIFT 0x00
+#define I40E_AQ_VSI_PVLAN_MODE_MASK (0x03 << \
+ I40E_AQ_VSI_PVLAN_MODE_SHIFT)
+#define I40E_AQ_VSI_PVLAN_MODE_TAGGED 0x01
+#define I40E_AQ_VSI_PVLAN_MODE_UNTAGGED 0x02
+#define I40E_AQ_VSI_PVLAN_MODE_ALL 0x03
+#define I40E_AQ_VSI_PVLAN_INSERT_PVID 0x04
+#define I40E_AQ_VSI_PVLAN_EMOD_SHIFT 0x03
+#define I40E_AQ_VSI_PVLAN_EMOD_MASK (0x3 << \
+ I40E_AQ_VSI_PVLAN_EMOD_SHIFT)
+#define I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH 0x0
+#define I40E_AQ_VSI_PVLAN_EMOD_STR_UP 0x08
+#define I40E_AQ_VSI_PVLAN_EMOD_STR 0x10
+#define I40E_AQ_VSI_PVLAN_EMOD_NOTHING 0x18
+ u8 pvlan_reserved[3];
+ /* ingress egress up sections */
+ __le32 ingress_table; /* bitmap, 3 bits per up */
+#define I40E_AQ_VSI_UP_TABLE_UP0_SHIFT 0
+#define I40E_AQ_VSI_UP_TABLE_UP0_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP0_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP1_SHIFT 3
+#define I40E_AQ_VSI_UP_TABLE_UP1_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP1_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP2_SHIFT 6
+#define I40E_AQ_VSI_UP_TABLE_UP2_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP2_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP3_SHIFT 9
+#define I40E_AQ_VSI_UP_TABLE_UP3_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP3_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP4_SHIFT 12
+#define I40E_AQ_VSI_UP_TABLE_UP4_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP4_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP5_SHIFT 15
+#define I40E_AQ_VSI_UP_TABLE_UP5_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP5_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP6_SHIFT 18
+#define I40E_AQ_VSI_UP_TABLE_UP6_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP6_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP7_SHIFT 21
+#define I40E_AQ_VSI_UP_TABLE_UP7_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP7_SHIFT)
+ __le32 egress_table; /* same defines as for ingress table */
+ /* cascaded PV section */
+ __le16 cas_pv_tag;
+ u8 cas_pv_flags;
+#define I40E_AQ_VSI_CAS_PV_TAGX_SHIFT 0x00
+#define I40E_AQ_VSI_CAS_PV_TAGX_MASK (0x03 << \
+ I40E_AQ_VSI_CAS_PV_TAGX_SHIFT)
+#define I40E_AQ_VSI_CAS_PV_TAGX_LEAVE 0x00
+#define I40E_AQ_VSI_CAS_PV_TAGX_REMOVE 0x01
+#define I40E_AQ_VSI_CAS_PV_TAGX_COPY 0x02
+#define I40E_AQ_VSI_CAS_PV_INSERT_TAG 0x10
+#define I40E_AQ_VSI_CAS_PV_ETAG_PRUNE 0x20
+#define I40E_AQ_VSI_CAS_PV_ACCEPT_HOST_TAG 0x40
+ u8 cas_pv_reserved;
+ /* queue mapping section */
+ __le16 mapping_flags;
+#define I40E_AQ_VSI_QUE_MAP_CONTIG 0x0
+#define I40E_AQ_VSI_QUE_MAP_NONCONTIG 0x1
+ __le16 queue_mapping[16];
+#define I40E_AQ_VSI_QUEUE_SHIFT 0x0
+#define I40E_AQ_VSI_QUEUE_MASK (0x7FF << I40E_AQ_VSI_QUEUE_SHIFT)
+ __le16 tc_mapping[8];
+#define I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT 0
+#define I40E_AQ_VSI_TC_QUE_OFFSET_MASK (0x1FF << \
+ I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT)
+#define I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT 9
+#define I40E_AQ_VSI_TC_QUE_NUMBER_MASK (0x7 << \
+ I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
+ /* queueing option section */
+ u8 queueing_opt_flags;
+#define I40E_AQ_VSI_QUE_OPT_TCP_ENA 0x10
+#define I40E_AQ_VSI_QUE_OPT_FCOE_ENA 0x20
+ u8 queueing_opt_reserved[3];
+ /* scheduler section */
+ u8 up_enable_bits;
+ u8 sched_reserved;
+ /* outer up section */
+ __le32 outer_up_table; /* same structure and defines as ingress table */
+ u8 cmd_reserved[8];
+ /* last 32 bytes are written by FW */
+ __le16 qs_handle[8];
+#define I40E_AQ_VSI_QS_HANDLE_INVALID 0xFFFF
+ __le16 stat_counter_idx;
+ __le16 sched_id;
+ u8 resp_reserved[12];
+};
+
+I40E_CHECK_STRUCT_LEN(128, i40e_aqc_vsi_properties_data);
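+
+/* Illustrative use of the shift/mask defines above (a sketch of one
+ * plausible encoding, not necessarily how the driver programs it): to set
+ * a port VLAN with PVID insertion, where ctxt points at a
+ * struct i40e_aqc_vsi_properties_data buffer and vid is the VLAN id:
+ *
+ *	ctxt->port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_TAGGED |
+ *				I40E_AQ_VSI_PVLAN_INSERT_PVID;
+ *	ctxt->pvid = cpu_to_le16(vid);
+ *	ctxt->valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
+ */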
+
+/* Add Port Virtualizer (direct 0x0220)
+ * also used for update PV (direct 0x0221) but only flags are used
+ * (IS_CTRL_PORT only works on add PV)
+ */
+struct i40e_aqc_add_update_pv {
+ __le16 command_flags;
+#define I40E_AQC_PV_FLAG_PV_TYPE 0x1
+#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_STAG_EN 0x2
+#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_ETAG_EN 0x4
+#define I40E_AQC_PV_FLAG_IS_CTRL_PORT 0x8
+ __le16 uplink_seid;
+ __le16 connected_seid;
+ u8 reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv);
+
+struct i40e_aqc_add_update_pv_completion {
+ /* reserved for update; for add also encodes error if rc == ENOSPC */
+ __le16 pv_seid;
+#define I40E_AQC_PV_ERR_FLAG_NO_PV 0x1
+#define I40E_AQC_PV_ERR_FLAG_NO_SCHED 0x2
+#define I40E_AQC_PV_ERR_FLAG_NO_COUNTER 0x4
+#define I40E_AQC_PV_ERR_FLAG_NO_ENTRY 0x8
+ u8 reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv_completion);
+
+/* Get PV Params (direct 0x0222)
+ * uses i40e_aqc_switch_seid for the descriptor
+ */
+
+struct i40e_aqc_get_pv_params_completion {
+ __le16 seid;
+ __le16 default_stag;
+ __le16 pv_flags; /* same flags as add_pv */
+#define I40E_AQC_GET_PV_PV_TYPE 0x1
+#define I40E_AQC_GET_PV_FRWD_UNKNOWN_STAG 0x2
+#define I40E_AQC_GET_PV_FRWD_UNKNOWN_ETAG 0x4
+ u8 reserved[8];
+ __le16 default_port_seid;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_pv_params_completion);
+
+/* Add VEB (direct 0x0230) */
+struct i40e_aqc_add_veb {
+ __le16 uplink_seid;
+ __le16 downlink_seid;
+ __le16 veb_flags;
+#define I40E_AQC_ADD_VEB_FLOATING 0x1
+#define I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT 1
+#define I40E_AQC_ADD_VEB_PORT_TYPE_MASK (0x3 << \
+ I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT)
+#define I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT 0x2
+#define I40E_AQC_ADD_VEB_PORT_TYPE_DATA 0x4
+#define I40E_AQC_ADD_VEB_ENABLE_L2_FILTER 0x8
+ u8 enable_tcs;
+ u8 reserved[9];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb);
+
+struct i40e_aqc_add_veb_completion {
+ u8 reserved[6];
+ __le16 switch_seid;
+ /* also encodes error if rc == ENOSPC; codes are the same as add_pv */
+ __le16 veb_seid;
+#define I40E_AQC_VEB_ERR_FLAG_NO_VEB 0x1
+#define I40E_AQC_VEB_ERR_FLAG_NO_SCHED 0x2
+#define I40E_AQC_VEB_ERR_FLAG_NO_COUNTER 0x4
+#define I40E_AQC_VEB_ERR_FLAG_NO_ENTRY 0x8
+ __le16 statistic_index;
+ __le16 vebs_used;
+ __le16 vebs_free;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb_completion);
+
+/* Get VEB Parameters (direct 0x0232)
+ * uses i40e_aqc_switch_seid for the descriptor
+ */
+struct i40e_aqc_get_veb_parameters_completion {
+ __le16 seid;
+ __le16 switch_id;
+ __le16 veb_flags; /* only the first/last flags from 0x0230 are valid */
+ __le16 statistic_index;
+ __le16 vebs_used;
+ __le16 vebs_free;
+ u8 reserved[4];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_veb_parameters_completion);
+
+/* Delete Element (direct 0x0243)
+ * uses the generic i40e_aqc_switch_seid
+ */
+
+/* Add MAC-VLAN (indirect 0x0250) */
+
+/* used for the command for most vlan commands */
+struct i40e_aqc_macvlan {
+ __le16 num_addresses;
+ __le16 seid[3];
+#define I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT 0
+#define I40E_AQC_MACVLAN_CMD_SEID_NUM_MASK (0x3FF << \
+ I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT)
+#define I40E_AQC_MACVLAN_CMD_SEID_VALID 0x8000
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_macvlan);
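+
+/* Illustration (an assumption about intended use, not driver code): the
+ * low 10 bits of seid[0] carry the switch element id and the VALID bit
+ * marks the entry as populated, so a caller would typically encode
+ *
+ *	cmd->num_addresses = cpu_to_le16(count);
+ *	cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID |
+ *		       (vsi_seid & I40E_AQC_MACVLAN_CMD_SEID_NUM_MASK));
+ *
+ * with an array of count element structures placed in the indirect buffer
+ * addressed by addr_high/addr_low.
+ */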
+
+/* indirect data for command and response */
+struct i40e_aqc_add_macvlan_element_data {
+ u8 mac_addr[6];
+ __le16 vlan_tag;
+ __le16 flags;
+#define I40E_AQC_MACVLAN_ADD_PERFECT_MATCH 0x0001
+#define I40E_AQC_MACVLAN_ADD_HASH_MATCH 0x0002
+#define I40E_AQC_MACVLAN_ADD_IGNORE_VLAN 0x0004
+#define I40E_AQC_MACVLAN_ADD_TO_QUEUE 0x0008
+ __le16 queue_number;
+#define I40E_AQC_MACVLAN_CMD_QUEUE_SHIFT 0
+#define I40E_AQC_MACVLAN_CMD_QUEUE_MASK (0x7FF << \
+ I40E_AQC_MACVLAN_CMD_QUEUE_SHIFT)
+ /* response section */
+ u8 match_method;
+#define I40E_AQC_MM_PERFECT_MATCH 0x01
+#define I40E_AQC_MM_HASH_MATCH 0x02
+#define I40E_AQC_MM_ERR_NO_RES 0xFF
+ u8 reserved1[3];
+};
+
+struct i40e_aqc_add_remove_macvlan_completion {
+ __le16 perfect_mac_used;
+ __le16 perfect_mac_free;
+ __le16 unicast_hash_free;
+ __le16 multicast_hash_free;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_macvlan_completion);
+
+/* Remove MAC-VLAN (indirect 0x0251)
+ * uses i40e_aqc_macvlan for the descriptor
+ * data points to an array of num_addresses of elements
+ */
+
+struct i40e_aqc_remove_macvlan_element_data {
+ u8 mac_addr[6];
+ __le16 vlan_tag;
+ u8 flags;
+#define I40E_AQC_MACVLAN_DEL_PERFECT_MATCH 0x01
+#define I40E_AQC_MACVLAN_DEL_HASH_MATCH 0x02
+#define I40E_AQC_MACVLAN_DEL_IGNORE_VLAN 0x08
+#define I40E_AQC_MACVLAN_DEL_ALL_VSIS 0x10
+ u8 reserved[3];
+ /* reply section */
+ u8 error_code;
+#define I40E_AQC_REMOVE_MACVLAN_SUCCESS 0x0
+#define I40E_AQC_REMOVE_MACVLAN_FAIL 0xFF
+ u8 reply_reserved[3];
+};
+
+/* Add VLAN (indirect 0x0252)
+ * Remove VLAN (indirect 0x0253)
+ * use the generic i40e_aqc_macvlan for the command
+ */
+struct i40e_aqc_add_remove_vlan_element_data {
+ __le16 vlan_tag;
+ u8 vlan_flags;
+/* flags for add VLAN */
+#define I40E_AQC_ADD_VLAN_LOCAL 0x1
+#define I40E_AQC_ADD_PVLAN_TYPE_SHIFT 1
+#define I40E_AQC_ADD_PVLAN_TYPE_MASK (0x3 << \
+ I40E_AQC_ADD_PVLAN_TYPE_SHIFT)
+#define I40E_AQC_ADD_PVLAN_TYPE_REGULAR 0x0
+#define I40E_AQC_ADD_PVLAN_TYPE_PRIMARY 0x2
+#define I40E_AQC_ADD_PVLAN_TYPE_SECONDARY 0x4
+#define I40E_AQC_VLAN_PTYPE_SHIFT 3
+#define I40E_AQC_VLAN_PTYPE_MASK (0x3 << I40E_AQC_VLAN_PTYPE_SHIFT)
+#define I40E_AQC_VLAN_PTYPE_REGULAR_VSI 0x0
+#define I40E_AQC_VLAN_PTYPE_PROMISC_VSI 0x8
+#define I40E_AQC_VLAN_PTYPE_COMMUNITY_VSI 0x10
+#define I40E_AQC_VLAN_PTYPE_ISOLATED_VSI 0x18
+/* flags for remove VLAN */
+#define I40E_AQC_REMOVE_VLAN_ALL 0x1
+ u8 reserved;
+ u8 result;
+/* flags for add VLAN */
+#define I40E_AQC_ADD_VLAN_SUCCESS 0x0
+#define I40E_AQC_ADD_VLAN_FAIL_REQUEST 0xFE
+#define I40E_AQC_ADD_VLAN_FAIL_RESOURCE 0xFF
+/* flags for remove VLAN */
+#define I40E_AQC_REMOVE_VLAN_SUCCESS 0x0
+#define I40E_AQC_REMOVE_VLAN_FAIL 0xFF
+ u8 reserved1[3];
+};
+
+struct i40e_aqc_add_remove_vlan_completion {
+ u8 reserved[4];
+ __le16 vlans_used;
+ __le16 vlans_free;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Set VSI Promiscuous Modes (direct 0x0254) */
+struct i40e_aqc_set_vsi_promiscuous_modes {
+ __le16 promiscuous_flags;
+ __le16 valid_flags;
+/* flags used for both fields above */
+#define I40E_AQC_SET_VSI_PROMISC_UNICAST 0x01
+#define I40E_AQC_SET_VSI_PROMISC_MULTICAST 0x02
+#define I40E_AQC_SET_VSI_PROMISC_BROADCAST 0x04
+#define I40E_AQC_SET_VSI_DEFAULT 0x08
+#define I40E_AQC_SET_VSI_PROMISC_VLAN 0x10
+ __le16 seid;
+#define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF
+ u8 reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_vsi_promiscuous_modes);
+
+/* Add S/E-tag command (direct 0x0255)
+ * Uses generic i40e_aqc_add_remove_tag_completion for completion
+ */
+struct i40e_aqc_add_tag {
+ __le16 flags;
+#define I40E_AQC_ADD_TAG_FLAG_TO_QUEUE 0x0001
+ __le16 seid;
+#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT 0
+#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_MASK (0x3FF << \
+ I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT)
+ __le16 tag;
+ __le16 queue_number;
+ u8 reserved[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_tag);
+
+struct i40e_aqc_add_remove_tag_completion {
+ u8 reserved[12];
+ __le16 tags_used;
+ __le16 tags_free;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_tag_completion);
+
+/* Remove S/E-tag command (direct 0x0256)
+ * Uses generic i40e_aqc_add_remove_tag_completion for completion
+ */
+struct i40e_aqc_remove_tag {
+ __le16 seid;
+#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT 0
+#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_MASK (0x3FF << \
+ I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT)
+ __le16 tag;
+ u8 reserved[12];
+};
+
+/* Add multicast E-Tag (direct 0x0257)
+ * del multicast E-Tag (direct 0x0258) only uses pv_seid and etag fields
+ * and no external data
+ */
+struct i40e_aqc_add_remove_mcast_etag {
+ __le16 pv_seid;
+ __le16 etag;
+ u8 num_unicast_etags;
+ u8 reserved[3];
+ __le32 addr_high; /* address of array of 2-byte s-tags */
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_mcast_etag);
+
+struct i40e_aqc_add_remove_mcast_etag_completion {
+ u8 reserved[4];
+ __le16 mcast_etags_used;
+ __le16 mcast_etags_free;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_mcast_etag_completion);
+
+/* Update S/E-Tag (direct 0x0259) */
+struct i40e_aqc_update_tag {
+ __le16 seid;
+#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT 0
+#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_MASK (0x3FF << \
+ I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT)
+ __le16 old_tag;
+ __le16 new_tag;
+ u8 reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag);
+
+struct i40e_aqc_update_tag_completion {
+ u8 reserved[12];
+ __le16 tags_used;
+ __le16 tags_free;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag_completion);
+
+/* Add Control Packet filter (direct 0x025A)
+ * Remove Control Packet filter (direct 0x025B)
+ * uses the i40e_aqc_add_remove_control_packet_filter struct below,
+ * and the generic direct completion structure
+ */
+struct i40e_aqc_add_remove_control_packet_filter {
+ u8 mac[6];
+ __le16 etype;
+ __le16 flags;
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC 0x0001
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP 0x0002
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE 0x0004
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX 0x0008
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_RX 0x0000
+ __le16 seid;
+#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT 0
+#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_MASK (0x3FF << \
+ I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT)
+ __le16 queue;
+ u8 reserved[2];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter);
+
+struct i40e_aqc_add_remove_control_packet_filter_completion {
+ __le16 mac_etype_used;
+ __le16 etype_used;
+ __le16 mac_etype_free;
+ __le16 etype_free;
+ u8 reserved[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter_completion);
+
+/* Add Cloud filters (indirect 0x025C)
+ * Remove Cloud filters (indirect 0x025D)
+ * uses the i40e_aqc_add_remove_cloud_filters,
+ * and the generic indirect completion structure
+ */
+struct i40e_aqc_add_remove_cloud_filters {
+ u8 num_filters;
+ u8 reserved;
+ __le16 seid;
+#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT 0
+#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_MASK (0x3FF << \
+ I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT)
+ u8 reserved2[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_cloud_filters);
+
+struct i40e_aqc_add_remove_cloud_filters_element_data {
+ u8 outer_mac[6];
+ u8 inner_mac[6];
+ __le16 inner_vlan;
+ union {
+ struct {
+ u8 reserved[12];
+ u8 data[4];
+ } v4;
+ struct {
+ u8 data[16];
+ } v6;
+ } ipaddr;
+ __le16 flags;
+#define I40E_AQC_ADD_CLOUD_FILTER_SHIFT 0
+#define I40E_AQC_ADD_CLOUD_FILTER_MASK (0x3F << \
+ I40E_AQC_ADD_CLOUD_FILTER_SHIFT)
+#define I40E_AQC_ADD_CLOUD_FILTER_OIP 0x0001
+#define I40E_AQC_ADD_CLOUD_FILTER_OIP_GRE 0x0002
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN 0x0003
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_GRE 0x0004
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID 0x0006
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_VNL 0x0007
+/* 0x0008 reserved */
+#define I40E_AQC_ADD_CLOUD_FILTER_OMAC 0x0009
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC 0x000A
+#define I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE 0x0080
+#define I40E_AQC_ADD_CLOUD_VNK_SHIFT 6
+#define I40E_AQC_ADD_CLOUD_VNK_MASK 0x00C0
+#define I40E_AQC_ADD_CLOUD_FLAGS_IPV4 0
+#define I40E_AQC_ADD_CLOUD_FLAGS_IPV6 0x0100
+ __le32 key_low;
+ __le32 key_high;
+ __le16 queue_number;
+#define I40E_AQC_ADD_CLOUD_QUEUE_SHIFT 0
+#define I40E_AQC_ADD_CLOUD_QUEUE_MASK (0x3F << \
+ I40E_AQC_ADD_CLOUD_QUEUE_SHIFT)
+ u8 reserved[14];
+ /* response section */
+ u8 allocation_result;
+#define I40E_AQC_ADD_CLOUD_FILTER_SUCCESS 0x0
+#define I40E_AQC_ADD_CLOUD_FILTER_FAIL 0xFF
+ u8 response_reserved[7];
+};
+
+struct i40e_aqc_remove_cloud_filters_completion {
+ __le16 perfect_ovlan_used;
+ __le16 perfect_ovlan_free;
+ __le16 vlan_used;
+ __le16 vlan_free;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_cloud_filters_completion);
+
+/* Add Mirror Rule (indirect or direct 0x0260)
+ * Delete Mirror Rule (indirect or direct 0x0261)
+ * note: some rule types (4,5) do not use an external buffer.
+ * take care to set the flags correctly.
+ */
+struct i40e_aqc_add_delete_mirror_rule {
+ __le16 seid;
+ __le16 rule_type;
+#define I40E_AQC_MIRROR_RULE_TYPE_SHIFT 0
+#define I40E_AQC_MIRROR_RULE_TYPE_MASK (0x7 << \
+ I40E_AQC_MIRROR_RULE_TYPE_SHIFT)
+#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS 1
+#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS 2
+#define I40E_AQC_MIRROR_RULE_TYPE_VLAN 3
+#define I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS 4
+#define I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS 5
+ __le16 num_entries;
+ __le16 destination; /* VSI for add, rule id for delete */
+ __le32 addr_high; /* address of array of 2-byte VSI or VLAN ids */
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule);
+
+struct i40e_aqc_add_delete_mirror_rule_completion {
+ u8 reserved[2];
+ __le16 rule_id; /* only used on add */
+ __le16 mirror_rules_used;
+ __le16 mirror_rules_free;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion);
+
+/* Set Storm Control Configuration (direct 0x0280)
+ * Get Storm Control Configuration (direct 0x0281)
+ * the command and response use the same descriptor structure
+ */
+struct i40e_aqc_set_get_storm_control_config {
+ __le32 broadcast_threshold;
+ __le32 multicast_threshold;
+ __le32 control_flags;
+#define I40E_AQC_STORM_CONTROL_MDIPW 0x01
+#define I40E_AQC_STORM_CONTROL_MDICW 0x02
+#define I40E_AQC_STORM_CONTROL_BDIPW 0x04
+#define I40E_AQC_STORM_CONTROL_BDICW 0x08
+#define I40E_AQC_STORM_CONTROL_BIDU 0x10
+#define I40E_AQC_STORM_CONTROL_INTERVAL_SHIFT 8
+#define I40E_AQC_STORM_CONTROL_INTERVAL_MASK (0x3FF << \
+ I40E_AQC_STORM_CONTROL_INTERVAL_SHIFT)
+ u8 reserved[4];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_get_storm_control_config);
+
+/* DCB 0x03xx */
+
+/* PFC Ignore (direct 0x0301)
+ * the command and response use the same descriptor structure
+ */
+struct i40e_aqc_pfc_ignore {
+ u8 tc_bitmap;
+ u8 command_flags; /* unused on response */
+#define I40E_AQC_PFC_IGNORE_SET 0x80
+#define I40E_AQC_PFC_IGNORE_CLEAR 0x0
+ u8 reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_pfc_ignore);
+
+/* DCB Update (direct 0x0302) uses the i40e_aq_desc structure
+ * with no parameters
+ */
+
+/* TX scheduler 0x04xx */
+
+/* Almost all the indirect commands use
+ * this generic struct to pass the SEID in param0
+ */
+struct i40e_aqc_tx_sched_ind {
+ __le16 vsi_seid;
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_tx_sched_ind);
+
+/* Several commands respond with a set of queue set handles */
+struct i40e_aqc_qs_handles_resp {
+ __le16 qs_handles[8];
+};
+
+/* Configure VSI BW limits (direct 0x0400) */
+struct i40e_aqc_configure_vsi_bw_limit {
+ __le16 vsi_seid;
+ u8 reserved[2];
+ __le16 credit;
+ u8 reserved1[2];
+ u8 max_credit; /* 0-3, limit = 2^max */
+ u8 reserved2[7];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_vsi_bw_limit);
+
+/* Configure VSI Bandwidth Limit per Traffic Type (indirect 0x0406)
+ * responds with i40e_aqc_qs_handles_resp
+ */
+struct i40e_aqc_configure_vsi_ets_sla_bw_data {
+ u8 tc_valid_bits;
+ u8 reserved[15];
+ __le16 tc_bw_credits[8]; /* FW writes back QS handles here */
+
+ /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
+ __le16 tc_bw_max[2];
+ u8 reserved1[28];
+};
+
+/* Configure VSI Bandwidth Allocation per Traffic Type (indirect 0x0407)
+ * responds with i40e_aqc_qs_handles_resp
+ */
+struct i40e_aqc_configure_vsi_tc_bw_data {
+ u8 tc_valid_bits;
+ u8 reserved[3];
+ u8 tc_bw_credits[8];
+ u8 reserved1[4];
+ __le16 qs_handles[8];
+};
+
+/* Query vsi bw configuration (indirect 0x0408) */
+struct i40e_aqc_query_vsi_bw_config_resp {
+ u8 tc_valid_bits;
+ u8 tc_suspended_bits;
+ u8 reserved[14];
+ __le16 qs_handles[8];
+ u8 reserved1[4];
+ __le16 port_bw_limit;
+ u8 reserved2[2];
+ u8 max_bw; /* 0-3, limit = 2^max */
+ u8 reserved3[23];
+};
+
+/* Query VSI Bandwidth Allocation per Traffic Type (indirect 0x040A) */
+struct i40e_aqc_query_vsi_ets_sla_config_resp {
+ u8 tc_valid_bits;
+ u8 reserved[3];
+ u8 share_credits[8];
+ __le16 credits[8];
+
+ /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
+ __le16 tc_bw_max[2];
+};
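+
+/* Illustrative decode of the packed tc_bw_max[2] words used here and in
+ * the other scheduler structures (an assumption based on the "4 bits per
+ * tc" comments: eight 4-bit fields split across two little-endian words,
+ * limit = 2^value):
+ *
+ *	u16 w = le16_to_cpu(resp->tc_bw_max[tc / 4]);
+ *	u8 max = (w >> ((tc % 4) * 4)) & 0x7;
+ *	u32 limit = 1 << max;
+ *
+ * giving the bandwidth limit factor for traffic class tc.
+ */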
+
+/* Configure Switching Component Bandwidth Limit (direct 0x0410) */
+struct i40e_aqc_configure_switching_comp_bw_limit {
+ __le16 seid;
+ u8 reserved[2];
+ __le16 credit;
+ u8 reserved1[2];
+ u8 max_bw; /* 0-3, limit = 2^max */
+ u8 reserved2[7];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_switching_comp_bw_limit);
+
+/* Enable Physical Port ETS (indirect 0x0413)
+ * Modify Physical Port ETS (indirect 0x0414)
+ * Disable Physical Port ETS (indirect 0x0415)
+ */
+struct i40e_aqc_configure_switching_comp_ets_data {
+ u8 reserved[4];
+ u8 tc_valid_bits;
+ u8 reserved1;
+ u8 tc_strict_priority_flags;
+ u8 reserved2[17];
+ u8 tc_bw_share_credits[8];
+ u8 reserved3[96];
+};
+
+/* Configure Switching Component Bandwidth Limits per TC (indirect 0x0416) */
+struct i40e_aqc_configure_switching_comp_ets_bw_limit_data {
+ u8 tc_valid_bits;
+ u8 reserved[15];
+ __le16 tc_bw_credit[8];
+
+ /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
+ __le16 tc_bw_max[2];
+ u8 reserved1[28];
+};
+
+/* Configure Switching Component Bandwidth Allocation per TC
+ * (indirect 0x0417)
+ */
+struct i40e_aqc_configure_switching_comp_bw_config_data {
+ u8 tc_valid_bits;
+ u8 reserved[2];
+ u8 absolute_credits; /* bool */
+ u8 tc_bw_share_credits[8];
+ u8 reserved1[20];
+};
+
+/* Query Switching Component Configuration (indirect 0x0418) */
+struct i40e_aqc_query_switching_comp_ets_config_resp {
+ u8 tc_valid_bits;
+ u8 reserved[35];
+ __le16 port_bw_limit;
+ u8 reserved1[2];
+ u8 tc_bw_max; /* 0-3, limit = 2^max */
+ u8 reserved2[23];
+};
+
+/* Query Physical Port ETS Configuration (indirect 0x0419) */
+struct i40e_aqc_query_port_ets_config_resp {
+ u8 reserved[4];
+ u8 tc_valid_bits;
+ u8 reserved1;
+ u8 tc_strict_priority_bits;
+ u8 reserved2;
+ u8 tc_bw_share_credits[8];
+ __le16 tc_bw_limits[8];
+
+ /* 4 bits per tc 0-7, 4th bit reserved, limit = 2^max */
+ __le16 tc_bw_max[2];
+ u8 reserved3[32];
+};
+
+/* Query Switching Component Bandwidth Allocation per Traffic Type
+ * (indirect 0x041A)
+ */
+struct i40e_aqc_query_switching_comp_bw_config_resp {
+ u8 tc_valid_bits;
+ u8 reserved[2];
+ u8 absolute_credits_enable; /* bool */
+ u8 tc_bw_share_credits[8];
+ __le16 tc_bw_limits[8];
+
+ /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
+ __le16 tc_bw_max[2];
+};
+
+/* Suspend/resume port TX traffic
+ * (direct 0x041B and 0x041C) uses the generic SEID struct
+ */
+
+/* Get and set the active HMC resource profile and status.
+ * (direct 0x0500) and (direct 0x0501)
+ */
+struct i40e_aq_get_set_hmc_resource_profile {
+ u8 pm_profile;
+ u8 pe_vf_enabled;
+ u8 reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aq_get_set_hmc_resource_profile);
+
+enum i40e_aq_hmc_profile {
+ /* I40E_HMC_PROFILE_NO_CHANGE = 0, reserved */
+ I40E_HMC_PROFILE_DEFAULT = 1,
+ I40E_HMC_PROFILE_FAVOR_VF = 2,
+ I40E_HMC_PROFILE_EQUAL = 3,
+};
+
+#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_PM_MASK 0xF
+#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_COUNT_MASK 0x3F
+
+/* Get PHY Abilities (indirect 0x0600) uses the generic indirect struct */
+
+/* set in param0 for get phy abilities to report qualified modules */
+#define I40E_AQ_PHY_REPORT_QUALIFIED_MODULES 0x0001
+#define I40E_AQ_PHY_REPORT_INITIAL_VALUES 0x0002
+
+enum i40e_aq_phy_type {
+ I40E_PHY_TYPE_SGMII = 0x0,
+ I40E_PHY_TYPE_1000BASE_KX = 0x1,
+ I40E_PHY_TYPE_10GBASE_KX4 = 0x2,
+ I40E_PHY_TYPE_10GBASE_KR = 0x3,
+ I40E_PHY_TYPE_40GBASE_KR4 = 0x4,
+ I40E_PHY_TYPE_XAUI = 0x5,
+ I40E_PHY_TYPE_XFI = 0x6,
+ I40E_PHY_TYPE_SFI = 0x7,
+ I40E_PHY_TYPE_XLAUI = 0x8,
+ I40E_PHY_TYPE_XLPPI = 0x9,
+ I40E_PHY_TYPE_40GBASE_CR4_CU = 0xA,
+ I40E_PHY_TYPE_10GBASE_CR1_CU = 0xB,
+ I40E_PHY_TYPE_100BASE_TX = 0x11,
+ I40E_PHY_TYPE_1000BASE_T = 0x12,
+ I40E_PHY_TYPE_10GBASE_T = 0x13,
+ I40E_PHY_TYPE_10GBASE_SR = 0x14,
+ I40E_PHY_TYPE_10GBASE_LR = 0x15,
+ I40E_PHY_TYPE_10GBASE_SFPP_CU = 0x16,
+ I40E_PHY_TYPE_10GBASE_CR1 = 0x17,
+ I40E_PHY_TYPE_40GBASE_CR4 = 0x18,
+ I40E_PHY_TYPE_40GBASE_SR4 = 0x19,
+ I40E_PHY_TYPE_40GBASE_LR4 = 0x1A,
+ I40E_PHY_TYPE_20GBASE_KR2 = 0x1B,
+ I40E_PHY_TYPE_MAX
+};
+
+#define I40E_LINK_SPEED_100MB_SHIFT 0x1
+#define I40E_LINK_SPEED_1000MB_SHIFT 0x2
+#define I40E_LINK_SPEED_10GB_SHIFT 0x3
+#define I40E_LINK_SPEED_40GB_SHIFT 0x4
+#define I40E_LINK_SPEED_20GB_SHIFT 0x5
+
+enum i40e_aq_link_speed {
+ I40E_LINK_SPEED_UNKNOWN = 0,
+ I40E_LINK_SPEED_100MB = (1 << I40E_LINK_SPEED_100MB_SHIFT),
+ I40E_LINK_SPEED_1GB = (1 << I40E_LINK_SPEED_1000MB_SHIFT),
+ I40E_LINK_SPEED_10GB = (1 << I40E_LINK_SPEED_10GB_SHIFT),
+ I40E_LINK_SPEED_40GB = (1 << I40E_LINK_SPEED_40GB_SHIFT),
+ I40E_LINK_SPEED_20GB = (1 << I40E_LINK_SPEED_20GB_SHIFT)
+};
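+
+/* Example (illustrative): the link_speed fields below are bitmaps built
+ * from the shift defines above, so a given speed is tested with a bitwise
+ * AND rather than an equality compare, since ability bitmaps may carry
+ * several speeds at once:
+ *
+ *	if (link_speed & I40E_LINK_SPEED_10GB)
+ *		handle_10g_link();
+ *
+ * (handle_10g_link() is a placeholder for the caller's own handling.)
+ */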
+
+struct i40e_aqc_module_desc {
+ u8 oui[3];
+ u8 reserved1;
+ u8 part_number[16];
+ u8 revision[4];
+ u8 reserved2[8];
+};
+
+struct i40e_aq_get_phy_abilities_resp {
+ __le32 phy_type; /* bitmap using the above enum for offsets */
+ u8 link_speed; /* bitmap using the above enum */
+ u8 abilities;
+#define I40E_AQ_PHY_FLAG_PAUSE_TX 0x01
+#define I40E_AQ_PHY_FLAG_PAUSE_RX 0x02
+#define I40E_AQ_PHY_FLAG_LOW_POWER 0x04
+#define I40E_AQ_PHY_FLAG_AN_SHIFT 3
+#define I40E_AQ_PHY_FLAG_AN_MASK (0x3 << I40E_AQ_PHY_FLAG_AN_SHIFT)
+#define I40E_AQ_PHY_FLAG_AN_OFF 0x00 /* link forced on */
+#define I40E_AQ_PHY_FLAG_AN_OFF_LINK_DOWN 0x01
+#define I40E_AQ_PHY_FLAG_AN_ON 0x02
+#define I40E_AQ_PHY_FLAG_MODULE_QUAL 0x20
+ __le16 eee_capability;
+#define I40E_AQ_EEE_100BASE_TX 0x0002
+#define I40E_AQ_EEE_1000BASE_T 0x0004
+#define I40E_AQ_EEE_10GBASE_T 0x0008
+#define I40E_AQ_EEE_1000BASE_KX 0x0010
+#define I40E_AQ_EEE_10GBASE_KX4 0x0020
+#define I40E_AQ_EEE_10GBASE_KR 0x0040
+ __le32 eeer_val;
+ u8 d3_lpan;
+#define I40E_AQ_SET_PHY_D3_LPAN_ENA 0x01
+ u8 reserved[3];
+ u8 phy_id[4];
+ u8 module_type[3];
+ u8 qualified_module_count;
+#define I40E_AQ_PHY_MAX_QMS 16
+ struct i40e_aqc_module_desc qualified_module[I40E_AQ_PHY_MAX_QMS];
+};
+
+/* Set PHY Config (direct 0x0601) */
+struct i40e_aq_set_phy_config { /* same bits as above in all */
+ __le32 phy_type;
+ u8 link_speed;
+ u8 abilities;
+ __le16 eee_capability;
+ __le32 eeer;
+ u8 low_power_ctrl;
+ u8 reserved[3];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aq_set_phy_config);
+
+/* Set MAC Config command data structure (direct 0x0603) */
+struct i40e_aq_set_mac_config {
+ __le16 max_frame_size;
+ u8 params;
+#define I40E_AQ_SET_MAC_CONFIG_CRC_EN 0x04
+#define I40E_AQ_SET_MAC_CONFIG_PACING_MASK 0x78
+#define I40E_AQ_SET_MAC_CONFIG_PACING_SHIFT 3
+#define I40E_AQ_SET_MAC_CONFIG_PACING_NONE 0x0
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1B_13TX 0xF
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_9TX 0x9
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_4TX 0x8
+#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_7TX 0x7
+#define I40E_AQ_SET_MAC_CONFIG_PACING_2DW_3TX 0x6
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_1TX 0x5
+#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_2TX 0x4
+#define I40E_AQ_SET_MAC_CONFIG_PACING_7DW_3TX 0x3
+#define I40E_AQ_SET_MAC_CONFIG_PACING_4DW_1TX 0x2
+#define I40E_AQ_SET_MAC_CONFIG_PACING_9DW_1TX 0x1
+ u8 tx_timer_priority; /* bitmap */
+ __le16 tx_timer_value;
+ __le16 fc_refresh_threshold;
+ u8 reserved[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aq_set_mac_config);
+
+/* Restart Auto-Negotiation (direct 0x0605) */
+struct i40e_aqc_set_link_restart_an {
+ u8 command;
+#define I40E_AQ_PHY_RESTART_AN 0x02
+#define I40E_AQ_PHY_LINK_ENABLE 0x04
+ u8 reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_link_restart_an);
+
+/* Get Link Status cmd & response data structure (direct 0x0607) */
+struct i40e_aqc_get_link_status {
+ __le16 command_flags; /* only field set on command */
+#define I40E_AQ_LSE_MASK 0x3
+#define I40E_AQ_LSE_NOP 0x0
+#define I40E_AQ_LSE_DISABLE 0x2
+#define I40E_AQ_LSE_ENABLE 0x3
+/* only response uses this flag */
+#define I40E_AQ_LSE_IS_ENABLED 0x1
+ u8 phy_type; /* i40e_aq_phy_type */
+ u8 link_speed; /* i40e_aq_link_speed */
+ u8 link_info;
+#define I40E_AQ_LINK_UP 0x01
+#define I40E_AQ_LINK_FAULT 0x02
+#define I40E_AQ_LINK_FAULT_TX 0x04
+#define I40E_AQ_LINK_FAULT_RX 0x08
+#define I40E_AQ_LINK_FAULT_REMOTE 0x10
+#define I40E_AQ_MEDIA_AVAILABLE 0x40
+#define I40E_AQ_SIGNAL_DETECT 0x80
+ u8 an_info;
+#define I40E_AQ_AN_COMPLETED 0x01
+#define I40E_AQ_LP_AN_ABILITY 0x02
+#define I40E_AQ_PD_FAULT 0x04
+#define I40E_AQ_FEC_EN 0x08
+#define I40E_AQ_PHY_LOW_POWER 0x10
+#define I40E_AQ_LINK_PAUSE_TX 0x20
+#define I40E_AQ_LINK_PAUSE_RX 0x40
+#define I40E_AQ_QUALIFIED_MODULE 0x80
+ u8 ext_info;
+#define I40E_AQ_LINK_PHY_TEMP_ALARM 0x01
+#define I40E_AQ_LINK_XCESSIVE_ERRORS 0x02
+#define I40E_AQ_LINK_TX_SHIFT 0x02
+#define I40E_AQ_LINK_TX_MASK (0x03 << I40E_AQ_LINK_TX_SHIFT)
+#define I40E_AQ_LINK_TX_ACTIVE 0x00
+#define I40E_AQ_LINK_TX_DRAINED 0x01
+#define I40E_AQ_LINK_TX_FLUSHED 0x03
+ u8 loopback; /* use defines from i40e_aqc_set_lb_mode */
+ __le16 max_frame_size;
+ u8 config;
+#define I40E_AQ_CONFIG_CRC_ENA 0x04
+#define I40E_AQ_CONFIG_PACING_MASK 0x78
+ u8 reserved[5];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_link_status);
+
+/* Set event mask command (direct 0x0613) */
+struct i40e_aqc_set_phy_int_mask {
+ u8 reserved[8];
+ __le16 event_mask;
+#define I40E_AQ_EVENT_LINK_UPDOWN 0x0002
+#define I40E_AQ_EVENT_MEDIA_NA 0x0004
+#define I40E_AQ_EVENT_LINK_FAULT 0x0008
+#define I40E_AQ_EVENT_PHY_TEMP_ALARM 0x0010
+#define I40E_AQ_EVENT_EXCESSIVE_ERRORS 0x0020
+#define I40E_AQ_EVENT_SIGNAL_DETECT 0x0040
+#define I40E_AQ_EVENT_AN_COMPLETED 0x0080
+#define I40E_AQ_EVENT_MODULE_QUAL_FAIL 0x0100
+#define I40E_AQ_EVENT_PORT_TX_SUSPENDED 0x0200
+ u8 reserved1[6];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_int_mask);
+
+/* Get Local AN advt register (direct 0x0614)
+ * Set Local AN advt register (direct 0x0615)
+ * Get Link Partner AN advt register (direct 0x0616)
+ */
+struct i40e_aqc_an_advt_reg {
+ __le32 local_an_reg0;
+ __le16 local_an_reg1;
+ u8 reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_an_advt_reg);
+
+/* Set Loopback mode (0x0618) */
+struct i40e_aqc_set_lb_mode {
+ __le16 lb_mode;
+#define I40E_AQ_LB_PHY_LOCAL 0x01
+#define I40E_AQ_LB_PHY_REMOTE 0x02
+#define I40E_AQ_LB_MAC_LOCAL 0x04
+ u8 reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_lb_mode);
+
+/* Set PHY Reset command (0x0622) */
+struct i40e_aqc_set_phy_reset {
+ u8 reset_flags;
+#define I40E_AQ_PHY_RESET_REQUEST 0x02
+ u8 reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_reset);
+
+enum i40e_aq_phy_reg_type {
+ I40E_AQC_PHY_REG_INTERNAL = 0x1,
+ I40E_AQC_PHY_REG_EXERNAL_BASET = 0x2,
+ I40E_AQC_PHY_REG_EXERNAL_MODULE = 0x3
+};
+
+/* NVM Read command (indirect 0x0701)
+ * NVM Erase commands (direct 0x0702)
+ * NVM Update commands (indirect 0x0703)
+ */
+struct i40e_aqc_nvm_update {
+ u8 command_flags;
+#define I40E_AQ_NVM_LAST_CMD 0x01
+#define I40E_AQ_NVM_FLASH_ONLY 0x80
+ u8 module_pointer;
+ __le16 length;
+ __le32 offset;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_update);
+
+/* Send to PF command (indirect 0x0801) id is only used by PF
+ * Send to VF command (indirect 0x0802) id is only used by PF
+ * Send to Peer PF command (indirect 0x0803)
+ */
+struct i40e_aqc_pf_vf_message {
+ __le32 id;
+ u8 reserved[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_pf_vf_message);
+
+/* Alternate structure */
+
+/* Direct write (direct 0x0900)
+ * Direct read (direct 0x0902)
+ */
+struct i40e_aqc_alternate_write {
+ __le32 address0;
+ __le32 data0;
+ __le32 address1;
+ __le32 data1;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_write);
+
+/* Indirect write (indirect 0x0901)
+ * Indirect read (indirect 0x0903)
+ */
+
+struct i40e_aqc_alternate_ind_write {
+ __le32 address;
+ __le32 length;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_ind_write);
+
+/* Done alternate write (direct 0x0904)
+ * uses i40e_aq_desc
+ */
+struct i40e_aqc_alternate_write_done {
+ __le16 cmd_flags;
+#define I40E_AQ_ALTERNATE_MODE_BIOS_MASK 1
+#define I40E_AQ_ALTERNATE_MODE_BIOS_LEGACY 0
+#define I40E_AQ_ALTERNATE_MODE_BIOS_UEFI 1
+#define I40E_AQ_ALTERNATE_RESET_NEEDED 2
+ u8 reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_write_done);
+
+/* Set OEM mode (direct 0x0905) */
+struct i40e_aqc_alternate_set_mode {
+ __le32 mode;
+#define I40E_AQ_ALTERNATE_MODE_NONE 0
+#define I40E_AQ_ALTERNATE_MODE_OEM 1
+ u8 reserved[12];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_set_mode);
+
+/* Clear port Alternate RAM (direct 0x0906) uses i40e_aq_desc */
+
+/* async events 0x10xx */
+
+/* Lan Queue Overflow Event (direct, 0x1001) */
+struct i40e_aqc_lan_overflow {
+ __le32 prtdcb_rupto;
+ __le32 otx_ctl;
+ u8 reserved[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lan_overflow);
+
+/* Get LLDP MIB (indirect 0x0A00) */
+struct i40e_aqc_lldp_get_mib {
+ u8 type;
+ u8 reserved1;
+#define I40E_AQ_LLDP_MIB_TYPE_MASK 0x3
+#define I40E_AQ_LLDP_MIB_LOCAL 0x0
+#define I40E_AQ_LLDP_MIB_REMOTE 0x1
+#define I40E_AQ_LLDP_MIB_LOCAL_AND_REMOTE 0x2
+#define I40E_AQ_LLDP_BRIDGE_TYPE_MASK 0xC
+#define I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT 0x2
+#define I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE 0x0
+#define I40E_AQ_LLDP_BRIDGE_TYPE_NON_TPMR 0x1
+#define I40E_AQ_LLDP_TX_SHIFT 0x4
+#define I40E_AQ_LLDP_TX_MASK (0x03 << I40E_AQ_LLDP_TX_SHIFT)
+/* TX pause flags use I40E_AQ_LINK_TX_* above */
+ __le16 local_len;
+ __le16 remote_len;
+ u8 reserved2[2];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_get_mib);
+
+/* Configure LLDP MIB Change Event (direct 0x0A01)
+ * also used for the event (with type in the command field)
+ */
+struct i40e_aqc_lldp_update_mib {
+ u8 command;
+#define I40E_AQ_LLDP_MIB_UPDATE_ENABLE 0x0
+#define I40E_AQ_LLDP_MIB_UPDATE_DISABLE 0x1
+ u8 reserved[7];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_mib);
+
+/* Add LLDP TLV (indirect 0x0A02)
+ * Delete LLDP TLV (indirect 0x0A04)
+ */
+struct i40e_aqc_lldp_add_tlv {
+ u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */
+ u8 reserved1[1];
+ __le16 len;
+ u8 reserved2[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_add_tlv);
+
+/* Update LLDP TLV (indirect 0x0A03) */
+struct i40e_aqc_lldp_update_tlv {
+ u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */
+ u8 reserved;
+ __le16 old_len;
+ __le16 new_offset;
+ __le16 new_len;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_tlv);
+
+/* Stop LLDP (direct 0x0A05) */
+struct i40e_aqc_lldp_stop {
+ u8 command;
+#define I40E_AQ_LLDP_AGENT_STOP 0x0
+#define I40E_AQ_LLDP_AGENT_SHUTDOWN 0x1
+ u8 reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_stop);
+
+/* Start LLDP (direct 0x0A06) */
+
+struct i40e_aqc_lldp_start {
+ u8 command;
+#define I40E_AQ_LLDP_AGENT_START 0x1
+ u8 reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start);
+
+/* Apply MIB changes (0x0A07)
+ * uses the generic struct as it contains no data
+ */
+
+/* Add UDP Tunnel command and completion (direct 0x0B00) */
+struct i40e_aqc_add_udp_tunnel {
+ __le16 udp_port;
+ u8 header_len; /* in DWords, 1 to 15 */
+ u8 protocol_index;
+#define I40E_AQC_TUNNEL_TYPE_MAC 0x0
+#define I40E_AQC_TUNNEL_TYPE_UDP 0x1
+ u8 reserved[12];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel);
+
+/* remove UDP Tunnel command (0x0B01) */
+struct i40e_aqc_remove_udp_tunnel {
+ u8 reserved[2];
+ u8 index; /* 0 to 15 */
+ u8 pf_filters;
+ u8 total_filters;
+ u8 reserved2[11];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_udp_tunnel);
+
+struct i40e_aqc_del_udp_tunnel_completion {
+ __le16 udp_port;
+ u8 index; /* 0 to 15 */
+ u8 multiple_entries;
+ u8 tunnels_used;
+ u8 reserved;
+ u8 tunnels_free;
+ u8 reserved1[9];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion);
+
+/* tunnel key structure 0x0B10 */
+struct i40e_aqc_tunnel_key_structure {
+ __le16 key1_off;
+ __le16 key1_len;
+ __le16 key2_off;
+ __le16 key2_len;
+ __le16 flags;
+#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDE 0x01
+/* response flags */
+#define I40E_AQC_TUNNEL_KEY_STRUCT_SUCCESS 0x01
+#define I40E_AQC_TUNNEL_KEY_STRUCT_MODIFIED 0x02
+#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDDEN 0x03
+ u8 reserved[6];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_tunnel_key_structure);
+
+/* OEM mode commands (direct 0xFE0x) */
+struct i40e_aqc_oem_param_change {
+ __le32 param_type;
+#define I40E_AQ_OEM_PARAM_TYPE_PF_CTL 0
+#define I40E_AQ_OEM_PARAM_TYPE_BW_CTL 1
+#define I40E_AQ_OEM_PARAM_MAC 2
+ __le32 param_value1;
+ u8 param_value2[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_param_change);
+
+struct i40e_aqc_oem_state_change {
+ __le32 state;
+#define I40E_AQ_OEM_STATE_LINK_DOWN 0x0
+#define I40E_AQ_OEM_STATE_LINK_UP 0x1
+ u8 reserved[12];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_state_change);
+
+/* debug commands */
+
+/* get device id (0xFF00) uses the generic structure */
+
+/* set test mode (0xFF01, internal) */
+
+struct i40e_acq_set_test_mode {
+ u8 mode;
+#define I40E_AQ_TEST_PARTIAL 0
+#define I40E_AQ_TEST_FULL 1
+#define I40E_AQ_TEST_NVM 2
+ u8 reserved[3];
+ u8 command;
+#define I40E_AQ_TEST_OPEN 0
+#define I40E_AQ_TEST_CLOSE 1
+#define I40E_AQ_TEST_INC 2
+ u8 reserved2[3];
+ __le32 address_high;
+ __le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_acq_set_test_mode);
+
+/* Debug Read Register command (0xFF03)
+ * Debug Write Register command (0xFF04)
+ */
+struct i40e_aqc_debug_reg_read_write {
+ __le32 reserved;
+ __le32 address;
+ __le32 value_high;
+ __le32 value_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_reg_read_write);
+
+/* Scatter/gather Reg Read (indirect 0xFF05)
+ * Scatter/gather Reg Write (indirect 0xFF06)
+ */
+
+/* i40e_aq_desc is used for the command */
+struct i40e_aqc_debug_reg_sg_element_data {
+ __le32 address;
+ __le32 value;
+};
+
+/* Debug Modify register (direct 0xFF07) */
+struct i40e_aqc_debug_modify_reg {
+ __le32 address;
+ __le32 value;
+ __le32 clear_mask;
+ __le32 set_mask;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_reg);
+
+/* dump internal data (0xFF08, indirect) */
+
+#define I40E_AQ_CLUSTER_ID_AUX 0
+#define I40E_AQ_CLUSTER_ID_SWITCH_FLU 1
+#define I40E_AQ_CLUSTER_ID_TXSCHED 2
+#define I40E_AQ_CLUSTER_ID_HMC 3
+#define I40E_AQ_CLUSTER_ID_MAC0 4
+#define I40E_AQ_CLUSTER_ID_MAC1 5
+#define I40E_AQ_CLUSTER_ID_MAC2 6
+#define I40E_AQ_CLUSTER_ID_MAC3 7
+#define I40E_AQ_CLUSTER_ID_DCB 8
+#define I40E_AQ_CLUSTER_ID_EMP_MEM 9
+#define I40E_AQ_CLUSTER_ID_PKT_BUF 10
+
+struct i40e_aqc_debug_dump_internals {
+ u8 cluster_id;
+ u8 table_id;
+ __le16 data_size;
+ __le32 idx;
+ __le32 address_high;
+ __le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_dump_internals);
+
+struct i40e_aqc_debug_modify_internals {
+ u8 cluster_id;
+ u8 cluster_specific_params[7];
+ __le32 address_high;
+ __le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_internals);
+
+#endif
diff --git a/drivers/net/ethernet/intel/i40e/i40e_alloc.h b/drivers/net/ethernet/intel/i40e/i40e_alloc.h
new file mode 100644
index 0000000..3b1cc21
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_alloc.h
@@ -0,0 +1,59 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_ALLOC_H_
+#define _I40E_ALLOC_H_
+
+struct i40e_hw;
+
+/* Memory allocation types */
+enum i40e_memory_type {
+ i40e_mem_arq_buf = 0, /* ARQ indirect command buffer */
+ i40e_mem_asq_buf = 1,
+ i40e_mem_atq_buf = 2, /* ATQ indirect command buffer */
+ i40e_mem_arq_ring = 3, /* ARQ descriptor ring */
+ i40e_mem_atq_ring = 4, /* ATQ descriptor ring */
+ i40e_mem_pd = 5, /* Page Descriptor */
+ i40e_mem_bp = 6, /* Backing Page - 4KB */
+ i40e_mem_bp_jumbo = 7, /* Backing Page - > 4KB */
+ i40e_mem_reserved
+};
+
+/* prototype for functions used for dynamic memory allocation */
+i40e_status i40e_allocate_dma_mem(struct i40e_hw *hw,
+ struct i40e_dma_mem *mem,
+ enum i40e_memory_type type,
+ u64 size, u32 alignment);
+i40e_status i40e_free_dma_mem(struct i40e_hw *hw,
+ struct i40e_dma_mem *mem);
+i40e_status i40e_allocate_virt_mem(struct i40e_hw *hw,
+ struct i40e_virt_mem *mem,
+ u32 size);
+i40e_status i40e_free_virt_mem(struct i40e_hw *hw,
+ struct i40e_virt_mem *mem);
+
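+/* A minimal sketch (illustrative only; the real implementations live in
+ * the OS-specific parts of the driver) of how i40e_allocate_dma_mem()
+ * might be backed by the kernel DMA API, assuming struct i40e_dma_mem
+ * carries va, pa and size members and that pdev is the PCI device reached
+ * through hw:
+ *
+ *	mem->size = ALIGN(size, alignment);
+ *	mem->va = dma_alloc_coherent(&pdev->dev, mem->size, &mem->pa,
+ *				     GFP_KERNEL);
+ *	return mem->va ? 0 : I40E_ERR_NO_MEMORY;
+ */
+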
+#endif /* _I40E_ALLOC_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
new file mode 100644
index 0000000..c21df7b
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -0,0 +1,2041 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#include "i40e_type.h"
+#include "i40e_adminq.h"
+#include "i40e_prototype.h"
+#include "i40e_virtchnl.h"
+
+/**
+ * i40e_set_mac_type - Sets MAC type
+ * @hw: pointer to the HW structure
+ *
+ * This function sets the MAC type of the adapter based on the
+ * vendor ID and device ID stored in the hw structure.
+ **/
+static i40e_status i40e_set_mac_type(struct i40e_hw *hw)
+{
+ i40e_status status = 0;
+
+ if (hw->vendor_id == PCI_VENDOR_ID_INTEL) {
+ switch (hw->device_id) {
+ case I40E_SFP_XL710_DEVICE_ID:
+ case I40E_SFP_X710_DEVICE_ID:
+ case I40E_QEMU_DEVICE_ID:
+ case I40E_KX_A_DEVICE_ID:
+ case I40E_KX_B_DEVICE_ID:
+ case I40E_KX_C_DEVICE_ID:
+ case I40E_KX_D_DEVICE_ID:
+ case I40E_QSFP_A_DEVICE_ID:
+ case I40E_QSFP_B_DEVICE_ID:
+ case I40E_QSFP_C_DEVICE_ID:
+ hw->mac.type = I40E_MAC_XL710;
+ break;
+ case I40E_VF_DEVICE_ID:
+ case I40E_VF_HV_DEVICE_ID:
+ hw->mac.type = I40E_MAC_VF;
+ break;
+ default:
+ hw->mac.type = I40E_MAC_GENERIC;
+ break;
+ }
+ } else {
+ status = I40E_ERR_DEVICE_NOT_SUPPORTED;
+ }
+
+ hw_dbg(hw, "i40e_set_mac_type found mac: %d, returns: %d\n",
+ hw->mac.type, status);
+ return status;
+}
+
+/**
+ * i40e_debug_aq
+ * @hw: pointer to the hw struct
+ * @mask: debug mask related to admin queue
+ * @desc: pointer to admin queue command descriptor
+ * @buffer: pointer to command buffer
+ *
+ * Dumps debug log about adminq command with descriptor contents.
+ **/
+void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
+ void *buffer)
+{
+ struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc;
+ u8 *aq_buffer = (u8 *)buffer;
+ u32 data[4];
+ u32 i = 0;
+
+ if ((!(mask & hw->debug_mask)) || (desc == NULL))
+ return;
+
+ i40e_debug(hw, mask,
+ "AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
+ aq_desc->opcode, aq_desc->flags, aq_desc->datalen,
+ aq_desc->retval);
+ i40e_debug(hw, mask, "\tcookie (h,l) 0x%08X 0x%08X\n",
+ aq_desc->cookie_high, aq_desc->cookie_low);
+ i40e_debug(hw, mask, "\tparam (0,1) 0x%08X 0x%08X\n",
+ aq_desc->params.internal.param0,
+ aq_desc->params.internal.param1);
+ i40e_debug(hw, mask, "\taddr (h,l) 0x%08X 0x%08X\n",
+ aq_desc->params.external.addr_high,
+ aq_desc->params.external.addr_low);
+
+ if ((buffer != NULL) && (aq_desc->datalen != 0)) {
+ memset(data, 0, sizeof(data));
+ i40e_debug(hw, mask, "AQ CMD Buffer:\n");
+ for (i = 0; i < le16_to_cpu(aq_desc->datalen); i++) {
+ data[((i % 16) / 4)] |=
+ ((u32)aq_buffer[i]) << (8 * (i % 4));
+ if ((i % 16) == 15) {
+ i40e_debug(hw, mask,
+ "\t0x%04X %08X %08X %08X %08X\n",
+ i - 15, data[0], data[1], data[2],
+ data[3]);
+ memset(data, 0, sizeof(data));
+ }
+ }
+ if ((i % 16) != 0)
+ i40e_debug(hw, mask, "\t0x%04X %08X %08X %08X %08X\n",
+ i - (i % 16), data[0], data[1], data[2],
+ data[3]);
+ }
+}
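+
+/*
+ * Note on the dump format above: the buffer is packed into four 32-bit
+ * words per printed line (16 bytes), least-significant byte first
+ * within each word, so bytes 00 11 22 33 print as the word 0x33221100.
+ */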
+
+/**
+ * i40e_init_shared_code - Initialize the shared code
+ * @hw: pointer to hardware structure
+ *
+ * This assigns the MAC type and PHY code and inits the NVM.
+ * Does not touch the hardware. This function must be called prior to any
+ * other function in the shared code. The i40e_hw structure should be
+ * memset to 0 prior to calling this function. The following fields in
+ * hw structure should be filled in prior to calling this function:
+ * hw_addr, back, device_id, vendor_id, subsystem_device_id,
+ * subsystem_vendor_id, and revision_id
+ **/
+i40e_status i40e_init_shared_code(struct i40e_hw *hw)
+{
+ i40e_status status = 0;
+ u32 reg;
+
+ hw->phy.get_link_info = true;
+
+ /* Determine port number */
+ reg = rd32(hw, I40E_PFGEN_PORTNUM);
+ reg = ((reg & I40E_PFGEN_PORTNUM_PORT_NUM_MASK) >>
+ I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT);
+ hw->port = (u8)reg;
+
+ i40e_set_mac_type(hw);
+
+ switch (hw->mac.type) {
+ case I40E_MAC_XL710:
+ break;
+ default:
+ return I40E_ERR_DEVICE_NOT_SUPPORTED;
+ }
+
+ status = i40e_init_nvm(hw);
+ return status;
+}
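+
+/*
+ * Illustrative call sequence (a sketch, not part of the driver): the
+ * probe path is expected to do roughly
+ *
+ * memset(hw, 0, sizeof(*hw));
+ * hw->hw_addr = ioremap(pci_resource_start(pdev, 0), len);
+ * hw->vendor_id = pdev->vendor;
+ * hw->device_id = pdev->device;
+ * hw->subsystem_vendor_id = pdev->subsystem_vendor;
+ * hw->subsystem_device_id = pdev->subsystem_device;
+ * hw->revision_id = pdev->revision;
+ * err = i40e_init_shared_code(hw);
+ *
+ * before calling any other shared-code routine, per the rules above.
+ */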
+
+/**
+ * i40e_aq_mac_address_read - Retrieve the MAC addresses
+ * @hw: pointer to the hw struct
+ * @flags: a return indicator of which addresses in the addr store are valid
+ * @addrs: the requestor's mac addr store
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+static i40e_status i40e_aq_mac_address_read(struct i40e_hw *hw,
+ u16 *flags,
+ struct i40e_aqc_mac_address_read_data *addrs,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_mac_address_read *cmd_data =
+ (struct i40e_aqc_mac_address_read *)&desc.params.raw;
+ i40e_status status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_mac_address_read);
+ desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF);
+
+ status = i40e_asq_send_command(hw, &desc, addrs,
+ sizeof(*addrs), cmd_details);
+ *flags = le16_to_cpu(cmd_data->command_flags);
+
+ return status;
+}
+
+/**
+ * i40e_aq_mac_address_write - Change the MAC addresses
+ * @hw: pointer to the hw struct
+ * @flags: indicates which MAC to be written
+ * @mac_addr: address to write
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_mac_address_write(struct i40e_hw *hw,
+ u16 flags, u8 *mac_addr,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_mac_address_write *cmd_data =
+ (struct i40e_aqc_mac_address_write *)&desc.params.raw;
+ i40e_status status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_mac_address_write);
+ cmd_data->command_flags = cpu_to_le16(flags);
+ memcpy(&cmd_data->mac_sal, &mac_addr[0], 4);
+ memcpy(&cmd_data->mac_sah, &mac_addr[4], 2);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
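+
+/*
+ * Note on the layout above: the 6-byte MAC address is split into a
+ * 4-byte "SAL" (station address low, bytes 0-3) and a 2-byte "SAH"
+ * (station address high, bytes 4-5) to match the descriptor fields,
+ * e.g. 00:1b:21:aa:bb:cc is copied as mac_sal = 00 1b 21 aa and
+ * mac_sah = bb cc in byte-array order.
+ */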
+
+/**
+ * i40e_get_mac_addr - get MAC address
+ * @hw: pointer to the HW structure
+ * @mac_addr: pointer to MAC address
+ *
+ * Reads the adapter's MAC address from register
+ **/
+i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
+{
+ struct i40e_aqc_mac_address_read_data addrs;
+ i40e_status status;
+ u16 flags = 0;
+
+ status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);
+
+ if (flags & I40E_AQC_LAN_ADDR_VALID)
+ memcpy(mac_addr, &addrs.pf_lan_mac, sizeof(addrs.pf_lan_mac));
+
+ return status;
+}
+
+/**
+ * i40e_validate_mac_addr - Validate MAC address
+ * @mac_addr: pointer to MAC address
+ *
+ * Tests a MAC address to ensure it is a valid Individual Address
+ **/
+i40e_status i40e_validate_mac_addr(u8 *mac_addr)
+{
+ i40e_status status = 0;
+
+ /* Make sure it is not a multicast address */
+ if (I40E_IS_MULTICAST(mac_addr)) {
+ hw_dbg(hw, "MAC address is multicast\n");
+ status = I40E_ERR_INVALID_MAC_ADDR;
+ /* Not a broadcast address */
+ } else if (I40E_IS_BROADCAST(mac_addr)) {
+ hw_dbg(hw, "MAC address is broadcast\n");
+ status = I40E_ERR_INVALID_MAC_ADDR;
+ /* Reject the zero address */
+ } else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
+ mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) {
+ hw_dbg(hw, "MAC address is all zeros\n");
+ status = I40E_ERR_INVALID_MAC_ADDR;
+ }
+ return status;
+}
+
+/**
+ * i40e_pf_reset - Reset the PF
+ * @hw: pointer to the hardware structure
+ *
+ * Assuming someone else has triggered a global reset,
+ * make sure the global reset is complete and then reset the PF
+ **/
+i40e_status i40e_pf_reset(struct i40e_hw *hw)
+{
+ u32 wait_cnt = 0;
+ u32 reg = 0;
+ u32 grst_del;
+
+ /* Poll for Global Reset steady state in case of recent GRST.
+ * The grst delay value is in 100ms units, and we'll wait a
+ * couple counts longer to be sure we don't just miss the end.
+ */
+ grst_del = (rd32(hw, I40E_GLGEN_RSTCTL) & I40E_GLGEN_RSTCTL_GRSTDEL_MASK)
+ >> I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
+ for (wait_cnt = 0; wait_cnt < grst_del + 2; wait_cnt++) {
+ reg = rd32(hw, I40E_GLGEN_RSTAT);
+ if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
+ break;
+ msleep(100);
+ }
+ if (reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
+ hw_dbg(hw, "Global reset polling failed to complete.\n");
+ return I40E_ERR_RESET_FAILED;
+ }
+
+ /* Determine the PF number based on the PCI fn */
+ hw->pf_id = (u8)hw->bus.func;
+
+ /* If there was a Global Reset in progress when we got here,
+ * we don't need to do the PF Reset
+ */
+ if (!wait_cnt) {
+ reg = rd32(hw, I40E_PFGEN_CTRL);
+ wr32(hw, I40E_PFGEN_CTRL,
+ (reg | I40E_PFGEN_CTRL_PFSWR_MASK));
+ for (wait_cnt = 0; wait_cnt < 10; wait_cnt++) {
+ reg = rd32(hw, I40E_PFGEN_CTRL);
+ if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK))
+ break;
+ usleep_range(1000, 2000);
+ }
+ if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) {
+ hw_dbg(hw, "PF reset polling failed to complete.\n");
+ return I40E_ERR_RESET_FAILED;
+ }
+ }
+
+ i40e_clear_pxe_mode(hw);
+ return 0;
+}
+
+/**
+ * i40e_clear_pxe_mode - clear pxe operations mode
+ * @hw: pointer to the hw struct
+ *
+ * Make sure all PXE mode settings are cleared, including things
+ * like descriptor fetch/write-back mode.
+ **/
+void i40e_clear_pxe_mode(struct i40e_hw *hw)
+{
+ u32 reg;
+
+ /* Clear single descriptor fetch/write-back mode */
+ reg = rd32(hw, I40E_GLLAN_RCTL_0);
+ wr32(hw, I40E_GLLAN_RCTL_0, (reg | I40E_GLLAN_RCTL_0_PXE_MODE_MASK));
+}
+
+/**
+ * i40e_led_get - return current on/off mode
+ * @hw: pointer to the hw struct
+ *
+ * The value returned is the 'mode' field as defined in the
+ * GPIO register definitions: 0x0 = off, 0xf = on, and other
+ * values are variations of possible behaviors relating to
+ * blink, link, and wire.
+ **/
+u32 i40e_led_get(struct i40e_hw *hw)
+{
+ u32 gpio_val = 0;
+ u32 mode = 0;
+ u32 port;
+ int i;
+
+ for (i = 0; i < I40E_HW_CAP_MAX_GPIO; i++) {
+ if (!hw->func_caps.led[i])
+ continue;
+
+ gpio_val = rd32(hw, I40E_GLGEN_GPIO_CTL(i));
+ port = (gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK)
+ >> I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT;
+
+ if (port != hw->port)
+ continue;
+
+ mode = (gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK)
+ >> I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT;
+ break;
+ }
+
+ return mode;
+}
+
+/**
+ * i40e_led_set - set new on/off mode
+ * @hw: pointer to the hw struct
+ * @mode: 0=off, else on (see EAS for mode details)
+ **/
+void i40e_led_set(struct i40e_hw *hw, u32 mode)
+{
+ u32 gpio_val = 0;
+ u32 led_mode = 0;
+ u32 port;
+ int i;
+
+ for (i = 0; i < I40E_HW_CAP_MAX_GPIO; i++) {
+ if (!hw->func_caps.led[i])
+ continue;
+
+ gpio_val = rd32(hw, I40E_GLGEN_GPIO_CTL(i));
+ port = (gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK)
+ >> I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT;
+
+ if (port != hw->port)
+ continue;
+
+ led_mode = (mode << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) &
+ I40E_GLGEN_GPIO_CTL_LED_MODE_MASK;
+ gpio_val &= ~I40E_GLGEN_GPIO_CTL_LED_MODE_MASK;
+ gpio_val |= led_mode;
+ wr32(hw, I40E_GLGEN_GPIO_CTL(i), gpio_val);
+ }
+}
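+
+/*
+ * Illustrative use (a sketch, not driver code): a port-identify helper
+ * could save the current mode with i40e_led_get(), force the LED on
+ * with i40e_led_set(hw, 0xf) (0xf meaning "on" per the mode values
+ * described at i40e_led_get() above), and restore the saved mode when
+ * done.
+ */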
+
+/* Admin command wrappers */
+/**
+ * i40e_aq_queue_shutdown
+ * @hw: pointer to the hw struct
+ * @unloading: is the driver unloading itself
+ *
+ * Tell the Firmware that we're shutting down the AdminQ and whether
+ * or not the driver is unloading as well.
+ **/
+i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw,
+ bool unloading)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_queue_shutdown *cmd =
+ (struct i40e_aqc_queue_shutdown *)&desc.params.raw;
+ i40e_status status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_queue_shutdown);
+
+ if (unloading)
+ cmd->driver_unloading = cpu_to_le32(I40E_AQ_DRIVER_UNLOADING);
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
+
+ return status;
+}
+
+/**
+ * i40e_aq_set_link_restart_an
+ * @hw: pointer to the hw struct
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Sets up the link and restarts the Auto-Negotiation over the link.
+ **/
+i40e_status i40e_aq_set_link_restart_an(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_link_restart_an *cmd =
+ (struct i40e_aqc_set_link_restart_an *)&desc.params.raw;
+ i40e_status status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_link_restart_an);
+
+ cmd->command = I40E_AQ_PHY_RESTART_AN;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_get_link_info
+ * @hw: pointer to the hw struct
+ * @enable_lse: enable/disable LinkStatusEvent reporting
+ * @link: pointer to link status structure - optional
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Returns the link status of the adapter.
+ **/
+i40e_status i40e_aq_get_link_info(struct i40e_hw *hw,
+ bool enable_lse, struct i40e_link_status *link,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_get_link_status *resp =
+ (struct i40e_aqc_get_link_status *)&desc.params.raw;
+ struct i40e_link_status *hw_link_info = &hw->phy.link_info;
+ i40e_status status;
+ u16 command_flags;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status);
+
+ if (enable_lse)
+ command_flags = I40E_AQ_LSE_ENABLE;
+ else
+ command_flags = I40E_AQ_LSE_DISABLE;
+ resp->command_flags = cpu_to_le16(command_flags);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (status)
+ goto aq_get_link_info_exit;
+
+ /* save off old link status information */
+ memcpy(&hw->phy.link_info_old, hw_link_info,
+ sizeof(struct i40e_link_status));
+
+ /* update link status */
+ hw_link_info->phy_type = (enum i40e_aq_phy_type)resp->phy_type;
+ hw_link_info->link_speed = (enum i40e_aq_link_speed)resp->link_speed;
+ hw_link_info->link_info = resp->link_info;
+ hw_link_info->an_info = resp->an_info;
+ hw_link_info->ext_info = resp->ext_info;
+
+ if (resp->command_flags & cpu_to_le16(I40E_AQ_LSE_ENABLE))
+ hw_link_info->lse_enable = true;
+ else
+ hw_link_info->lse_enable = false;
+
+ /* save link status information */
+ if (link)
+ memcpy(link, hw_link_info, sizeof(struct i40e_link_status));
+
+ /* flag cleared so helper functions don't call AQ again */
+ hw->phy.get_link_info = false;
+
+aq_get_link_info_exit:
+ return status;
+}
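+
+/*
+ * Illustrative use (a sketch, not driver code): enable link status
+ * events and read the current state in one call:
+ *
+ * struct i40e_link_status link;
+ * if (!i40e_aq_get_link_info(hw, true, &link, NULL))
+ * up = (link.link_info & I40E_AQ_LINK_UP) != 0;
+ *
+ * Most callers use i40e_get_link_status() below, which relies on the
+ * copy cached in hw->phy.link_info while it is still valid.
+ */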
+
+/**
+ * i40e_aq_add_vsi
+ * @hw: pointer to the hw struct
+ * @vsi_ctx: pointer to a vsi context struct
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Add a VSI context to the hardware.
+ **/
+i40e_status i40e_aq_add_vsi(struct i40e_hw *hw,
+ struct i40e_vsi_context *vsi_ctx,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_get_update_vsi *cmd =
+ (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
+ struct i40e_aqc_add_get_update_vsi_completion *resp =
+ (struct i40e_aqc_add_get_update_vsi_completion *)
+ &desc.params.raw;
+ i40e_status status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_add_vsi);
+
+ cmd->uplink_seid = cpu_to_le16(vsi_ctx->uplink_seid);
+ cmd->connection_type = vsi_ctx->connection_type;
+ cmd->vf_id = vsi_ctx->vf_num;
+ cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags);
+
+ desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ if (sizeof(vsi_ctx->info) > I40E_AQ_LARGE_BUF)
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+
+ status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
+ sizeof(vsi_ctx->info), cmd_details);
+
+ if (status)
+ goto aq_add_vsi_exit;
+
+ vsi_ctx->seid = le16_to_cpu(resp->seid);
+ vsi_ctx->vsi_number = le16_to_cpu(resp->vsi_number);
+ vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used);
+ vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
+
+aq_add_vsi_exit:
+ return status;
+}
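+
+/*
+ * Illustrative flow (a sketch, not driver code): add a VSI and read
+ * back the parameters the firmware assigned, keyed by the SEID it
+ * returned in the context struct:
+ *
+ * struct i40e_vsi_context ctx = {};
+ * ctx.uplink_seid = uplink_seid;
+ * if (!i40e_aq_add_vsi(hw, &ctx, NULL))
+ * i40e_aq_get_vsi_params(hw, &ctx, NULL);
+ */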
+
+/**
+ * i40e_aq_set_vsi_unicast_promiscuous
+ * @hw: pointer to the hw struct
+ * @seid: vsi number
+ * @set: set unicast promiscuous enable/disable
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
+ u16 seid, bool set, struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+ (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+ i40e_status status;
+ u16 flags = 0;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_vsi_promiscuous_modes);
+
+ if (set)
+ flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
+
+ cmd->promiscuous_flags = cpu_to_le16(flags);
+
+ cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
+
+ cmd->seid = cpu_to_le16(seid);
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_set_vsi_multicast_promiscuous
+ * @hw: pointer to the hw struct
+ * @seid: vsi number
+ * @set: set multicast promiscuous enable/disable
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
+ u16 seid, bool set, struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+ (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+ i40e_status status;
+ u16 flags = 0;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_vsi_promiscuous_modes);
+
+ if (set)
+ flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST;
+
+ cmd->promiscuous_flags = cpu_to_le16(flags);
+
+ cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_MULTICAST);
+
+ cmd->seid = cpu_to_le16(seid);
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_set_vsi_broadcast
+ * @hw: pointer to the hw struct
+ * @seid: vsi number
+ * @set_filter: true to set filter, false to clear filter
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Set or clear the broadcast promiscuous flag (filter) for a given VSI.
+ **/
+i40e_status i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
+ u16 seid, bool set_filter,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+ (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+ i40e_status status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_vsi_promiscuous_modes);
+
+ if (set_filter)
+ cmd->promiscuous_flags
+ |= cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
+ else
+ cmd->promiscuous_flags
+ &= cpu_to_le16(~I40E_AQC_SET_VSI_PROMISC_BROADCAST);
+
+ cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
+ cmd->seid = cpu_to_le16(seid);
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_get_vsi_params - get VSI configuration info
+ * @hw: pointer to the hw struct
+ * @vsi_ctx: pointer to a vsi context struct
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_get_vsi_params(struct i40e_hw *hw,
+ struct i40e_vsi_context *vsi_ctx,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_switch_seid *cmd =
+ (struct i40e_aqc_switch_seid *)&desc.params.raw;
+ struct i40e_aqc_add_get_update_vsi_completion *resp =
+ (struct i40e_aqc_add_get_update_vsi_completion *)
+ &desc.params.raw;
+ i40e_status status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_get_vsi_parameters);
+
+ cmd->seid = cpu_to_le16(vsi_ctx->seid);
+
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+ if (sizeof(vsi_ctx->info) > I40E_AQ_LARGE_BUF)
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+
+ status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
+ sizeof(vsi_ctx->info), cmd_details);
+
+ if (status)
+ goto aq_get_vsi_params_exit;
+
+ vsi_ctx->seid = le16_to_cpu(resp->seid);
+ vsi_ctx->vsi_number = le16_to_cpu(resp->vsi_number);
+ vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used);
+ vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
+
+aq_get_vsi_params_exit:
+ return status;
+}
+
+/**
+ * i40e_aq_update_vsi_params
+ * @hw: pointer to the hw struct
+ * @vsi_ctx: pointer to a vsi context struct
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Update a VSI context.
+ **/
+i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw,
+ struct i40e_vsi_context *vsi_ctx,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_switch_seid *cmd =
+ (struct i40e_aqc_switch_seid *)&desc.params.raw;
+ i40e_status status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_update_vsi_parameters);
+ cmd->seid = cpu_to_le16(vsi_ctx->seid);
+
+ desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ if (sizeof(vsi_ctx->info) > I40E_AQ_LARGE_BUF)
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+
+ status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
+ sizeof(vsi_ctx->info), cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_get_switch_config
+ * @hw: pointer to the hardware structure
+ * @buf: pointer to the result buffer
+ * @buf_size: length of input buffer
+ * @start_seid: seid to start the report from, 0 == beginning
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Fill the buf with switch configuration returned from AdminQ command
+ **/
+i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw,
+ struct i40e_aqc_get_switch_config_resp *buf,
+ u16 buf_size, u16 *start_seid,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_switch_seid *scfg =
+ (struct i40e_aqc_switch_seid *)&desc.params.raw;
+ i40e_status status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_get_switch_config);
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+ if (buf_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+ scfg->seid = cpu_to_le16(*start_seid);
+
+ status = i40e_asq_send_command(hw, &desc, buf, buf_size, cmd_details);
+ *start_seid = le16_to_cpu(scfg->seid);
+
+ return status;
+}
+
+/**
+ * i40e_aq_get_firmware_version
+ * @hw: pointer to the hw struct
+ * @fw_major_version: firmware major version
+ * @fw_minor_version: firmware minor version
+ * @api_major_version: admin queue API major version
+ * @api_minor_version: admin queue API minor version
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Get the firmware version from the admin queue commands
+ **/
+i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw,
+ u16 *fw_major_version, u16 *fw_minor_version,
+ u16 *api_major_version, u16 *api_minor_version,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_get_version *resp =
+ (struct i40e_aqc_get_version *)&desc.params.raw;
+ i40e_status status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_version);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (!status) {
+ if (fw_major_version != NULL)
+ *fw_major_version = le16_to_cpu(resp->fw_major);
+ if (fw_minor_version != NULL)
+ *fw_minor_version = le16_to_cpu(resp->fw_minor);
+ if (api_major_version != NULL)
+ *api_major_version = le16_to_cpu(resp->api_major);
+ if (api_minor_version != NULL)
+ *api_minor_version = le16_to_cpu(resp->api_minor);
+ }
+
+ return status;
+}
+
+/**
+ * i40e_aq_send_driver_version
+ * @hw: pointer to the hw struct
+ * @dv: driver's major, minor, build, and subbuild version
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Send the driver version to the firmware
+ **/
+i40e_status i40e_aq_send_driver_version(struct i40e_hw *hw,
+ struct i40e_driver_version *dv,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_driver_version *cmd =
+ (struct i40e_aqc_driver_version *)&desc.params.raw;
+ i40e_status status;
+
+ if (dv == NULL)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_driver_version);
+
+ desc.flags |= cpu_to_le16(I40E_AQ_FLAG_SI);
+ cmd->driver_major_ver = dv->major_version;
+ cmd->driver_minor_ver = dv->minor_version;
+ cmd->driver_build_ver = dv->build_version;
+ cmd->driver_subbuild_ver = dv->subbuild_version;
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_get_link_status - get status of the HW network link
+ * @hw: pointer to the hw struct
+ *
+ * Returns true if link is up, false if link is down.
+ *
+ * Side effect: LinkStatusEvent reporting becomes enabled
+ **/
+bool i40e_get_link_status(struct i40e_hw *hw)
+{
+ i40e_status status = 0;
+ bool link_status = false;
+
+ if (hw->phy.get_link_info) {
+ status = i40e_aq_get_link_info(hw, true, NULL, NULL);
+
+ if (status)
+ goto i40e_get_link_status_exit;
+ }
+
+ link_status = hw->phy.link_info.link_info & I40E_AQ_LINK_UP;
+
+i40e_get_link_status_exit:
+ return link_status;
+}
+
+/**
+ * i40e_aq_add_veb - Insert a VEB between the VSI and the MAC
+ * @hw: pointer to the hw struct
+ * @uplink_seid: the MAC or other gizmo SEID
+ * @downlink_seid: the VSI SEID
+ * @enabled_tc: bitmap of TCs to be enabled
+ * @default_port: true for default port VSI, false for control port
+ * @veb_seid: pointer to where to put the resulting VEB SEID
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * This asks the FW to add a VEB between the uplink and downlink
+ * elements. If the uplink SEID is 0, this will be a floating VEB.
+ **/
+i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
+ u16 downlink_seid, u8 enabled_tc,
+ bool default_port, u16 *veb_seid,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_veb *cmd =
+ (struct i40e_aqc_add_veb *)&desc.params.raw;
+ struct i40e_aqc_add_veb_completion *resp =
+ (struct i40e_aqc_add_veb_completion *)&desc.params.raw;
+ i40e_status status;
+ u16 veb_flags = 0;
+
+ /* SEIDs need to either both be set or both be 0 for floating VEB */
+ if (!!uplink_seid != !!downlink_seid)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_veb);
+
+ cmd->uplink_seid = cpu_to_le16(uplink_seid);
+ cmd->downlink_seid = cpu_to_le16(downlink_seid);
+ cmd->enable_tcs = enabled_tc;
+ if (!uplink_seid)
+ veb_flags |= I40E_AQC_ADD_VEB_FLOATING;
+ if (default_port)
+ veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT;
+ else
+ veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DATA;
+ cmd->veb_flags = cpu_to_le16(veb_flags);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (!status && veb_seid)
+ *veb_seid = le16_to_cpu(resp->veb_seid);
+
+ return status;
+}
+
+/**
+ * i40e_aq_get_veb_parameters - Retrieve VEB parameters
+ * @hw: pointer to the hw struct
+ * @veb_seid: the SEID of the VEB to query
+ * @switch_id: the uplink switch id
+ * @floating: set to true if the VEB is floating
+ * @statistic_index: index of the stats counter block for this VEB
+ * @vebs_used: number of VEBs used by function
+ * @vebs_free: total VEBs not reserved by any function
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * This retrieves the parameters for a particular VEB, specified by
+ * veb_seid, and returns them to the caller.
+ **/
+i40e_status i40e_aq_get_veb_parameters(struct i40e_hw *hw,
+ u16 veb_seid, u16 *switch_id,
+ bool *floating, u16 *statistic_index,
+ u16 *vebs_used, u16 *vebs_free,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_get_veb_parameters_completion *cmd_resp =
+ (struct i40e_aqc_get_veb_parameters_completion *)
+ &desc.params.raw;
+ i40e_status status;
+
+ if (veb_seid == 0)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_get_veb_parameters);
+ cmd_resp->seid = cpu_to_le16(veb_seid);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+ if (status)
+ goto get_veb_exit;
+
+ if (switch_id)
+ *switch_id = le16_to_cpu(cmd_resp->switch_id);
+ if (statistic_index)
+ *statistic_index = le16_to_cpu(cmd_resp->statistic_index);
+ if (vebs_used)
+ *vebs_used = le16_to_cpu(cmd_resp->vebs_used);
+ if (vebs_free)
+ *vebs_free = le16_to_cpu(cmd_resp->vebs_free);
+ if (floating) {
+ u16 flags = le16_to_cpu(cmd_resp->veb_flags);
+ if (flags & I40E_AQC_ADD_VEB_FLOATING)
+ *floating = true;
+ else
+ *floating = false;
+ }
+
+get_veb_exit:
+ return status;
+}
+
+/**
+ * i40e_aq_add_macvlan
+ * @hw: pointer to the hw struct
+ * @seid: VSI for the mac address
+ * @mv_list: list of macvlans to be added
+ * @count: length of the list
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Add MAC/VLAN addresses to the HW filtering
+ **/
+i40e_status i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_add_macvlan_element_data *mv_list,
+ u16 count, struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_macvlan *cmd =
+ (struct i40e_aqc_macvlan *)&desc.params.raw;
+ i40e_status status;
+ u16 buf_size;
+
+ if (count == 0 || !mv_list || !hw)
+ return I40E_ERR_PARAM;
+
+ buf_size = count * sizeof(struct i40e_aqc_add_macvlan_element_data);
+
+ /* prep the rest of the request */
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_macvlan);
+ cmd->num_addresses = cpu_to_le16(count);
+ cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid);
+ cmd->seid[1] = 0;
+ cmd->seid[2] = 0;
+
+ desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ if (buf_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+
+ status = i40e_asq_send_command(hw, &desc, mv_list, buf_size,
+ cmd_details);
+
+ return status;
+}
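+
+/*
+ * Illustrative use (a sketch, not driver code; the element field names
+ * and the flag value are assumed from i40e_adminq_cmd.h): add a single
+ * perfect-match filter on a VSI:
+ *
+ * struct i40e_aqc_add_macvlan_element_data e = {};
+ * memcpy(e.mac_addr, addr, 6);
+ * e.flags = cpu_to_le16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH);
+ * status = i40e_aq_add_macvlan(hw, vsi_seid, &e, 1, NULL);
+ */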
+
+/**
+ * i40e_aq_remove_macvlan
+ * @hw: pointer to the hw struct
+ * @seid: VSI for the mac address
+ * @mv_list: list of macvlans to be removed
+ * @count: length of the list
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Remove MAC/VLAN addresses from the HW filtering
+ **/
+i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_remove_macvlan_element_data *mv_list,
+ u16 count, struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_macvlan *cmd =
+ (struct i40e_aqc_macvlan *)&desc.params.raw;
+ i40e_status status;
+ u16 buf_size;
+
+ if (count == 0 || !mv_list || !hw)
+ return I40E_ERR_PARAM;
+
+ buf_size = count * sizeof(struct i40e_aqc_remove_macvlan_element_data);
+
+ /* prep the rest of the request */
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_macvlan);
+ cmd->num_addresses = cpu_to_le16(count);
+ cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid);
+ cmd->seid[1] = 0;
+ cmd->seid[2] = 0;
+
+ desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ if (buf_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+
+ status = i40e_asq_send_command(hw, &desc, mv_list, buf_size,
+ cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_add_vlan - Add VLAN ids to the HW filtering
+ * @hw: pointer to the hw struct
+ * @seid: VSI for the vlan filters
+ * @v_list: list of vlan filters to be added
+ * @count: length of the list
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_add_vlan(struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_add_remove_vlan_element_data *v_list,
+ u8 count, struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_macvlan *cmd =
+ (struct i40e_aqc_macvlan *)&desc.params.raw;
+ i40e_status status;
+ u16 buf_size;
+
+ if (count == 0 || !v_list || !hw)
+ return I40E_ERR_PARAM;
+
+ buf_size = count * sizeof(struct i40e_aqc_add_remove_vlan_element_data);
+
+ /* prep the rest of the request */
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_vlan);
+ cmd->num_addresses = cpu_to_le16(count);
+ cmd->seid[0] = cpu_to_le16(seid | I40E_AQC_MACVLAN_CMD_SEID_VALID);
+ cmd->seid[1] = 0;
+ cmd->seid[2] = 0;
+
+ desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ if (buf_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+
+ status = i40e_asq_send_command(hw, &desc, v_list, buf_size,
+ cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_remove_vlan - Remove VLANs from the HW filtering
+ * @hw: pointer to the hw struct
+ * @seid: VSI for the vlan filters
+ * @v_list: list of macvlans to be removed
+ * @count: length of the list
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_remove_vlan(struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_add_remove_vlan_element_data *v_list,
+ u8 count, struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_macvlan *cmd =
+ (struct i40e_aqc_macvlan *)&desc.params.raw;
+ i40e_status status;
+ u16 buf_size;
+
+ if (count == 0 || !v_list || !hw)
+ return I40E_ERR_PARAM;
+
+ buf_size = count * sizeof(struct i40e_aqc_add_remove_vlan_element_data);
+
+ /* prep the rest of the request */
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_vlan);
+ cmd->num_addresses = cpu_to_le16(count);
+ cmd->seid[0] = cpu_to_le16(seid | I40E_AQC_MACVLAN_CMD_SEID_VALID);
+ cmd->seid[1] = 0;
+ cmd->seid[2] = 0;
+
+ desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ if (buf_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+
+ status = i40e_asq_send_command(hw, &desc, v_list, buf_size,
+ cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_send_msg_to_vf
+ * @hw: pointer to the hardware structure
+ * @vfid: vf id to send msg
+ * @v_opcode: opcode carried in the descriptor cookie high dword
+ * @v_retval: return value carried in the descriptor cookie low dword
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ * @cmd_details: pointer to command details
+ *
+ * send msg to vf
+ **/
+i40e_status i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
+ u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_pf_vf_message *cmd =
+ (struct i40e_aqc_pf_vf_message *)&desc.params.raw;
+ i40e_status status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_vf);
+ cmd->id = cpu_to_le32(vfid);
+ desc.cookie_high = cpu_to_le32(v_opcode);
+ desc.cookie_low = cpu_to_le32(v_retval);
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_SI);
+ if (msglen) {
+ desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF |
+ I40E_AQ_FLAG_RD));
+ if (msglen > I40E_AQ_LARGE_BUF)
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+ desc.datalen = cpu_to_le16(msglen);
+ }
+ status = i40e_asq_send_command(hw, &desc, msg, msglen, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_set_hmc_resource_profile
+ * @hw: pointer to the hw struct
+ * @profile: type of profile the HMC is to be set as
+ * @pe_vf_enabled_count: the number of PE enabled VFs the system has
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * set the HMC profile of the device.
+ **/
+i40e_status i40e_aq_set_hmc_resource_profile(struct i40e_hw *hw,
+ enum i40e_aq_hmc_profile profile,
+ u8 pe_vf_enabled_count,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aq_get_set_hmc_resource_profile *cmd =
+ (struct i40e_aq_get_set_hmc_resource_profile *)&desc.params.raw;
+ i40e_status status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_hmc_resource_profile);
+
+ cmd->pm_profile = (u8)profile;
+ cmd->pe_vf_enabled = pe_vf_enabled_count;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_request_resource
+ * @hw: pointer to the hw struct
+ * @resource: resource id
+ * @access: access type
+ * @sdp_number: resource number
+ * @timeout: the maximum time in ms that the driver may hold the resource
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * requests common resource using the admin queue commands
+ **/
+i40e_status i40e_aq_request_resource(struct i40e_hw *hw,
+ enum i40e_aq_resources_ids resource,
+ enum i40e_aq_resource_access_type access,
+ u8 sdp_number, u64 *timeout,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_request_resource *cmd_resp =
+ (struct i40e_aqc_request_resource *)&desc.params.raw;
+ i40e_status status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_request_resource);
+
+ cmd_resp->resource_id = cpu_to_le16(resource);
+ cmd_resp->access_type = cpu_to_le16(access);
+ cmd_resp->resource_number = cpu_to_le32(sdp_number);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+ /* The completion specifies the maximum time in ms that the driver
+ * may hold the resource in the Timeout field.
+ * If the resource is held by someone else, the command completes with
+ * busy return value and the timeout field indicates the maximum time
+ * the current owner of the resource has to free it.
+ */
+ if (!status || hw->aq.asq_last_status == I40E_AQ_RC_EBUSY)
+ *timeout = le32_to_cpu(cmd_resp->timeout);
+
+ return status;
+}
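+
+/*
+ * Illustrative pairing (a sketch, not driver code; the resource id and
+ * access enum values are assumed from i40e_type.h): NVM access brackets
+ * the operation with a request/release pair:
+ *
+ * u64 timeout = 0;
+ * status = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID,
+ * I40E_RESOURCE_READ, 0, &timeout, NULL);
+ * if (!status) {
+ * ... perform the NVM reads ...
+ * i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
+ * }
+ */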
+
+/**
+ * i40e_aq_release_resource
+ * @hw: pointer to the hw struct
+ * @resource: resource id
+ * @sdp_number: resource number
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * release common resource using the admin queue commands
+ **/
+i40e_status i40e_aq_release_resource(struct i40e_hw *hw,
+ enum i40e_aq_resources_ids resource,
+ u8 sdp_number,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_request_resource *cmd =
+ (struct i40e_aqc_request_resource *)&desc.params.raw;
+ i40e_status status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_release_resource);
+
+ cmd->resource_id = cpu_to_le16(resource);
+ cmd->resource_number = cpu_to_le32(sdp_number);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_read_nvm
+ * @hw: pointer to the hw struct
+ * @module_pointer: module pointer location in words from the NVM beginning
+ * @offset: byte offset from the module beginning
+ * @length: length of the section to be read (in bytes from the offset)
+ * @data: command buffer (size [bytes] = length)
+ * @last_command: tells if this is the last command in a series
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Read the NVM using the admin queue commands
+ **/
+i40e_status i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer,
+ u32 offset, u16 length, void *data,
+ bool last_command,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_nvm_update *cmd =
+ (struct i40e_aqc_nvm_update *)&desc.params.raw;
+ i40e_status status;
+
+ /* The highest byte of the offset must be zero. */
+ if (offset & 0xFF000000) {
+ status = I40E_ERR_PARAM;
+ goto i40e_aq_read_nvm_exit;
+ }
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_read);
+
+ /* If this is the last command in a series, set the proper flag. */
+ if (last_command)
+ cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
+ cmd->module_pointer = module_pointer;
+ cmd->offset = cpu_to_le32(offset);
+ cmd->length = cpu_to_le16(length);
+
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+ if (length > I40E_AQ_LARGE_BUF)
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+
+ status = i40e_asq_send_command(hw, &desc, data, length, cmd_details);
+
+i40e_aq_read_nvm_exit:
+ return status;
+}
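+
+/*
+ * Illustrative use (a sketch, not driver code): a read split across two
+ * AQ commands sets last_command only on the final one:
+ *
+ * i40e_aq_read_nvm(hw, 0, offset, chunk, buf, false, NULL);
+ * i40e_aq_read_nvm(hw, 0, offset + chunk, rest,
+ * (u8 *)buf + chunk, true, NULL);
+ */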
+
+#define I40E_DEV_FUNC_CAP_SWITCH_MODE 0x01
+#define I40E_DEV_FUNC_CAP_MGMT_MODE 0x02
+#define I40E_DEV_FUNC_CAP_NPAR 0x03
+#define I40E_DEV_FUNC_CAP_OS2BMC 0x04
+#define I40E_DEV_FUNC_CAP_VALID_FUNC 0x05
+#define I40E_DEV_FUNC_CAP_SRIOV_1_1 0x12
+#define I40E_DEV_FUNC_CAP_VF 0x13
+#define I40E_DEV_FUNC_CAP_VMDQ 0x14
+#define I40E_DEV_FUNC_CAP_802_1_QBG 0x15
+#define I40E_DEV_FUNC_CAP_802_1_QBH 0x16
+#define I40E_DEV_FUNC_CAP_VSI 0x17
+#define I40E_DEV_FUNC_CAP_DCB 0x18
+#define I40E_DEV_FUNC_CAP_FCOE 0x21
+#define I40E_DEV_FUNC_CAP_RSS 0x40
+#define I40E_DEV_FUNC_CAP_RX_QUEUES 0x41
+#define I40E_DEV_FUNC_CAP_TX_QUEUES 0x42
+#define I40E_DEV_FUNC_CAP_MSIX 0x43
+#define I40E_DEV_FUNC_CAP_MSIX_VF 0x44
+#define I40E_DEV_FUNC_CAP_FLOW_DIRECTOR 0x45
+#define I40E_DEV_FUNC_CAP_IEEE_1588 0x46
+#define I40E_DEV_FUNC_CAP_MFP_MODE_1 0xF1
+#define I40E_DEV_FUNC_CAP_CEM 0xF2
+#define I40E_DEV_FUNC_CAP_IWARP 0x51
+#define I40E_DEV_FUNC_CAP_LED 0x61
+#define I40E_DEV_FUNC_CAP_SDP 0x62
+#define I40E_DEV_FUNC_CAP_MDIO 0x63
+
+/**
+ * i40e_parse_discover_capabilities
+ * @hw: pointer to the hw struct
+ * @buff: pointer to a buffer containing device/function capability records
+ * @cap_count: number of capability records in the list
+ * @list_type_opc: type of capabilities list to parse
+ *
+ * Parse the device/function capabilities list.
+ **/
+static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
+ u32 cap_count,
+ enum i40e_admin_queue_opc list_type_opc)
+{
+ struct i40e_aqc_list_capabilities_element_resp *cap;
+ u32 number, logical_id, phys_id;
+ struct i40e_hw_capabilities *p;
+ u32 reg_val;
+ u32 i = 0;
+ u16 id;
+
+ cap = (struct i40e_aqc_list_capabilities_element_resp *) buff;
+
+ if (list_type_opc == i40e_aqc_opc_list_dev_capabilities)
+ p = (struct i40e_hw_capabilities *)&hw->dev_caps;
+ else if (list_type_opc == i40e_aqc_opc_list_func_capabilities)
+ p = (struct i40e_hw_capabilities *)&hw->func_caps;
+ else
+ return;
+
+ for (i = 0; i < cap_count; i++, cap++) {
+ id = le16_to_cpu(cap->id);
+ number = le32_to_cpu(cap->number);
+ logical_id = le32_to_cpu(cap->logical_id);
+ phys_id = le32_to_cpu(cap->phys_id);
+
+ switch (id) {
+ case I40E_DEV_FUNC_CAP_SWITCH_MODE:
+ p->switch_mode = number;
+ break;
+ case I40E_DEV_FUNC_CAP_MGMT_MODE:
+ p->management_mode = number;
+ break;
+ case I40E_DEV_FUNC_CAP_NPAR:
+ p->npar_enable = number;
+ break;
+ case I40E_DEV_FUNC_CAP_OS2BMC:
+ p->os2bmc = number;
+ break;
+ case I40E_DEV_FUNC_CAP_VALID_FUNC:
+ p->valid_functions = number;
+ break;
+ case I40E_DEV_FUNC_CAP_SRIOV_1_1:
+ if (number == 1)
+ p->sr_iov_1_1 = true;
+ break;
+ case I40E_DEV_FUNC_CAP_VF:
+ p->num_vfs = number;
+ p->vf_base_id = logical_id;
+ break;
+ case I40E_DEV_FUNC_CAP_VMDQ:
+ if (number == 1)
+ p->vmdq = true;
+ break;
+ case I40E_DEV_FUNC_CAP_802_1_QBG:
+ if (number == 1)
+ p->evb_802_1_qbg = true;
+ break;
+ case I40E_DEV_FUNC_CAP_802_1_QBH:
+ if (number == 1)
+ p->evb_802_1_qbh = true;
+ break;
+ case I40E_DEV_FUNC_CAP_VSI:
+ p->num_vsis = number;
+ break;
+ case I40E_DEV_FUNC_CAP_DCB:
+ if (number == 1) {
+ p->dcb = true;
+ p->enabled_tcmap = logical_id;
+ p->maxtc = phys_id;
+ }
+ break;
+ case I40E_DEV_FUNC_CAP_FCOE:
+ if (number == 1)
+ p->fcoe = true;
+ break;
+ case I40E_DEV_FUNC_CAP_RSS:
+ p->rss = true;
+ reg_val = rd32(hw, I40E_PFQF_CTL_0);
+ if (reg_val & I40E_PFQF_CTL_0_HASHLUTSIZE_MASK)
+ p->rss_table_size = number;
+ else
+ p->rss_table_size = 128;
+ p->rss_table_entry_width = logical_id;
+ break;
+ case I40E_DEV_FUNC_CAP_RX_QUEUES:
+ p->num_rx_qp = number;
+ p->base_queue = phys_id;
+ break;
+ case I40E_DEV_FUNC_CAP_TX_QUEUES:
+ p->num_tx_qp = number;
+ p->base_queue = phys_id;
+ break;
+ case I40E_DEV_FUNC_CAP_MSIX:
+ p->num_msix_vectors = number;
+ break;
+ case I40E_DEV_FUNC_CAP_MSIX_VF:
+ p->num_msix_vectors_vf = number;
+ break;
+ case I40E_DEV_FUNC_CAP_MFP_MODE_1:
+ if (number == 1)
+ p->mfp_mode_1 = true;
+ break;
+ case I40E_DEV_FUNC_CAP_CEM:
+ if (number == 1)
+ p->mgmt_cem = true;
+ break;
+ case I40E_DEV_FUNC_CAP_IWARP:
+ if (number == 1)
+ p->iwarp = true;
+ break;
+ case I40E_DEV_FUNC_CAP_LED:
+ if (phys_id < I40E_HW_CAP_MAX_GPIO)
+ p->led[phys_id] = true;
+ break;
+ case I40E_DEV_FUNC_CAP_SDP:
+ if (phys_id < I40E_HW_CAP_MAX_GPIO)
+ p->sdp[phys_id] = true;
+ break;
+ case I40E_DEV_FUNC_CAP_MDIO:
+ if (number == 1) {
+ p->mdio_port_num = phys_id;
+ p->mdio_port_mode = logical_id;
+ }
+ break;
+ case I40E_DEV_FUNC_CAP_IEEE_1588:
+ if (number == 1)
+ p->ieee_1588 = true;
+ break;
+ case I40E_DEV_FUNC_CAP_FLOW_DIRECTOR:
+ p->fd = true;
+ p->fd_filters_guaranteed = number;
+ p->fd_filters_best_effort = logical_id;
+ break;
+ default:
+ break;
+ }
+ }
+
+ /* additional HW specific goodies that might
+ * someday be HW version specific
+ */
+ p->rx_buf_chain_len = I40E_MAX_CHAINED_RX_BUFFERS;
+}
+
+/**
+ * i40e_aq_discover_capabilities
+ * @hw: pointer to the hw struct
+ * @buff: a virtual buffer to hold the capabilities
+ * @buff_size: Size of the virtual buffer
+ * @data_size: Size of the returned data, or buff size needed if AQ err==ENOMEM
+ * @list_type_opc: capabilities type to discover - pass in the command opcode
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Get the device capabilities descriptions from the firmware
+ **/
+i40e_status i40e_aq_discover_capabilities(struct i40e_hw *hw,
+ void *buff, u16 buff_size, u16 *data_size,
+ enum i40e_admin_queue_opc list_type_opc,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aqc_list_capabilites *cmd;
+ i40e_status status = 0;
+ struct i40e_aq_desc desc;
+
+ cmd = (struct i40e_aqc_list_capabilites *)&desc.params.raw;
+
+ if (list_type_opc != i40e_aqc_opc_list_func_capabilities &&
+ list_type_opc != i40e_aqc_opc_list_dev_capabilities) {
+ status = I40E_ERR_PARAM;
+ goto exit;
+ }
+
+ i40e_fill_default_direct_cmd_desc(&desc, list_type_opc);
+
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+ if (buff_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+
+ status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
+ *data_size = le16_to_cpu(desc.datalen);
+
+ if (status)
+ goto exit;
+
+ i40e_parse_discover_capabilities(hw, buff, le32_to_cpu(cmd->count),
+ list_type_opc);
+
+exit:
+ return status;
+}
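+
+/*
+ * Illustrative retry (a sketch, not driver code): when the buffer is
+ * too small the firmware reports ENOMEM and data_size returns the
+ * length needed, so the caller can reallocate and try again:
+ *
+ * do {
+ * status = i40e_aq_discover_capabilities(hw, buf, len, &needed,
+ * i40e_aqc_opc_list_func_capabilities, NULL);
+ * if (hw->aq.asq_last_status != I40E_AQ_RC_ENOMEM)
+ * break;
+ * ... free buf, set len = needed, reallocate buf ...
+ * } while (1);
+ */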
+
+/**
+ * i40e_aq_get_lldp_mib
+ * @hw: pointer to the hw struct
+ * @bridge_type: type of bridge requested
+ * @mib_type: Local, Remote or both Local and Remote MIBs
+ * @buff: pointer to a user supplied buffer to store the MIB block
+ * @buff_size: size of the buffer (in bytes)
+ * @local_len: length of the returned Local LLDP MIB
+ * @remote_len: length of the returned Remote LLDP MIB
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Requests the complete LLDP MIB (entire packet).
+ **/
+i40e_status i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
+ u8 mib_type, void *buff, u16 buff_size,
+ u16 *local_len, u16 *remote_len,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_lldp_get_mib *cmd =
+ (struct i40e_aqc_lldp_get_mib *)&desc.params.raw;
+ struct i40e_aqc_lldp_get_mib *resp =
+ (struct i40e_aqc_lldp_get_mib *)&desc.params.raw;
+ i40e_status status;
+
+ if (buff_size == 0 || !buff)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_get_mib);
+ /* Indirect Command */
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+
+ cmd->type = mib_type & I40E_AQ_LLDP_MIB_TYPE_MASK;
+ cmd->type |= ((bridge_type << I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) &
+ I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
+
+ desc.datalen = cpu_to_le16(buff_size);
+
+ if (buff_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+
+ status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
+ if (!status) {
+ if (local_len != NULL)
+ *local_len = le16_to_cpu(resp->local_len);
+ if (remote_len != NULL)
+ *remote_len = le16_to_cpu(resp->remote_len);
+ }
+
+ return status;
+}
+
+/**
+ * i40e_aq_cfg_lldp_mib_change_event
+ * @hw: pointer to the hw struct
+ * @enable_update: Enable or Disable event posting
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Enable or Disable posting of an event on ARQ when LLDP MIB
+ * associated with the interface changes
+ **/
+i40e_status i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
+ bool enable_update,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_lldp_update_mib *cmd =
+ (struct i40e_aqc_lldp_update_mib *)&desc.params.raw;
+ i40e_status status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_update_mib);
+
+ if (!enable_update)
+ cmd->command |= I40E_AQ_LLDP_MIB_UPDATE_DISABLE;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_stop_lldp
+ * @hw: pointer to the hw struct
+ * @shutdown_agent: True if LLDP Agent needs to be Shutdown
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Stop or Shutdown the embedded LLDP Agent
+ **/
+i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_lldp_stop *cmd =
+ (struct i40e_aqc_lldp_stop *)&desc.params.raw;
+ i40e_status status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_stop);
+
+ if (shutdown_agent)
+ cmd->command |= I40E_AQ_LLDP_AGENT_SHUTDOWN;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_start_lldp
+ * @hw: pointer to the hw struct
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Start the embedded LLDP Agent on all ports.
+ **/
+i40e_status i40e_aq_start_lldp(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_lldp_start *cmd =
+ (struct i40e_aqc_lldp_start *)&desc.params.raw;
+ i40e_status status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_start);
+
+ cmd->command = I40E_AQ_LLDP_AGENT_START;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_delete_element - Delete switch element
+ * @hw: pointer to the hw struct
+ * @seid: the SEID to delete from the switch
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * This deletes a switch element from the switch.
+ **/
+i40e_status i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_switch_seid *cmd =
+ (struct i40e_aqc_switch_seid *)&desc.params.raw;
+ i40e_status status;
+
+ if (seid == 0)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_delete_element);
+
+ cmd->seid = cpu_to_le16(seid);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_tx_sched_cmd - generic Tx scheduler AQ command handler
+ * @hw: pointer to the hw struct
+ * @seid: seid for the physical port/switching component/vsi
+ * @buff: Indirect buffer to hold data parameters and response
+ * @buff_size: Indirect buffer size
+ * @opcode: Tx scheduler AQ command opcode
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Generic command handler for Tx scheduler AQ commands
+ **/
+static i40e_status i40e_aq_tx_sched_cmd(struct i40e_hw *hw, u16 seid,
+ void *buff, u16 buff_size,
+ enum i40e_admin_queue_opc opcode,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_tx_sched_ind *cmd =
+ (struct i40e_aqc_tx_sched_ind *)&desc.params.raw;
+ i40e_status status;
+ bool cmd_param_flag = false;
+
+ switch (opcode) {
+ case i40e_aqc_opc_configure_vsi_ets_sla_bw_limit:
+ case i40e_aqc_opc_configure_vsi_tc_bw:
+ case i40e_aqc_opc_enable_switching_comp_ets:
+ case i40e_aqc_opc_modify_switching_comp_ets:
+ case i40e_aqc_opc_disable_switching_comp_ets:
+ case i40e_aqc_opc_configure_switching_comp_ets_bw_limit:
+ case i40e_aqc_opc_configure_switching_comp_bw_config:
+ cmd_param_flag = true;
+ break;
+ case i40e_aqc_opc_query_vsi_bw_config:
+ case i40e_aqc_opc_query_vsi_ets_sla_config:
+ case i40e_aqc_opc_query_switching_comp_ets_config:
+ case i40e_aqc_opc_query_port_ets_config:
+ case i40e_aqc_opc_query_switching_comp_bw_config:
+ cmd_param_flag = false;
+ break;
+ default:
+ return I40E_ERR_PARAM;
+ }
+
+ i40e_fill_default_direct_cmd_desc(&desc, opcode);
+
+ /* Indirect command */
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+ if (cmd_param_flag)
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
+ if (buff_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+
+ desc.datalen = cpu_to_le16(buff_size);
+
+ cmd->vsi_seid = cpu_to_le16(seid);
+
+ status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_config_vsi_tc_bw - Config VSI BW Allocation per TC
+ * @hw: pointer to the hw struct
+ * @seid: VSI seid
+ * @bw_data: Buffer holding enabled TCs, relative TC BW limit/credits
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_configure_vsi_tc_bw_data *bw_data,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
+ i40e_aqc_opc_configure_vsi_tc_bw,
+ cmd_details);
+}
+
+/**
+ * i40e_aq_query_vsi_bw_config - Query VSI BW configuration
+ * @hw: pointer to the hw struct
+ * @seid: seid of the VSI
+ * @bw_data: Buffer to hold VSI BW configuration
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_query_vsi_bw_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_vsi_bw_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
+ i40e_aqc_opc_query_vsi_bw_config,
+ cmd_details);
+}
+
+/**
+ * i40e_aq_query_vsi_ets_sla_config - Query VSI BW configuration per TC
+ * @hw: pointer to the hw struct
+ * @seid: seid of the VSI
+ * @bw_data: Buffer to hold VSI BW configuration per TC
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_vsi_ets_sla_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
+ i40e_aqc_opc_query_vsi_ets_sla_config,
+ cmd_details);
+}
+
+/**
+ * i40e_aq_query_switch_comp_ets_config - Query Switch comp BW config per TC
+ * @hw: pointer to the hw struct
+ * @seid: seid of the switching component
+ * @bw_data: Buffer to hold switching component's per TC BW config
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_query_switch_comp_ets_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_switching_comp_ets_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
+ i40e_aqc_opc_query_switching_comp_ets_config,
+ cmd_details);
+}
+
+/**
+ * i40e_aq_query_port_ets_config - Query Physical Port ETS configuration
+ * @hw: pointer to the hw struct
+ * @seid: seid of the VSI or switching component connected to Physical Port
+ * @bw_data: Buffer to hold current ETS configuration for the Physical Port
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_query_port_ets_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_port_ets_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
+ i40e_aqc_opc_query_port_ets_config,
+ cmd_details);
+}
+
+/**
+ * i40e_aq_query_switch_comp_bw_config - Query Switch comp BW configuration
+ * @hw: pointer to the hw struct
+ * @seid: seid of the switching component
+ * @bw_data: Buffer to hold switching component's BW configuration
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
+ i40e_aqc_opc_query_switching_comp_bw_config,
+ cmd_details);
+}
+
+/**
+ * i40e_validate_filter_settings
+ * @hw: pointer to the hardware structure
+ * @settings: Filter control settings
+ *
+ * Check and validate the filter control settings passed.
+ * The function checks that the filter and context sizes passed in
+ * for FCoE and PE are valid.
+ *
+ * Returns 0 if the values passed are valid and within
+ * range else returns an error.
+ **/
+static i40e_status i40e_validate_filter_settings(struct i40e_hw *hw,
+ struct i40e_filter_control_settings *settings)
+{
+ u32 fcoe_cntx_size, fcoe_filt_size;
+ u32 pe_cntx_size, pe_filt_size;
+ u32 fcoe_fmax, pe_fmax;
+ u32 val;
+
+ /* Validate FCoE settings passed */
+ switch (settings->fcoe_filt_num) {
+ case I40E_HASH_FILTER_SIZE_1K:
+ case I40E_HASH_FILTER_SIZE_2K:
+ case I40E_HASH_FILTER_SIZE_4K:
+ case I40E_HASH_FILTER_SIZE_8K:
+ case I40E_HASH_FILTER_SIZE_16K:
+ case I40E_HASH_FILTER_SIZE_32K:
+ fcoe_filt_size = I40E_HASH_FILTER_BASE_SIZE;
+ fcoe_filt_size <<= (u32)settings->fcoe_filt_num;
+ break;
+ default:
+ return I40E_ERR_PARAM;
+ }
+
+ switch (settings->fcoe_cntx_num) {
+ case I40E_DMA_CNTX_SIZE_512:
+ case I40E_DMA_CNTX_SIZE_1K:
+ case I40E_DMA_CNTX_SIZE_2K:
+ case I40E_DMA_CNTX_SIZE_4K:
+ fcoe_cntx_size = I40E_DMA_CNTX_BASE_SIZE;
+ fcoe_cntx_size <<= (u32)settings->fcoe_cntx_num;
+ break;
+ default:
+ return I40E_ERR_PARAM;
+ }
+
+ /* Validate PE settings passed */
+ switch (settings->pe_filt_num) {
+ case I40E_HASH_FILTER_SIZE_1K:
+ case I40E_HASH_FILTER_SIZE_2K:
+ case I40E_HASH_FILTER_SIZE_4K:
+ case I40E_HASH_FILTER_SIZE_8K:
+ case I40E_HASH_FILTER_SIZE_16K:
+ case I40E_HASH_FILTER_SIZE_32K:
+ case I40E_HASH_FILTER_SIZE_64K:
+ case I40E_HASH_FILTER_SIZE_128K:
+ case I40E_HASH_FILTER_SIZE_256K:
+ case I40E_HASH_FILTER_SIZE_512K:
+ case I40E_HASH_FILTER_SIZE_1M:
+ pe_filt_size = I40E_HASH_FILTER_BASE_SIZE;
+ pe_filt_size <<= (u32)settings->pe_filt_num;
+ break;
+ default:
+ return I40E_ERR_PARAM;
+ }
+
+ switch (settings->pe_cntx_num) {
+ case I40E_DMA_CNTX_SIZE_512:
+ case I40E_DMA_CNTX_SIZE_1K:
+ case I40E_DMA_CNTX_SIZE_2K:
+ case I40E_DMA_CNTX_SIZE_4K:
+ case I40E_DMA_CNTX_SIZE_8K:
+ case I40E_DMA_CNTX_SIZE_16K:
+ case I40E_DMA_CNTX_SIZE_32K:
+ case I40E_DMA_CNTX_SIZE_64K:
+ case I40E_DMA_CNTX_SIZE_128K:
+ case I40E_DMA_CNTX_SIZE_256K:
+ pe_cntx_size = I40E_DMA_CNTX_BASE_SIZE;
+ pe_cntx_size <<= (u32)settings->pe_cntx_num;
+ break;
+ default:
+ return I40E_ERR_PARAM;
+ }
+
+ /* FCHSIZE + FCDSIZE should not be greater than PMFCOEFMAX */
+ val = rd32(hw, I40E_GLHMC_FCOEFMAX);
+ fcoe_fmax = (val & I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_MASK)
+ >> I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT;
+ if (fcoe_filt_size + fcoe_cntx_size > fcoe_fmax)
+ return I40E_ERR_INVALID_SIZE;
+
+ /* PEHSIZE + PEDSIZE should not be greater than PMPEXFMAX */
+ val = rd32(hw, I40E_GLHMC_PEXFMAX);
+ pe_fmax = (val & I40E_GLHMC_PEXFMAX_PMPEXFMAX_MASK)
+ >> I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT;
+ if (pe_filt_size + pe_cntx_size > pe_fmax)
+ return I40E_ERR_INVALID_SIZE;
+
+ return 0;
+}
+
+/**
+ * i40e_set_filter_control
+ * @hw: pointer to the hardware structure
+ * @settings: Filter control settings
+ *
+ * Set the Queue Filters for PE/FCoE and enable filters required
+ * for a single PF. These settings are expected to be programmed
+ * at driver initialization time.
+ **/
+i40e_status i40e_set_filter_control(struct i40e_hw *hw,
+ struct i40e_filter_control_settings *settings)
+{
+ i40e_status ret = 0;
+ u32 hash_lut_size = 0;
+ u32 val;
+
+ if (!settings)
+ return I40E_ERR_PARAM;
+
+ /* Validate the input settings */
+ ret = i40e_validate_filter_settings(hw, settings);
+ if (ret)
+ return ret;
+
+ /* Read the PF Queue Filter control register */
+ val = rd32(hw, I40E_PFQF_CTL_0);
+
+ /* Program required PE hash buckets for the PF */
+ val &= ~I40E_PFQF_CTL_0_PEHSIZE_MASK;
+ val |= ((u32)settings->pe_filt_num << I40E_PFQF_CTL_0_PEHSIZE_SHIFT) &
+ I40E_PFQF_CTL_0_PEHSIZE_MASK;
+ /* Program required PE contexts for the PF */
+ val &= ~I40E_PFQF_CTL_0_PEDSIZE_MASK;
+ val |= ((u32)settings->pe_cntx_num << I40E_PFQF_CTL_0_PEDSIZE_SHIFT) &
+ I40E_PFQF_CTL_0_PEDSIZE_MASK;
+
+ /* Program required FCoE hash buckets for the PF */
+ val &= ~I40E_PFQF_CTL_0_PFFCHSIZE_MASK;
+ val |= ((u32)settings->fcoe_filt_num <<
+ I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT) &
+ I40E_PFQF_CTL_0_PFFCHSIZE_MASK;
+ /* Program required FCoE DDP contexts for the PF */
+ val &= ~I40E_PFQF_CTL_0_PFFCDSIZE_MASK;
+ val |= ((u32)settings->fcoe_cntx_num <<
+ I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT) &
+ I40E_PFQF_CTL_0_PFFCDSIZE_MASK;
+
+ /* Program Hash LUT size for the PF */
+ val &= ~I40E_PFQF_CTL_0_HASHLUTSIZE_MASK;
+ if (settings->hash_lut_size == I40E_HASH_LUT_SIZE_512)
+ hash_lut_size = 1;
+ val |= (hash_lut_size << I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT) &
+ I40E_PFQF_CTL_0_HASHLUTSIZE_MASK;
+
+ /* Enable FDIR, Ethertype and MACVLAN filters for PF and VFs */
+ if (settings->enable_fdir)
+ val |= I40E_PFQF_CTL_0_FD_ENA_MASK;
+ if (settings->enable_ethtype)
+ val |= I40E_PFQF_CTL_0_ETYPE_ENA_MASK;
+ if (settings->enable_macvlan)
+ val |= I40E_PFQF_CTL_0_MACVLAN_ENA_MASK;
+
+ wr32(hw, I40E_PFQF_CTL_0, val);
+
+ return 0;
+}
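+
+/* Illustrative call sketch, not taken from the driver; the field values
+ * here are assumptions for demonstration only:
+ *
+ * struct i40e_filter_control_settings settings;
+ *
+ * memset(&settings, 0, sizeof(settings));
+ * settings.hash_lut_size = I40E_HASH_LUT_SIZE_512;
+ * settings.enable_fdir = true;
+ * settings.enable_ethtype = true;
+ * settings.enable_macvlan = true;
+ * if (i40e_set_filter_control(&pf->hw, &settings))
+ * dev_info(&pf->pdev->dev, "set_filter_control failed\n");
+ */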
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
new file mode 100644
index 0000000..8dbd91f
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -0,0 +1,2076 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifdef CONFIG_DEBUG_FS
+
+#include <linux/fs.h>
+#include <linux/debugfs.h>
+
+#include "i40e.h"
+
+static struct dentry *i40e_dbg_root;
+
+/**
+ * i40e_dbg_find_vsi - searches for the vsi with the given seid
+ * @pf: the pf structure to search for the vsi
+ * @seid: seid of the vsi it is searching for
+ **/
+static struct i40e_vsi *i40e_dbg_find_vsi(struct i40e_pf *pf, int seid)
+{
+ int i;
+
+ if (seid < 0)
+ dev_info(&pf->pdev->dev, "%d: bad seid\n", seid);
+ else
+ for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
+ if (pf->vsi[i] && (pf->vsi[i]->seid == seid))
+ return pf->vsi[i];
+
+ return NULL;
+}
+
+/**
+ * i40e_dbg_find_veb - searches for the veb with the given seid
+ * @pf: the pf structure to search for the veb
+ * @seid: seid of the veb it is searching for
+ **/
+static struct i40e_veb *i40e_dbg_find_veb(struct i40e_pf *pf, int seid)
+{
+ int i;
+
+ if ((seid < I40E_BASE_VEB_SEID) ||
+ (seid >= (I40E_BASE_VEB_SEID + I40E_MAX_VEB)))
+ dev_info(&pf->pdev->dev, "%d: bad seid\n", seid);
+ else
+ for (i = 0; i < I40E_MAX_VEB; i++)
+ if (pf->veb[i] && pf->veb[i]->seid == seid)
+ return pf->veb[i];
+ return NULL;
+}
+
+/**************************************************************
+ * dump
+ * The dump entry in debugfs is for getting a data snapshot of
+ * the driver's current configuration and runtime details.
+ * When the filesystem entry is written, a snapshot is taken.
+ * When the entry is read, the most recent snapshot data is dumped.
+ **************************************************************/
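+
+/* Illustrative usage sketch (assumes debugfs is mounted at
+ * /sys/kernel/debug and that the per-PF directory is named after the
+ * device's PCI id; adjust the path for the actual system):
+ *   echo 1 > /sys/kernel/debug/i40e/<pci_id>/dump    # snapshot the PF
+ *   cat /sys/kernel/debug/i40e/<pci_id>/dump         # read the snapshot
+ *   echo 0 > /sys/kernel/debug/i40e/<pci_id>/dump    # free the buffer
+ */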
+static char *i40e_dbg_dump_buf;
+static ssize_t i40e_dbg_dump_data_len;
+static ssize_t i40e_dbg_dump_buffer_len;
+
+/**
+ * i40e_dbg_dump_read - read the dump data
+ * @filp: the opened file
+ * @buffer: where to write the data for the user to read
+ * @count: the size of the user's buffer
+ * @ppos: file position offset
+ **/
+static ssize_t i40e_dbg_dump_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ int bytes_not_copied;
+ int len;
+
+ /* is *ppos bigger than the available data? */
+ if (*ppos >= i40e_dbg_dump_data_len || !i40e_dbg_dump_buf)
+ return 0;
+
+ /* be sure to not read beyond the end of available data */
+ len = min_t(int, count, (i40e_dbg_dump_data_len - *ppos));
+
+ bytes_not_copied = copy_to_user(buffer, &i40e_dbg_dump_buf[*ppos], len);
+ if (bytes_not_copied)
+ return -EFAULT;
+
+ *ppos += len;
+ return len;
+}
+
+/**
+ * i40e_dbg_prep_dump_buf
+ * @pf: the pf we're working with
+ * @buflen: the desired buffer length
+ *
+ * Returns the usable buffer length on success, 0 on allocation failure
+ **/
+static int i40e_dbg_prep_dump_buf(struct i40e_pf *pf, int buflen)
+{
+ /* if the existing buffer is not big enough, free it for re-allocation */
+ if (i40e_dbg_dump_buffer_len && i40e_dbg_dump_buffer_len < buflen) {
+ kfree(i40e_dbg_dump_buf);
+ i40e_dbg_dump_buffer_len = 0;
+ i40e_dbg_dump_buf = NULL;
+ }
+
+ /* get a new buffer if needed */
+ if (!i40e_dbg_dump_buf) {
+ i40e_dbg_dump_buf = kzalloc(buflen, GFP_KERNEL);
+ if (i40e_dbg_dump_buf != NULL)
+ i40e_dbg_dump_buffer_len = buflen;
+ }
+
+ return i40e_dbg_dump_buffer_len;
+}
+
+/**
+ * i40e_dbg_dump_write - trigger a data dump snapshot
+ * @filp: the opened file
+ * @buffer: where to find the user's data
+ * @count: the length of the user's data
+ * @ppos: file position offset
+ *
+ * A write of a seid value triggers a new snapshot; writing 0 frees the buffer
+ **/
+static ssize_t i40e_dbg_dump_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct i40e_pf *pf = filp->private_data;
+ char dump_request_buf[16];
+ bool seid_found = false;
+ int bytes_not_copied;
+ long seid = -1;
+ int buflen = 0;
+ int i, ret;
+ int len;
+ u8 *p;
+
+ /* don't allow partial writes */
+ if (*ppos != 0)
+ return 0;
+ if (count >= sizeof(dump_request_buf))
+ return -ENOSPC;
+
+ bytes_not_copied = copy_from_user(dump_request_buf, buffer, count);
+ if (bytes_not_copied)
+ return -EFAULT;
+ dump_request_buf[count] = '\0';
+
+ /* decode the SEID given to be dumped */
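+ /* seid 0 frees the snapshot buffer, seid 1 (or the PF's own seid)
+ * snapshots the PF, and seids in the VSI or VEB ranges snapshot
+ * that VSI or VEB */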
+ ret = kstrtol(dump_request_buf, 0, &seid);
+ if (ret < 0) {
+ dev_info(&pf->pdev->dev, "bad seid value '%s'\n",
+ dump_request_buf);
+ } else if (seid == 0) {
+ seid_found = true;
+
+ kfree(i40e_dbg_dump_buf);
+ i40e_dbg_dump_buffer_len = 0;
+ i40e_dbg_dump_data_len = 0;
+ i40e_dbg_dump_buf = NULL;
+ dev_info(&pf->pdev->dev, "debug buffer freed\n");
+
+ } else if (seid == pf->pf_seid || seid == 1) {
+ seid_found = true;
+
+ buflen = sizeof(struct i40e_pf);
+ buflen += (sizeof(struct i40e_aq_desc)
+ * (pf->hw.aq.num_arq_entries + pf->hw.aq.num_asq_entries));
+
+ if (i40e_dbg_prep_dump_buf(pf, buflen)) {
+ p = i40e_dbg_dump_buf;
+
+ len = sizeof(struct i40e_pf);
+ memcpy(p, pf, len);
+ p += len;
+
+ len = (sizeof(struct i40e_aq_desc)
+ * pf->hw.aq.num_asq_entries);
+ memcpy(p, pf->hw.aq.asq.desc, len);
+ p += len;
+
+ len = (sizeof(struct i40e_aq_desc)
+ * pf->hw.aq.num_arq_entries);
+ memcpy(p, pf->hw.aq.arq.desc, len);
+ p += len;
+
+ i40e_dbg_dump_data_len = buflen;
+ dev_info(&pf->pdev->dev,
+ "PF seid %ld dumped %d bytes\n",
+ seid, (int)i40e_dbg_dump_data_len);
+ }
+ } else if (seid >= I40E_BASE_VSI_SEID) {
+ struct i40e_vsi *vsi = NULL;
+ struct i40e_mac_filter *f;
+ int filter_count = 0;
+
+ mutex_lock(&pf->switch_mutex);
+ vsi = i40e_dbg_find_vsi(pf, seid);
+ if (!vsi) {
+ mutex_unlock(&pf->switch_mutex);
+ goto write_exit;
+ }
+
+ buflen = sizeof(struct i40e_vsi);
+ buflen += sizeof(struct i40e_q_vector) * vsi->num_q_vectors;
+ buflen += sizeof(struct i40e_ring) * 2 * vsi->num_queue_pairs;
+ buflen += sizeof(struct i40e_tx_buffer) * vsi->num_queue_pairs;
+ buflen += sizeof(struct i40e_rx_buffer) * vsi->num_queue_pairs;
+ list_for_each_entry(f, &vsi->mac_filter_list, list)
+ filter_count++;
+ buflen += sizeof(struct i40e_mac_filter) * filter_count;
+
+ if (i40e_dbg_prep_dump_buf(pf, buflen)) {
+ p = i40e_dbg_dump_buf;
+ seid_found = true;
+
+ len = sizeof(struct i40e_vsi);
+ memcpy(p, vsi, len);
+ p += len;
+
+ len = (sizeof(struct i40e_q_vector)
+ * vsi->num_q_vectors);
+ memcpy(p, vsi->q_vectors, len);
+ p += len;
+
+ len = (sizeof(struct i40e_ring) * vsi->num_queue_pairs);
+ memcpy(p, vsi->tx_rings, len);
+ p += len;
+ memcpy(p, vsi->rx_rings, len);
+ p += len;
+
+ for (i = 0; i < vsi->num_queue_pairs; i++) {
+ len = sizeof(struct i40e_tx_buffer);
+ memcpy(p, vsi->tx_rings[i].tx_bi, len);
+ p += len;
+ }
+ for (i = 0; i < vsi->num_queue_pairs; i++) {
+ len = sizeof(struct i40e_rx_buffer);
+ memcpy(p, vsi->rx_rings[i].rx_bi, len);
+ p += len;
+ }
+
+ /* macvlan filter list */
+ len = sizeof(struct i40e_mac_filter);
+ list_for_each_entry(f, &vsi->mac_filter_list, list) {
+ memcpy(p, f, len);
+ p += len;
+ }
+
+ i40e_dbg_dump_data_len = buflen;
+ dev_info(&pf->pdev->dev,
+ "VSI seid %ld dumped %d bytes\n",
+ seid, (int)i40e_dbg_dump_data_len);
+ }
+ mutex_unlock(&pf->switch_mutex);
+ } else if (seid >= I40E_BASE_VEB_SEID) {
+ struct i40e_veb *veb = NULL;
+
+ mutex_lock(&pf->switch_mutex);
+ veb = i40e_dbg_find_veb(pf, seid);
+ if (!veb) {
+ mutex_unlock(&pf->switch_mutex);
+ goto write_exit;
+ }
+
+ buflen = sizeof(struct i40e_veb);
+ if (i40e_dbg_prep_dump_buf(pf, buflen)) {
+ seid_found = true;
+ memcpy(i40e_dbg_dump_buf, veb, buflen);
+ i40e_dbg_dump_data_len = buflen;
+ dev_info(&pf->pdev->dev,
+ "VEB seid %ld dumped %d bytes\n",
+ seid, (int)i40e_dbg_dump_data_len);
+ }
+ mutex_unlock(&pf->switch_mutex);
+ }
+
+write_exit:
+ if (!seid_found)
+ dev_info(&pf->pdev->dev, "unknown seid %ld\n", seid);
+
+ return count;
+}
+
+static const struct file_operations i40e_dbg_dump_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = i40e_dbg_dump_read,
+ .write = i40e_dbg_dump_write,
+};
+
+/**************************************************************
+ * command
+ * The command entry in debugfs is for giving the driver commands
+ * to be executed - these may be for changing the internal switch
+ * setup, adding or removing filters, or other things. Many of
+ * these will be useful for some forms of unit testing.
+ **************************************************************/
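+
+/* Illustrative usage sketch (same path assumptions as the dump entry
+ * above):
+ *   echo "dump vsi 20" > /sys/kernel/debug/i40e/<pci_id>/command
+ *   cat /sys/kernel/debug/i40e/<pci_id>/command   # netdev name + datum
+ */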
+static char i40e_dbg_command_buf[256] = "hello world";
+
+/**
+ * i40e_dbg_command_read - read back the command datum
+ * @filp: the opened file
+ * @buffer: where to write the data for the user to read
+ * @count: the size of the user's buffer
+ * @ppos: file position offset
+ **/
+static ssize_t i40e_dbg_command_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct i40e_pf *pf = filp->private_data;
+ int bytes_not_copied;
+ int buf_size = 256;
+ char *buf;
+ int len;
+
+ /* don't allow partial reads */
+ if (*ppos != 0)
+ return 0;
+ if (count < buf_size)
+ return -ENOSPC;
+
+ buf = kzalloc(buf_size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ len = snprintf(buf, buf_size, "%s: %s\n",
+ pf->vsi[pf->lan_vsi]->netdev->name,
+ i40e_dbg_command_buf);
+
+ bytes_not_copied = copy_to_user(buffer, buf, len);
+ kfree(buf);
+
+ if (bytes_not_copied)
+ return -EFAULT;
+
+ *ppos = len;
+ return len;
+}
+
+/**
+ * i40e_dbg_dump_vsi_seid - handles dump vsi seid write into command datum
+ * @pf: the i40e_pf created in command write
+ * @seid: the seid the user put in
+ **/
+static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
+{
+ struct rtnl_link_stats64 *nstat;
+ struct i40e_mac_filter *f;
+ struct i40e_vsi *vsi;
+ int i;
+
+ vsi = i40e_dbg_find_vsi(pf, seid);
+ if (!vsi) {
+ dev_info(&pf->pdev->dev,
+ "dump %d: seid not found\n", seid);
+ return;
+ }
+ dev_info(&pf->pdev->dev, "vsi seid %d\n", seid);
+ if (vsi->netdev)
+ dev_info(&pf->pdev->dev,
+ " netdev: name = %s\n",
+ vsi->netdev->name);
+ if (vsi->active_vlans)
+ dev_info(&pf->pdev->dev,
+ " vlgrp: & = %p\n", vsi->active_vlans);
+ dev_info(&pf->pdev->dev,
+ " netdev_registered = %i, current_netdev_flags = 0x%04x, state = %li flags = 0x%08lx\n",
+ vsi->netdev_registered,
+ vsi->current_netdev_flags, vsi->state, vsi->flags);
+ list_for_each_entry(f, &vsi->mac_filter_list, list) {
+ dev_info(&pf->pdev->dev,
+ " mac_filter_list: %pM vid=%d, is_netdev=%d is_vf=%d counter=%d\n",
+ f->macaddr, f->vlan, f->is_netdev, f->is_vf,
+ f->counter);
+ }
+ nstat = i40e_get_vsi_stats_struct(vsi);
+ dev_info(&pf->pdev->dev,
+ " net_stats: rx_packets = %lu, rx_bytes = %lu, rx_errors = %lu, rx_dropped = %lu\n",
+ (long unsigned int)nstat->rx_packets,
+ (long unsigned int)nstat->rx_bytes,
+ (long unsigned int)nstat->rx_errors,
+ (long unsigned int)nstat->rx_dropped);
+ dev_info(&pf->pdev->dev,
+ " net_stats: tx_packets = %lu, tx_bytes = %lu, tx_errors = %lu, tx_dropped = %lu\n",
+ (long unsigned int)nstat->tx_packets,
+ (long unsigned int)nstat->tx_bytes,
+ (long unsigned int)nstat->tx_errors,
+ (long unsigned int)nstat->tx_dropped);
+ dev_info(&pf->pdev->dev,
+ " net_stats: multicast = %lu, collisions = %lu\n",
+ (long unsigned int)nstat->multicast,
+ (long unsigned int)nstat->collisions);
+ dev_info(&pf->pdev->dev,
+ " net_stats: rx_length_errors = %lu, rx_over_errors = %lu, rx_crc_errors = %lu\n",
+ (long unsigned int)nstat->rx_length_errors,
+ (long unsigned int)nstat->rx_over_errors,
+ (long unsigned int)nstat->rx_crc_errors);
+ dev_info(&pf->pdev->dev,
+ " net_stats: rx_frame_errors = %lu, rx_fifo_errors = %lu, rx_missed_errors = %lu\n",
+ (long unsigned int)nstat->rx_frame_errors,
+ (long unsigned int)nstat->rx_fifo_errors,
+ (long unsigned int)nstat->rx_missed_errors);
+ dev_info(&pf->pdev->dev,
+ " net_stats: tx_aborted_errors = %lu, tx_carrier_errors = %lu, tx_fifo_errors = %lu\n",
+ (long unsigned int)nstat->tx_aborted_errors,
+ (long unsigned int)nstat->tx_carrier_errors,
+ (long unsigned int)nstat->tx_fifo_errors);
+ dev_info(&pf->pdev->dev,
+ " net_stats: tx_heartbeat_errors = %lu, tx_window_errors = %lu\n",
+ (long unsigned int)nstat->tx_heartbeat_errors,
+ (long unsigned int)nstat->tx_window_errors);
+ dev_info(&pf->pdev->dev,
+ " net_stats: rx_compressed = %lu, tx_compressed = %lu\n",
+ (long unsigned int)nstat->rx_compressed,
+ (long unsigned int)nstat->tx_compressed);
+ dev_info(&pf->pdev->dev,
+ " net_stats_offsets: rx_packets = %lu, rx_bytes = %lu, rx_errors = %lu, rx_dropped = %lu\n",
+ (long unsigned int)vsi->net_stats_offsets.rx_packets,
+ (long unsigned int)vsi->net_stats_offsets.rx_bytes,
+ (long unsigned int)vsi->net_stats_offsets.rx_errors,
+ (long unsigned int)vsi->net_stats_offsets.rx_dropped);
+ dev_info(&pf->pdev->dev,
+ " net_stats_offsets: tx_packets = %lu, tx_bytes = %lu, tx_errors = %lu, tx_dropped = %lu\n",
+ (long unsigned int)vsi->net_stats_offsets.tx_packets,
+ (long unsigned int)vsi->net_stats_offsets.tx_bytes,
+ (long unsigned int)vsi->net_stats_offsets.tx_errors,
+ (long unsigned int)vsi->net_stats_offsets.tx_dropped);
+ dev_info(&pf->pdev->dev,
+ " net_stats_offsets: multicast = %lu, collisions = %lu\n",
+ (long unsigned int)vsi->net_stats_offsets.multicast,
+ (long unsigned int)vsi->net_stats_offsets.collisions);
+ dev_info(&pf->pdev->dev,
+ " net_stats_offsets: rx_length_errors = %lu, rx_over_errors = %lu, rx_crc_errors = %lu\n",
+ (long unsigned int)vsi->net_stats_offsets.rx_length_errors,
+ (long unsigned int)vsi->net_stats_offsets.rx_over_errors,
+ (long unsigned int)vsi->net_stats_offsets.rx_crc_errors);
+ dev_info(&pf->pdev->dev,
+ " net_stats_offsets: rx_frame_errors = %lu, rx_fifo_errors = %lu, rx_missed_errors = %lu\n",
+ (long unsigned int)vsi->net_stats_offsets.rx_frame_errors,
+ (long unsigned int)vsi->net_stats_offsets.rx_fifo_errors,
+ (long unsigned int)vsi->net_stats_offsets.rx_missed_errors);
+ dev_info(&pf->pdev->dev,
+ " net_stats_offsets: tx_aborted_errors = %lu, tx_carrier_errors = %lu, tx_fifo_errors = %lu\n",
+ (long unsigned int)vsi->net_stats_offsets.tx_aborted_errors,
+ (long unsigned int)vsi->net_stats_offsets.tx_carrier_errors,
+ (long unsigned int)vsi->net_stats_offsets.tx_fifo_errors);
+ dev_info(&pf->pdev->dev,
+ " net_stats_offsets: tx_heartbeat_errors = %lu, tx_window_errors = %lu\n",
+ (long unsigned int)vsi->net_stats_offsets.tx_heartbeat_errors,
+ (long unsigned int)vsi->net_stats_offsets.tx_window_errors);
+ dev_info(&pf->pdev->dev,
+ " net_stats_offsets: rx_compressed = %lu, tx_compressed = %lu\n",
+ (long unsigned int)vsi->net_stats_offsets.rx_compressed,
+ (long unsigned int)vsi->net_stats_offsets.tx_compressed);
+ dev_info(&pf->pdev->dev,
+ " tx_restart = %d, tx_busy = %d, rx_buf_failed = %d, rx_page_failed = %d\n",
+ vsi->tx_restart, vsi->tx_busy,
+ vsi->rx_buf_failed, vsi->rx_page_failed);
+ if (vsi->rx_rings) {
+ for (i = 0; i < vsi->num_queue_pairs; i++) {
+ dev_info(&pf->pdev->dev,
+ " rx_rings[%i]: desc = %p\n",
+ i, vsi->rx_rings[i].desc);
+ dev_info(&pf->pdev->dev,
+ " rx_rings[%i]: dev = %p, netdev = %p, rx_bi = %p\n",
+ i, vsi->rx_rings[i].dev,
+ vsi->rx_rings[i].netdev,
+ vsi->rx_rings[i].rx_bi);
+ dev_info(&pf->pdev->dev,
+ " rx_rings[%i]: state = %li, queue_index = %d, reg_idx = %d\n",
+ i, vsi->rx_rings[i].state,
+ vsi->rx_rings[i].queue_index,
+ vsi->rx_rings[i].reg_idx);
+ dev_info(&pf->pdev->dev,
+ " rx_rings[%i]: rx_hdr_len = %d, rx_buf_len = %d, dtype = %d\n",
+ i, vsi->rx_rings[i].rx_hdr_len,
+ vsi->rx_rings[i].rx_buf_len,
+ vsi->rx_rings[i].dtype);
+ dev_info(&pf->pdev->dev,
+ " rx_rings[%i]: hsplit = %d, next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
+ i, vsi->rx_rings[i].hsplit,
+ vsi->rx_rings[i].next_to_use,
+ vsi->rx_rings[i].next_to_clean,
+ vsi->rx_rings[i].ring_active);
+ dev_info(&pf->pdev->dev,
+ " rx_rings[%i]: rx_stats: packets = %lld, bytes = %lld, non_eop_descs = %lld\n",
+ i, vsi->rx_rings[i].rx_stats.packets,
+ vsi->rx_rings[i].rx_stats.bytes,
+ vsi->rx_rings[i].rx_stats.non_eop_descs);
+ dev_info(&pf->pdev->dev,
+ " rx_rings[%i]: rx_stats: alloc_rx_page_failed = %lld, alloc_rx_buff_failed = %lld\n",
+ i,
+ vsi->rx_rings[i].rx_stats.alloc_rx_page_failed,
+ vsi->rx_rings[i].rx_stats.alloc_rx_buff_failed);
+ dev_info(&pf->pdev->dev,
+ " rx_rings[%i]: size = %i, dma = 0x%08lx\n",
+ i, vsi->rx_rings[i].size,
+ (long unsigned int)vsi->rx_rings[i].dma);
+ dev_info(&pf->pdev->dev,
+ " rx_rings[%i]: vsi = %p, q_vector = %p\n",
+ i, vsi->rx_rings[i].vsi,
+ vsi->rx_rings[i].q_vector);
+ }
+ }
+ if (vsi->tx_rings) {
+ for (i = 0; i < vsi->num_queue_pairs; i++) {
+ dev_info(&pf->pdev->dev,
+ " tx_rings[%i]: desc = %p\n",
+ i, vsi->tx_rings[i].desc);
+ dev_info(&pf->pdev->dev,
+ " tx_rings[%i]: dev = %p, netdev = %p, tx_bi = %p\n",
+ i, vsi->tx_rings[i].dev,
+ vsi->tx_rings[i].netdev,
+ vsi->tx_rings[i].tx_bi);
+ dev_info(&pf->pdev->dev,
+ " tx_rings[%i]: state = %li, queue_index = %d, reg_idx = %d\n",
+ i, vsi->tx_rings[i].state,
+ vsi->tx_rings[i].queue_index,
+ vsi->tx_rings[i].reg_idx);
+ dev_info(&pf->pdev->dev,
+ " tx_rings[%i]: dtype = %d\n",
+ i, vsi->tx_rings[i].dtype);
+ dev_info(&pf->pdev->dev,
+ " tx_rings[%i]: hsplit = %d, next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
+ i, vsi->tx_rings[i].hsplit,
+ vsi->tx_rings[i].next_to_use,
+ vsi->tx_rings[i].next_to_clean,
+ vsi->tx_rings[i].ring_active);
+ dev_info(&pf->pdev->dev,
+ " tx_rings[%i]: tx_stats: packets = %lld, bytes = %lld, restart_queue = %lld\n",
+ i, vsi->tx_rings[i].tx_stats.packets,
+ vsi->tx_rings[i].tx_stats.bytes,
+ vsi->tx_rings[i].tx_stats.restart_queue);
+ dev_info(&pf->pdev->dev,
+ " tx_rings[%i]: tx_stats: tx_busy = %lld, completed = %lld, tx_done_old = %lld\n",
+ i,
+ vsi->tx_rings[i].tx_stats.tx_busy,
+ vsi->tx_rings[i].tx_stats.completed,
+ vsi->tx_rings[i].tx_stats.tx_done_old);
+ dev_info(&pf->pdev->dev,
+ " tx_rings[%i]: size = %i, dma = 0x%08lx\n",
+ i, vsi->tx_rings[i].size,
+ (long unsigned int)vsi->tx_rings[i].dma);
+ dev_info(&pf->pdev->dev,
+ " tx_rings[%i]: vsi = %p, q_vector = %p\n",
+ i, vsi->tx_rings[i].vsi,
+ vsi->tx_rings[i].q_vector);
+ dev_info(&pf->pdev->dev,
+ " tx_rings[%i]: DCB tc = %d\n",
+ i, vsi->tx_rings[i].dcb_tc);
+ }
+ }
+ dev_info(&pf->pdev->dev,
+ " work_limit = %d, rx_itr_setting = %d (%s), tx_itr_setting = %d (%s)\n",
+ vsi->work_limit, vsi->rx_itr_setting,
+ ITR_IS_DYNAMIC(vsi->rx_itr_setting) ? "dynamic" : "fixed",
+ vsi->tx_itr_setting,
+ ITR_IS_DYNAMIC(vsi->tx_itr_setting) ? "dynamic" : "fixed");
+ dev_info(&pf->pdev->dev,
+ " max_frame = %d, rx_hdr_len = %d, rx_buf_len = %d dtype = %d\n",
+ vsi->max_frame, vsi->rx_hdr_len, vsi->rx_buf_len, vsi->dtype);
+ if (vsi->q_vectors) {
+ for (i = 0; i < vsi->num_q_vectors; i++) {
+ dev_info(&pf->pdev->dev,
+ " q_vectors[%i]: base index = %ld\n",
+ i, ((long int)vsi->q_vectors[i].rx.ring -
+ (long int)vsi->q_vectors[0].rx.ring) /
+ sizeof(struct i40e_ring));
+ }
+ }
+ dev_info(&pf->pdev->dev,
+ " num_q_vectors = %i, base_vector = %i\n",
+ vsi->num_q_vectors, vsi->base_vector);
+ dev_info(&pf->pdev->dev,
+ " seid = %d, id = %d, uplink_seid = %d\n",
+ vsi->seid, vsi->id, vsi->uplink_seid);
+ dev_info(&pf->pdev->dev,
+ " base_queue = %d, num_queue_pairs = %d, num_desc = %d\n",
+ vsi->base_queue, vsi->num_queue_pairs, vsi->num_desc);
+ dev_info(&pf->pdev->dev, " type = %i\n", vsi->type);
+ dev_info(&pf->pdev->dev,
+ " info: valid_sections = 0x%04x, switch_id = 0x%04x\n",
+ vsi->info.valid_sections, vsi->info.switch_id);
+ dev_info(&pf->pdev->dev,
+ " info: sw_reserved[] = 0x%02x 0x%02x\n",
+ vsi->info.sw_reserved[0], vsi->info.sw_reserved[1]);
+ dev_info(&pf->pdev->dev,
+ " info: sec_flags = 0x%02x, sec_reserved = 0x%02x\n",
+ vsi->info.sec_flags, vsi->info.sec_reserved);
+ dev_info(&pf->pdev->dev,
+ " info: pvid = 0x%04x, fcoe_pvid = 0x%04x, port_vlan_flags = 0x%02x\n",
+ vsi->info.pvid, vsi->info.fcoe_pvid,
+ vsi->info.port_vlan_flags);
+ dev_info(&pf->pdev->dev,
+ " info: pvlan_reserved[] = 0x%02x 0x%02x 0x%02x\n",
+ vsi->info.pvlan_reserved[0], vsi->info.pvlan_reserved[1],
+ vsi->info.pvlan_reserved[2]);
+ dev_info(&pf->pdev->dev,
+ " info: ingress_table = 0x%08x, egress_table = 0x%08x\n",
+ vsi->info.ingress_table, vsi->info.egress_table);
+ dev_info(&pf->pdev->dev,
+ " info: cas_pv_stag = 0x%04x, cas_pv_flags= 0x%02x, cas_pv_reserved = 0x%02x\n",
+ vsi->info.cas_pv_tag, vsi->info.cas_pv_flags,
+ vsi->info.cas_pv_reserved);
+ dev_info(&pf->pdev->dev,
+ " info: queue_mapping[0..7 ] = 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n",
+ vsi->info.queue_mapping[0], vsi->info.queue_mapping[1],
+ vsi->info.queue_mapping[2], vsi->info.queue_mapping[3],
+ vsi->info.queue_mapping[4], vsi->info.queue_mapping[5],
+ vsi->info.queue_mapping[6], vsi->info.queue_mapping[7]);
+ dev_info(&pf->pdev->dev,
+ " info: queue_mapping[8..15] = 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n",
+ vsi->info.queue_mapping[8], vsi->info.queue_mapping[9],
+ vsi->info.queue_mapping[10], vsi->info.queue_mapping[11],
+ vsi->info.queue_mapping[12], vsi->info.queue_mapping[13],
+ vsi->info.queue_mapping[14], vsi->info.queue_mapping[15]);
+ dev_info(&pf->pdev->dev,
+ " info: tc_mapping[] = 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n",
+ vsi->info.tc_mapping[0], vsi->info.tc_mapping[1],
+ vsi->info.tc_mapping[2], vsi->info.tc_mapping[3],
+ vsi->info.tc_mapping[4], vsi->info.tc_mapping[5],
+ vsi->info.tc_mapping[6], vsi->info.tc_mapping[7]);
+ dev_info(&pf->pdev->dev,
+ " info: queueing_opt_flags = 0x%02x queueing_opt_reserved[0..2] = 0x%02x 0x%02x 0x%02x\n",
+ vsi->info.queueing_opt_flags,
+ vsi->info.queueing_opt_reserved[0],
+ vsi->info.queueing_opt_reserved[1],
+ vsi->info.queueing_opt_reserved[2]);
+ dev_info(&pf->pdev->dev,
+ " info: up_enable_bits = 0x%02x\n",
+ vsi->info.up_enable_bits);
+ dev_info(&pf->pdev->dev,
+ " info: sched_reserved = 0x%02x, outer_up_table = 0x%04x\n",
+ vsi->info.sched_reserved, vsi->info.outer_up_table);
+ dev_info(&pf->pdev->dev,
+ " info: cmd_reserved[] = 0x%02x 0x%02x 0x%02x 0x0%02x 0x%02x 0x%02x 0x%02x 0x0%02x\n",
+ vsi->info.cmd_reserved[0], vsi->info.cmd_reserved[1],
+ vsi->info.cmd_reserved[2], vsi->info.cmd_reserved[3],
+ vsi->info.cmd_reserved[4], vsi->info.cmd_reserved[5],
+ vsi->info.cmd_reserved[6], vsi->info.cmd_reserved[7]);
+ dev_info(&pf->pdev->dev,
+ " info: qs_handle[] = 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n",
+ vsi->info.qs_handle[0], vsi->info.qs_handle[1],
+ vsi->info.qs_handle[2], vsi->info.qs_handle[3],
+ vsi->info.qs_handle[4], vsi->info.qs_handle[5],
+ vsi->info.qs_handle[6], vsi->info.qs_handle[7]);
+ dev_info(&pf->pdev->dev,
+ " info: stat_counter_idx = 0x%04x, sched_id = 0x%04x\n",
+ vsi->info.stat_counter_idx, vsi->info.sched_id);
+ dev_info(&pf->pdev->dev,
+ " info: resp_reserved[] = 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x\n",
+ vsi->info.resp_reserved[0], vsi->info.resp_reserved[1],
+ vsi->info.resp_reserved[2], vsi->info.resp_reserved[3],
+ vsi->info.resp_reserved[4], vsi->info.resp_reserved[5],
+ vsi->info.resp_reserved[6], vsi->info.resp_reserved[7],
+ vsi->info.resp_reserved[8], vsi->info.resp_reserved[9],
+ vsi->info.resp_reserved[10], vsi->info.resp_reserved[11]);
+ if (vsi->back)
+ dev_info(&pf->pdev->dev, " pf = %p\n", vsi->back);
+ dev_info(&pf->pdev->dev, " idx = %d\n", vsi->idx);
+ dev_info(&pf->pdev->dev,
+ " tc_config: numtc = %d, enabled_tc = 0x%x\n",
+ vsi->tc_config.numtc, vsi->tc_config.enabled_tc);
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ dev_info(&pf->pdev->dev,
+ " tc_config: tc = %d, qoffset = %d, qcount = %d, netdev_tc = %d\n",
+ i, vsi->tc_config.tc_info[i].qoffset,
+ vsi->tc_config.tc_info[i].qcount,
+ vsi->tc_config.tc_info[i].netdev_tc);
+ }
+ dev_info(&pf->pdev->dev,
+ " bw: bw_limit = %d, bw_max_quanta = %d\n",
+ vsi->bw_limit, vsi->bw_max_quanta);
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ dev_info(&pf->pdev->dev,
+ " bw[%d]: ets_share_credits = %d, ets_limit_credits = %d, max_quanta = %d\n",
+ i, vsi->bw_ets_share_credits[i],
+ vsi->bw_ets_limit_credits[i],
+ vsi->bw_ets_max_quanta[i]);
+ }
+}
+
+/**
+ * i40e_dbg_dump_aq_desc - handles dump aq_desc write into command datum
+ * @pf: the i40e_pf created in command write
+ **/
+static void i40e_dbg_dump_aq_desc(struct i40e_pf *pf)
+{
+ struct i40e_adminq_ring *ring;
+ struct i40e_hw *hw = &pf->hw;
+ int i;
+
+ /* first the send (command) ring, then the receive (event) ring */
+ dev_info(&pf->pdev->dev, "AdminQ Tx Ring\n");
+ ring = &(hw->aq.asq);
+ for (i = 0; i < ring->count; i++) {
+ struct i40e_aq_desc *d = I40E_ADMINQ_DESC(*ring, i);
+ dev_info(&pf->pdev->dev,
+ " at[%02d] flags=0x%04x op=0x%04x dlen=0x%04x ret=0x%04x cookie_h=0x%08x cookie_l=0x%08x\n",
+ i, d->flags, d->opcode, d->datalen, d->retval,
+ d->cookie_high, d->cookie_low);
+ dev_info(&pf->pdev->dev,
+ " %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
+ d->params.raw[0], d->params.raw[1], d->params.raw[2],
+ d->params.raw[3], d->params.raw[4], d->params.raw[5],
+ d->params.raw[6], d->params.raw[7], d->params.raw[8],
+ d->params.raw[9], d->params.raw[10], d->params.raw[11],
+ d->params.raw[12], d->params.raw[13],
+ d->params.raw[14], d->params.raw[15]);
+ }
+
+ dev_info(&pf->pdev->dev, "AdminQ Rx Ring\n");
+ ring = &(hw->aq.arq);
+ for (i = 0; i < ring->count; i++) {
+ struct i40e_aq_desc *d = I40E_ADMINQ_DESC(*ring, i);
+ dev_info(&pf->pdev->dev,
+ " ar[%02d] flags=0x%04x op=0x%04x dlen=0x%04x ret=0x%04x cookie_h=0x%08x cookie_l=0x%08x\n",
+ i, d->flags, d->opcode, d->datalen, d->retval,
+ d->cookie_high, d->cookie_low);
+ dev_info(&pf->pdev->dev,
+ " %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
+ d->params.raw[0], d->params.raw[1], d->params.raw[2],
+ d->params.raw[3], d->params.raw[4], d->params.raw[5],
+ d->params.raw[6], d->params.raw[7], d->params.raw[8],
+ d->params.raw[9], d->params.raw[10], d->params.raw[11],
+ d->params.raw[12], d->params.raw[13],
+ d->params.raw[14], d->params.raw[15]);
+ }
+}
+
+/**
+ * i40e_dbg_dump_desc - handles dump desc write into command datum
+ * @cnt: number of arguments that the user supplied
+ * @vsi_seid: vsi id entered by user
+ * @ring_id: ring id entered by user
+ * @desc_n: descriptor number entered by user
+ * @pf: the i40e_pf created in command write
+ * @is_rx_ring: true if rx, false if tx
+ **/
+static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
+ struct i40e_pf *pf, bool is_rx_ring)
+{
+ union i40e_rx_desc *ds;
+ struct i40e_ring ring;
+ struct i40e_vsi *vsi;
+ int i;
+
+ vsi = i40e_dbg_find_vsi(pf, vsi_seid);
+ if (!vsi) {
+ dev_info(&pf->pdev->dev,
+ "vsi %d not found\n", vsi_seid);
+ if (is_rx_ring)
+ dev_info(&pf->pdev->dev, "dump desc rx <vsi_seid> <ring_id> [<desc_n>]\n");
+ else
+ dev_info(&pf->pdev->dev, "dump desc tx <vsi_seid> <ring_id> [<desc_n>]\n");
+ return;
+ }
+ if (ring_id >= vsi->num_queue_pairs || ring_id < 0) {
+ dev_info(&pf->pdev->dev, "ring %d not found\n", ring_id);
+ if (is_rx_ring)
+ dev_info(&pf->pdev->dev, "dump desc rx <vsi_seid> <ring_id> [<desc_n>]\n");
+ else
+ dev_info(&pf->pdev->dev, "dump desc tx <vsi_seid> <ring_id> [<desc_n>]\n");
+ return;
+ }
+ if (is_rx_ring)
+ ring = vsi->rx_rings[ring_id];
+ else
+ ring = vsi->tx_rings[ring_id];
+ if (cnt == 2) {
+ dev_info(&pf->pdev->dev, "vsi = %02i %s ring = %02i\n",
+ vsi_seid, is_rx_ring ? "rx" : "tx", ring_id);
+ for (i = 0; i < ring.count; i++) {
+ if (is_rx_ring)
+ ds = I40E_RX_DESC(&ring, i);
+ else
+ ds = (union i40e_rx_desc *)
+ I40E_TX_DESC(&ring, i);
+ if ((sizeof(union i40e_rx_desc) ==
+ sizeof(union i40e_16byte_rx_desc)) || (!is_rx_ring))
+ dev_info(&pf->pdev->dev,
+ " d[%03i] = 0x%016llx 0x%016llx\n", i,
+ ds->read.pkt_addr, ds->read.hdr_addr);
+ else
+ dev_info(&pf->pdev->dev,
+ " d[%03i] = 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n",
+ i, ds->read.pkt_addr,
+ ds->read.hdr_addr,
+ ds->read.rsvd1, ds->read.rsvd2);
+ }
+ } else if (cnt == 3) {
+ if (desc_n >= ring.count || desc_n < 0) {
+ dev_info(&pf->pdev->dev,
+ "descriptor %d not found\n", desc_n);
+ return;
+ }
+ if (is_rx_ring)
+ ds = I40E_RX_DESC(&ring, desc_n);
+ else
+ ds = (union i40e_rx_desc *)I40E_TX_DESC(&ring, desc_n);
+ if ((sizeof(union i40e_rx_desc) ==
+ sizeof(union i40e_16byte_rx_desc)) || (!is_rx_ring))
+ dev_info(&pf->pdev->dev,
+ "vsi = %02i %s ring = %02i d[%03i] = 0x%016llx 0x%016llx\n",
+ vsi_seid, is_rx_ring ? "rx" : "tx", ring_id,
+ desc_n, ds->read.pkt_addr, ds->read.hdr_addr);
+ else
+ dev_info(&pf->pdev->dev,
+ "vsi = %02i rx ring = %02i d[%03i] = 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n",
+ vsi_seid, ring_id,
+ desc_n, ds->read.pkt_addr, ds->read.hdr_addr,
+ ds->read.rsvd1, ds->read.rsvd2);
+ } else {
+ if (is_rx_ring)
+ dev_info(&pf->pdev->dev, "dump desc rx <vsi_seid> <ring_id> [<desc_n>]\n");
+ else
+ dev_info(&pf->pdev->dev, "dump desc tx <vsi_seid> <ring_id> [<desc_n>]\n");
+ }
+}
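+
+/* Example (illustrative): writing "dump desc rx 20 0 5" to the command
+ * entry prints descriptor 5 of rx ring 0 on the VSI with seid 20;
+ * omitting <desc_n> prints the whole ring.
+ */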
+
+/**
+ * i40e_dbg_dump_vsi_no_seid - handles dump vsi write into command datum
+ * @pf: the i40e_pf created in command write
+ **/
+static void i40e_dbg_dump_vsi_no_seid(struct i40e_pf *pf)
+{
+ int i;
+
+ for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
+ if (pf->vsi[i])
+ dev_info(&pf->pdev->dev, "dump vsi[%d]: %d\n",
+ i, pf->vsi[i]->seid);
+}
+
+/**
+ * i40e_dbg_dump_eth_stats - handles dump of an eth stats structure
+ * @pf: the i40e_pf created in command write
+ * @estats: the eth stats structure to be dumped
+ **/
+static void i40e_dbg_dump_eth_stats(struct i40e_pf *pf,
+ struct i40e_eth_stats *estats)
+{
+ dev_info(&pf->pdev->dev, " ethstats:\n");
+ dev_info(&pf->pdev->dev,
+ " rx_bytes = \t%lld \trx_unicast = \t\t%lld \trx_multicast = \t%lld\n",
+ estats->rx_bytes, estats->rx_unicast, estats->rx_multicast);
+ dev_info(&pf->pdev->dev,
+ " rx_broadcast = \t%lld \trx_discards = \t\t%lld \trx_errors = \t%lld\n",
+ estats->rx_broadcast, estats->rx_discards, estats->rx_errors);
+ dev_info(&pf->pdev->dev,
+ " rx_missed = \t%lld \trx_unknown_protocol = \t%lld \ttx_bytes = \t%lld\n",
+ estats->rx_missed, estats->rx_unknown_protocol,
+ estats->tx_bytes);
+ dev_info(&pf->pdev->dev,
+ " tx_unicast = \t%lld \ttx_multicast = \t\t%lld \ttx_broadcast = \t%lld\n",
+ estats->tx_unicast, estats->tx_multicast, estats->tx_broadcast);
+ dev_info(&pf->pdev->dev,
+ " tx_discards = \t%lld \ttx_errors = \t\t%lld\n",
+ estats->tx_discards, estats->tx_errors);
+}
+
+/**
+ * i40e_dbg_dump_stats - handles dump stats write into command datum
+ * @pf: the i40e_pf created in command write
+ * @stats: the stats structure to be dumped
+ **/
+static void i40e_dbg_dump_stats(struct i40e_pf *pf,
+ struct i40e_hw_port_stats *stats)
+{
+ int i;
+
+ dev_info(&pf->pdev->dev, " stats:\n");
+ dev_info(&pf->pdev->dev,
+ " crc_errors = \t\t%lld \tillegal_bytes = \t%lld \terror_bytes = \t\t%lld\n",
+ stats->crc_errors, stats->illegal_bytes, stats->error_bytes);
+ dev_info(&pf->pdev->dev,
+ " mac_local_faults = \t%lld \tmac_remote_faults = \t%lld \trx_length_errors = \t%lld\n",
+ stats->mac_local_faults, stats->mac_remote_faults,
+ stats->rx_length_errors);
+ dev_info(&pf->pdev->dev,
+ " link_xon_rx = \t\t%lld \tlink_xoff_rx = \t\t%lld \tlink_xon_tx = \t\t%lld\n",
+ stats->link_xon_rx, stats->link_xoff_rx, stats->link_xon_tx);
+ dev_info(&pf->pdev->dev,
+ " link_xoff_tx = \t\t%lld \trx_size_64 = \t\t%lld \trx_size_127 = \t\t%lld\n",
+ stats->link_xoff_tx, stats->rx_size_64, stats->rx_size_127);
+ dev_info(&pf->pdev->dev,
+ " rx_size_255 = \t\t%lld \trx_size_511 = \t\t%lld \trx_size_1023 = \t\t%lld\n",
+ stats->rx_size_255, stats->rx_size_511, stats->rx_size_1023);
+ dev_info(&pf->pdev->dev,
+ " rx_size_big = \t\t%lld \trx_undersize = \t\t%lld \trx_jabber = \t\t%lld\n",
+ stats->rx_size_big, stats->rx_undersize, stats->rx_jabber);
+ dev_info(&pf->pdev->dev,
+ " rx_fragments = \t\t%lld \trx_oversize = \t\t%lld \ttx_size_64 = \t\t%lld\n",
+ stats->rx_fragments, stats->rx_oversize, stats->tx_size_64);
+ dev_info(&pf->pdev->dev,
+ " tx_size_127 = \t\t%lld \ttx_size_255 = \t\t%lld \ttx_size_511 = \t\t%lld\n",
+ stats->tx_size_127, stats->tx_size_255, stats->tx_size_511);
+ dev_info(&pf->pdev->dev,
+ " tx_size_1023 = \t\t%lld \ttx_size_big = \t\t%lld \tmac_short_packet_dropped = \t%lld\n",
+ stats->tx_size_1023, stats->tx_size_big,
+ stats->mac_short_packet_dropped);
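+
+ /* each of the eight per-priority counters is printed four per line */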
+ for (i = 0; i < 8; i += 4) {
+ dev_info(&pf->pdev->dev,
+ " priority_xon_rx[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld\n",
+ i, stats->priority_xon_rx[i],
+ i+1, stats->priority_xon_rx[i+1],
+ i+2, stats->priority_xon_rx[i+2],
+ i+3, stats->priority_xon_rx[i+3]);
+ }
+ for (i = 0; i < 8; i += 4) {
+ dev_info(&pf->pdev->dev,
+ " priority_xoff_rx[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld\n",
+ i, stats->priority_xoff_rx[i],
+ i+1, stats->priority_xoff_rx[i+1],
+ i+2, stats->priority_xoff_rx[i+2],
+ i+3, stats->priority_xoff_rx[i+3]);
+ }
+ for (i = 0; i < 8; i += 4) {
+ dev_info(&pf->pdev->dev,
+ " priority_xon_tx[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld\n",
+ i, stats->priority_xon_tx[i],
+ i+1, stats->priority_xon_tx[i+1],
+ i+2, stats->priority_xon_tx[i+2],
+ i+3, stats->priority_xon_tx[i+3]);
+ }
+ for (i = 0; i < 8; i += 4) {
+ dev_info(&pf->pdev->dev,
+ " priority_xoff_tx[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld\n",
+ i, stats->priority_xoff_tx[i],
+ i+1, stats->priority_xoff_tx[i+1],
+ i+2, stats->priority_xoff_tx[i+2],
+ i+3, stats->priority_xoff_tx[i+3]);
+ }
+ for (i = 0; i < 8; i += 4) {
+ dev_info(&pf->pdev->dev,
+ " priority_xon_2_xoff[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld\n",
+ i, stats->priority_xon_2_xoff[i],
+ i+1, stats->priority_xon_2_xoff[i+1],
+ i+2, stats->priority_xon_2_xoff[i+2],
+ i+3, stats->priority_xon_2_xoff[i+3]);
+ }
+
+ i40e_dbg_dump_eth_stats(pf, &stats->eth);
+}
+
+/**
+ * i40e_dbg_dump_veb_seid - dumps the stats of a single given VEB
+ * @pf: the i40e_pf created in command write
+ * @seid: the seid the user put in
+ **/
+static void i40e_dbg_dump_veb_seid(struct i40e_pf *pf, int seid)
+{
+ struct i40e_veb *veb;
+
+ if ((seid < I40E_BASE_VEB_SEID) ||
+ (seid >= (I40E_MAX_VEB + I40E_BASE_VEB_SEID))) {
+ dev_info(&pf->pdev->dev, "%d: bad seid\n", seid);
+ return;
+ }
+
+ veb = i40e_dbg_find_veb(pf, seid);
+ if (!veb) {
+ dev_info(&pf->pdev->dev,
+ "%d: can't find veb\n", seid);
+ return;
+ }
+ dev_info(&pf->pdev->dev,
+ "veb idx=%d,%d stats_ic=%d seid=%d uplink=%d\n",
+ veb->idx, veb->veb_idx, veb->stats_idx, veb->seid,
+ veb->uplink_seid);
+ i40e_dbg_dump_eth_stats(pf, &veb->stats);
+}
+
+/**
+ * i40e_dbg_dump_veb_all - dumps the stats of all known VEBs
+ * @pf: the i40e_pf created in command write
+ **/
+static void i40e_dbg_dump_veb_all(struct i40e_pf *pf)
+{
+ struct i40e_veb *veb;
+ int i;
+
+ for (i = 0; i < I40E_MAX_VEB; i++) {
+ veb = pf->veb[i];
+ if (veb)
+ i40e_dbg_dump_veb_seid(pf, veb->seid);
+ }
+}
+
+#define I40E_MAX_DEBUG_OUT_BUFFER (4096*4)
+/**
+ * i40e_dbg_command_write - write into command datum
+ * @filp: the opened file
+ * @buffer: where to find the user's data
+ * @count: the length of the user's data
+ * @ppos: file position offset
+ **/
+static ssize_t i40e_dbg_command_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct i40e_pf *pf = filp->private_data;
+ int bytes_not_copied;
+ struct i40e_vsi *vsi;
+ u8 *print_buf_start;
+ u8 *print_buf;
+ char *cmd_buf;
+ int vsi_seid;
+ int veb_seid;
+ int cnt;
+
+ /* don't allow partial writes */
+ if (*ppos != 0)
+ return 0;
+
+ cmd_buf = kzalloc(count + 1, GFP_KERNEL);
+ if (!cmd_buf)
+ return count;
+ bytes_not_copied = copy_from_user(cmd_buf, buffer, count);
+ if (bytes_not_copied) {
+ kfree(cmd_buf);
+ return -EFAULT;
+ }
+ cmd_buf[count] = '\0';
+
+ print_buf_start = kzalloc(I40E_MAX_DEBUG_OUT_BUFFER, GFP_KERNEL);
+ if (!print_buf_start)
+ goto command_write_done;
+ print_buf = print_buf_start;
+
+ if (strncmp(cmd_buf, "add vsi", 7) == 0) {
+ vsi_seid = -1;
+ cnt = sscanf(&cmd_buf[7], "%i", &vsi_seid);
+ if (cnt == 0) {
+ /* default to PF VSI */
+ vsi_seid = pf->vsi[pf->lan_vsi]->seid;
+ } else if (vsi_seid < 0) {
+ dev_info(&pf->pdev->dev, "add VSI %d: bad vsi seid\n",
+ vsi_seid);
+ goto command_write_done;
+ }
+
+ vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2, vsi_seid, 0);
+ if (vsi)
+ dev_info(&pf->pdev->dev, "added VSI %d to relay %d\n",
+ vsi->seid, vsi->uplink_seid);
+ else
+ dev_info(&pf->pdev->dev, "'%s' failed\n", cmd_buf);
+
+ } else if (strncmp(cmd_buf, "del vsi", 7) == 0) {
+ sscanf(&cmd_buf[7], "%i", &vsi_seid);
+ vsi = i40e_dbg_find_vsi(pf, vsi_seid);
+ if (!vsi) {
+ dev_info(&pf->pdev->dev, "del VSI %d: seid not found\n",
+ vsi_seid);
+ goto command_write_done;
+ }
+
+ dev_info(&pf->pdev->dev, "deleting VSI %d\n", vsi_seid);
+ i40e_vsi_release(vsi);
+
+ } else if (strncmp(cmd_buf, "add relay", 9) == 0) {
+ struct i40e_veb *veb;
+ int uplink_seid, i;
+
+ cnt = sscanf(&cmd_buf[9], "%i %i", &uplink_seid, &vsi_seid);
+ if (cnt != 2) {
+ dev_info(&pf->pdev->dev,
+ "add relay: bad command string, cnt=%d\n",
+ cnt);
+ goto command_write_done;
+ } else if (uplink_seid < 0) {
+ dev_info(&pf->pdev->dev,
+ "add relay %d: bad uplink seid\n",
+ uplink_seid);
+ goto command_write_done;
+ }
+
+ vsi = i40e_dbg_find_vsi(pf, vsi_seid);
+ if (!vsi) {
+ dev_info(&pf->pdev->dev,
+ "add relay: vsi VSI %d not found\n", vsi_seid);
+ goto command_write_done;
+ }
+
+ for (i = 0; i < I40E_MAX_VEB; i++)
+ if (pf->veb[i] && pf->veb[i]->seid == uplink_seid)
+ break;
+ if (i >= I40E_MAX_VEB && uplink_seid != 0 &&
+ uplink_seid != pf->mac_seid) {
+ dev_info(&pf->pdev->dev,
+ "add relay: relay uplink %d not found\n",
+ uplink_seid);
+ goto command_write_done;
+ }
+
+ veb = i40e_veb_setup(pf, 0, uplink_seid, vsi_seid,
+ vsi->tc_config.enabled_tc);
+ if (veb)
+ dev_info(&pf->pdev->dev, "added relay %d\n", veb->seid);
+ else
+ dev_info(&pf->pdev->dev, "add relay failed\n");
+
+ } else if (strncmp(cmd_buf, "del relay", 9) == 0) {
+ int i;
+ cnt = sscanf(&cmd_buf[9], "%i", &veb_seid);
+ if (cnt != 1) {
+ dev_info(&pf->pdev->dev,
+ "del relay: bad command string, cnt=%d\n",
+ cnt);
+ goto command_write_done;
+ } else if (veb_seid < 0) {
+ dev_info(&pf->pdev->dev,
+ "del relay %d: bad relay seid\n", veb_seid);
+ goto command_write_done;
+ }
+
+ /* find the veb */
+ for (i = 0; i < I40E_MAX_VEB; i++)
+ if (pf->veb[i] && pf->veb[i]->seid == veb_seid)
+ break;
+ if (i >= I40E_MAX_VEB) {
+ dev_info(&pf->pdev->dev,
+ "del relay: relay %d not found\n", veb_seid);
+ goto command_write_done;
+ }
+
+ dev_info(&pf->pdev->dev, "deleting relay %d\n", veb_seid);
+ i40e_veb_release(pf->veb[i]);
+
+ } else if (strncmp(cmd_buf, "add macaddr", 11) == 0) {
+ u8 ma[6];
+ int vlan = 0;
+ struct i40e_mac_filter *f;
+ int ret;
+
+ cnt = sscanf(&cmd_buf[11],
+ "%i %hhx:%hhx:%hhx:%hhx:%hhx:%hhx %i",
+ &vsi_seid,
+ &ma[0], &ma[1], &ma[2], &ma[3], &ma[4], &ma[5],
+ &vlan);
+ if (cnt == 7) {
+ vlan = 0;
+ } else if (cnt != 8) {
+ dev_info(&pf->pdev->dev,
+ "add macaddr: bad command string, cnt=%d\n",
+ cnt);
+ goto command_write_done;
+ }
+
+ vsi = i40e_dbg_find_vsi(pf, vsi_seid);
+ if (!vsi) {
+ dev_info(&pf->pdev->dev,
+ "add macaddr: VSI %d not found\n", vsi_seid);
+ goto command_write_done;
+ }
+
+ f = i40e_add_filter(vsi, ma, vlan, false, false);
+ ret = i40e_sync_vsi_filters(vsi);
+ if (f && !ret)
+ dev_info(&pf->pdev->dev,
+ "add macaddr: %pM vlan=%d added to VSI %d\n",
+ ma, vlan, vsi_seid);
+ else
+ dev_info(&pf->pdev->dev,
+ "add macaddr: %pM vlan=%d to VSI %d failed, f=%p ret=%d\n",
+ ma, vlan, vsi_seid, f, ret);
+
+ } else if (strncmp(cmd_buf, "del macaddr", 11) == 0) {
+ u8 ma[6];
+ int vlan = 0;
+ int ret;
+
+ cnt = sscanf(&cmd_buf[11],
+ "%i %hhx:%hhx:%hhx:%hhx:%hhx:%hhx %i",
+ &vsi_seid,
+ &ma[0], &ma[1], &ma[2], &ma[3], &ma[4], &ma[5],
+ &vlan);
+ if (cnt == 7) {
+ vlan = 0;
+ } else if (cnt != 8) {
+ dev_info(&pf->pdev->dev,
+ "del macaddr: bad command string, cnt=%d\n",
+ cnt);
+ goto command_write_done;
+ }
+
+ vsi = i40e_dbg_find_vsi(pf, vsi_seid);
+ if (!vsi) {
+ dev_info(&pf->pdev->dev,
+ "del macaddr: VSI %d not found\n", vsi_seid);
+ goto command_write_done;
+ }
+
+ i40e_del_filter(vsi, ma, vlan, false, false);
+ ret = i40e_sync_vsi_filters(vsi);
+ if (!ret)
+ dev_info(&pf->pdev->dev,
+ "del macaddr: %pM vlan=%d removed from VSI %d\n",
+ ma, vlan, vsi_seid);
+ else
+ dev_info(&pf->pdev->dev,
+ "del macaddr: %pM vlan=%d from VSI %d failed, ret=%d\n",
+ ma, vlan, vsi_seid, ret);
+
+ } else if (strncmp(cmd_buf, "add pvid", 8) == 0) {
+ int v;
+ u16 vid;
+ i40e_status ret;
+
+ cnt = sscanf(&cmd_buf[8], "%i %u", &vsi_seid, &v);
+ if (cnt != 2) {
+ dev_info(&pf->pdev->dev,
+ "add pvid: bad command string, cnt=%d\n", cnt);
+ goto command_write_done;
+ }
+
+ vsi = i40e_dbg_find_vsi(pf, vsi_seid);
+ if (!vsi) {
+ dev_info(&pf->pdev->dev, "add pvid: VSI %d not found\n",
+ vsi_seid);
+ goto command_write_done;
+ }
+
+ vid = (unsigned)v;
+ ret = i40e_vsi_add_pvid(vsi, vid);
+ if (!ret)
+ dev_info(&pf->pdev->dev,
+ "add pvid: %d added to VSI %d\n",
+ vid, vsi_seid);
+ else
+ dev_info(&pf->pdev->dev,
+ "add pvid: %d to VSI %d failed, ret=%d\n",
+ vid, vsi_seid, ret);
+
+ } else if (strncmp(cmd_buf, "del pvid", 8) == 0) {
+
+ cnt = sscanf(&cmd_buf[8], "%i", &vsi_seid);
+ if (cnt != 1) {
+ dev_info(&pf->pdev->dev,
+ "del pvid: bad command string, cnt=%d\n",
+ cnt);
+ goto command_write_done;
+ }
+
+ vsi = i40e_dbg_find_vsi(pf, vsi_seid);
+ if (!vsi) {
+ dev_info(&pf->pdev->dev,
+ "del pvid: VSI %d not found\n", vsi_seid);
+ goto command_write_done;
+ }
+
+ i40e_vsi_remove_pvid(vsi);
+ dev_info(&pf->pdev->dev,
+ "del pvid: removed from VSI %d\n", vsi_seid);
+
+ } else if (strncmp(cmd_buf, "dump", 4) == 0) {
+ if (strncmp(&cmd_buf[5], "switch", 6) == 0) {
+ i40e_fetch_switch_configuration(pf, true);
+ } else if (strncmp(&cmd_buf[5], "vsi", 3) == 0) {
+ cnt = sscanf(&cmd_buf[8], "%i", &vsi_seid);
+ if (cnt > 0)
+ i40e_dbg_dump_vsi_seid(pf, vsi_seid);
+ else
+ i40e_dbg_dump_vsi_no_seid(pf);
+ } else if (strncmp(&cmd_buf[5], "veb", 3) == 0) {
+ cnt = sscanf(&cmd_buf[8], "%i", &vsi_seid);
+ if (cnt > 0)
+ i40e_dbg_dump_veb_seid(pf, vsi_seid);
+ else
+ i40e_dbg_dump_veb_all(pf);
+ } else if (strncmp(&cmd_buf[5], "desc", 4) == 0) {
+ int ring_id, desc_n;
+ if (strncmp(&cmd_buf[10], "rx", 2) == 0) {
+ cnt = sscanf(&cmd_buf[12], "%i %i %i",
+ &vsi_seid, &ring_id, &desc_n);
+ i40e_dbg_dump_desc(cnt, vsi_seid, ring_id,
+ desc_n, pf, true);
+ } else if (strncmp(&cmd_buf[10], "tx", 2)
+ == 0) {
+ cnt = sscanf(&cmd_buf[12], "%i %i %i",
+ &vsi_seid, &ring_id, &desc_n);
+ i40e_dbg_dump_desc(cnt, vsi_seid, ring_id,
+ desc_n, pf, false);
+ } else if (strncmp(&cmd_buf[10], "aq", 2) == 0) {
+ i40e_dbg_dump_aq_desc(pf);
+ } else {
+ dev_info(&pf->pdev->dev,
+ "dump desc tx <vsi_seid> <ring_id> [<desc_n>]\n");
+ dev_info(&pf->pdev->dev,
+ "dump desc rx <vsi_seid> <ring_id> [<desc_n>]\n");
+ dev_info(&pf->pdev->dev, "dump desc aq\n");
+ }
+ } else if (strncmp(&cmd_buf[5], "stats", 5) == 0) {
+ dev_info(&pf->pdev->dev, "pf stats:\n");
+ i40e_dbg_dump_stats(pf, &pf->stats);
+ dev_info(&pf->pdev->dev, "pf stats_offsets:\n");
+ i40e_dbg_dump_stats(pf, &pf->stats_offsets);
+ } else if (strncmp(&cmd_buf[5], "reset stats", 11) == 0) {
+ dev_info(&pf->pdev->dev,
+ "core reset count: %d\n", pf->corer_count);
+ dev_info(&pf->pdev->dev,
+ "global reset count: %d\n", pf->globr_count);
+ dev_info(&pf->pdev->dev,
+ "emp reset count: %d\n", pf->empr_count);
+ dev_info(&pf->pdev->dev,
+ "pf reset count: %d\n", pf->pfr_count);
+ } else if (strncmp(&cmd_buf[5], "port", 4) == 0) {
+ struct i40e_aqc_query_port_ets_config_resp *bw_data;
+ struct i40e_dcbx_config *cfg =
+ &pf->hw.local_dcbx_config;
+ struct i40e_dcbx_config *r_cfg =
+ &pf->hw.remote_dcbx_config;
+ int i, ret;
+
+ bw_data = kzalloc(sizeof(
+ struct i40e_aqc_query_port_ets_config_resp),
+ GFP_KERNEL);
+ if (!bw_data) {
+ ret = -ENOMEM;
+ goto command_write_done;
+ }
+
+ ret = i40e_aq_query_port_ets_config(&pf->hw,
+ pf->mac_seid,
+ bw_data, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Query Port ETS Config AQ command failed =0x%x\n",
+ pf->hw.aq.asq_last_status);
+ kfree(bw_data);
+ bw_data = NULL;
+ goto command_write_done;
+ }
+ dev_info(&pf->pdev->dev,
+ "port bw: tc_valid=0x%x tc_strict_prio=0x%x, tc_bw_max=0x%04x,0x%04x\n",
+ bw_data->tc_valid_bits,
+ bw_data->tc_strict_priority_bits,
+ le16_to_cpu(bw_data->tc_bw_max[0]),
+ le16_to_cpu(bw_data->tc_bw_max[1]));
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ dev_info(&pf->pdev->dev, "port bw: tc_bw_share=%d tc_bw_limit=%d\n",
+ bw_data->tc_bw_share_credits[i],
+ le16_to_cpu(bw_data->tc_bw_limits[i]));
+ }
+
+ kfree(bw_data);
+ bw_data = NULL;
+
+ dev_info(&pf->pdev->dev,
+ "port ets_cfg: willing=%d cbs=%d, maxtcs=%d\n",
+ cfg->etscfg.willing, cfg->etscfg.cbs,
+ cfg->etscfg.maxtcs);
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ dev_info(&pf->pdev->dev, "port ets_cfg: %d prio_tc=%d tcbw=%d tctsa=%d\n",
+ i, cfg->etscfg.prioritytable[i],
+ cfg->etscfg.tcbwtable[i],
+ cfg->etscfg.tsatable[i]);
+ }
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ dev_info(&pf->pdev->dev, "port ets_rec: %d prio_tc=%d tcbw=%d tctsa=%d\n",
+ i, cfg->etsrec.prioritytable[i],
+ cfg->etsrec.tcbwtable[i],
+ cfg->etsrec.tsatable[i]);
+ }
+ dev_info(&pf->pdev->dev,
+ "port pfc_cfg: willing=%d mbc=%d, pfccap=%d pfcenable=0x%x\n",
+ cfg->pfc.willing, cfg->pfc.mbc,
+ cfg->pfc.pfccap, cfg->pfc.pfcenable);
+ dev_info(&pf->pdev->dev,
+ "port app_table: num_apps=%d\n", cfg->numapps);
+ for (i = 0; i < cfg->numapps; i++) {
+ dev_info(&pf->pdev->dev, "port app_table: %d prio=%d selector=%d protocol=0x%x\n",
+ i, cfg->app[i].priority,
+ cfg->app[i].selector,
+ cfg->app[i].protocolid);
+ }
+ /* Peer TLV DCBX data */
+ dev_info(&pf->pdev->dev,
+ "remote port ets_cfg: willing=%d cbs=%d, maxtcs=%d\n",
+ r_cfg->etscfg.willing,
+ r_cfg->etscfg.cbs, r_cfg->etscfg.maxtcs);
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ dev_info(&pf->pdev->dev, "remote port ets_cfg: %d prio_tc=%d tcbw=%d tctsa=%d\n",
+ i, r_cfg->etscfg.prioritytable[i],
+ r_cfg->etscfg.tcbwtable[i],
+ r_cfg->etscfg.tsatable[i]);
+ }
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ dev_info(&pf->pdev->dev, "remote port ets_rec: %d prio_tc=%d tcbw=%d tctsa=%d\n",
+ i, r_cfg->etsrec.prioritytable[i],
+ r_cfg->etsrec.tcbwtable[i],
+ r_cfg->etsrec.tsatable[i]);
+ }
+ dev_info(&pf->pdev->dev,
+ "remote port pfc_cfg: willing=%d mbc=%d, pfccap=%d pfcenable=0x%x\n",
+ r_cfg->pfc.willing,
+ r_cfg->pfc.mbc,
+ r_cfg->pfc.pfccap,
+ r_cfg->pfc.pfcenable);
+ dev_info(&pf->pdev->dev,
+ "remote port app_table: num_apps=%d\n",
+ r_cfg->numapps);
+ for (i = 0; i < r_cfg->numapps; i++) {
+ dev_info(&pf->pdev->dev, "remote port app_table: %d prio=%d selector=%d protocol=0x%x\n",
+ i, r_cfg->app[i].priority,
+ r_cfg->app[i].selector,
+ r_cfg->app[i].protocolid);
+ }
+ } else {
+ dev_info(&pf->pdev->dev,
+ "dump desc tx <vsi_seid> <ring_id> [<desc_n>], dump desc rx <vsi_seid> <ring_id> [<desc_n>],\n");
+ dev_info(&pf->pdev->dev, "dump switch, dump vsi [seid] or\n");
+ dev_info(&pf->pdev->dev, "dump stats\n");
+ dev_info(&pf->pdev->dev, "dump reset stats\n");
+ dev_info(&pf->pdev->dev, "dump port\n");
+ dev_info(&pf->pdev->dev,
+ "dump debug fwdata <cluster_id> <table_id> <index>\n");
+ }
+
+ } else if (strncmp(cmd_buf, "msg_enable", 10) == 0) {
+ u32 level;
+ cnt = sscanf(&cmd_buf[10], "%i", &level);
+ if (cnt) {
+ if (I40E_DEBUG_USER & level) {
+ pf->hw.debug_mask = level;
+ dev_info(&pf->pdev->dev,
+ "set hw.debug_mask = 0x%08x\n",
+ pf->hw.debug_mask);
+ }
+ pf->msg_enable = level;
+ dev_info(&pf->pdev->dev, "set msg_enable = 0x%08x\n",
+ pf->msg_enable);
+ } else {
+ dev_info(&pf->pdev->dev, "msg_enable = 0x%08x\n",
+ pf->msg_enable);
+ }
+ } else if (strncmp(cmd_buf, "pfr", 3) == 0) {
+ dev_info(&pf->pdev->dev, "forcing PFR\n");
+ i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
+
+ } else if (strncmp(cmd_buf, "corer", 5) == 0) {
+ dev_info(&pf->pdev->dev, "forcing CoreR\n");
+ i40e_do_reset(pf, (1 << __I40E_CORE_RESET_REQUESTED));
+
+ } else if (strncmp(cmd_buf, "globr", 5) == 0) {
+ dev_info(&pf->pdev->dev, "forcing GlobR\n");
+ i40e_do_reset(pf, (1 << __I40E_GLOBAL_RESET_REQUESTED));
+
+ } else if (strncmp(cmd_buf, "read", 4) == 0) {
+ u32 address;
+ u32 value;
+ cnt = sscanf(&cmd_buf[4], "%x", &address);
+ if (cnt != 1) {
+ dev_info(&pf->pdev->dev, "read <reg>\n");
+ goto command_write_done;
+ }
+
+ /* check the range on address */
+ if (address >= I40E_MAX_REGISTER) {
+ dev_info(&pf->pdev->dev, "read reg address 0x%08x too large\n",
+ address);
+ goto command_write_done;
+ }
+
+ value = rd32(&pf->hw, address);
+ dev_info(&pf->pdev->dev, "read: 0x%08x = 0x%08x\n",
+ address, value);
+
+ } else if (strncmp(cmd_buf, "write", 5) == 0) {
+ u32 address, value;
+ cnt = sscanf(&cmd_buf[5], "%x %x", &address, &value);
+ if (cnt != 2) {
+ dev_info(&pf->pdev->dev, "write <reg> <value>\n");
+ goto command_write_done;
+ }
+
+ /* check the range on address */
+ if (address >= I40E_MAX_REGISTER) {
+ dev_info(&pf->pdev->dev, "write reg address 0x%08x too large\n",
+ address);
+ goto command_write_done;
+ }
+ wr32(&pf->hw, address, value);
+ value = rd32(&pf->hw, address);
+ dev_info(&pf->pdev->dev, "write: 0x%08x = 0x%08x\n",
+ address, value);
+ } else if (strncmp(cmd_buf, "clear_stats", 11) == 0) {
+ if (strncmp(&cmd_buf[12], "vsi", 3) == 0) {
+ cnt = sscanf(&cmd_buf[15], "%d", &vsi_seid);
+ if (cnt == 0) {
+ int i;
+ for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
+ i40e_vsi_reset_stats(pf->vsi[i]);
+ dev_info(&pf->pdev->dev, "vsi clear stats called for all vsi's\n");
+ } else if (cnt == 1) {
+ vsi = i40e_dbg_find_vsi(pf, vsi_seid);
+ if (!vsi) {
+ dev_info(&pf->pdev->dev,
+ "clear_stats vsi: bad vsi %d\n",
+ vsi_seid);
+ goto command_write_done;
+ }
+ i40e_vsi_reset_stats(vsi);
+ dev_info(&pf->pdev->dev,
+ "vsi clear stats called for vsi %d\n",
+ vsi_seid);
+ } else {
+ dev_info(&pf->pdev->dev, "clear_stats vsi [seid]\n");
+ }
+ } else if (strncmp(&cmd_buf[12], "pf", 2) == 0) {
+ i40e_pf_reset_stats(pf);
+ dev_info(&pf->pdev->dev, "pf clear stats called\n");
+ } else {
+ dev_info(&pf->pdev->dev, "clear_stats vsi [seid] or clear_stats pf\n");
+ }
+ } else if ((strncmp(cmd_buf, "add fd_filter", 13) == 0) ||
+ (strncmp(cmd_buf, "rem fd_filter", 13) == 0)) {
+ struct i40e_fdir_data fd_data;
+ int ret;
+ u16 packet_len, i, j = 0;
+ char *asc_packet;
+ bool add = false;
+
+ asc_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_LOOKUP,
+ GFP_KERNEL);
+ if (!asc_packet)
+ goto command_write_done;
+
+ fd_data.raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_LOOKUP,
+ GFP_KERNEL);
+
+ if (!fd_data.raw_packet) {
+ kfree(asc_packet);
+ asc_packet = NULL;
+ goto command_write_done;
+ }
+
+ if (strncmp(cmd_buf, "add", 3) == 0)
+ add = true;
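+ /* ten whitespace-separated fields: q_index, flex_off, pctype,
+ * dest_vsi, dest_ctl, fd_status, cnt_index, fd_id, packet_len,
+ * and the raw packet given as a hex string */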
+ cnt = sscanf(&cmd_buf[13],
+ "%hx %2hhx %2hhx %hx %2hhx %2hhx %hx %x %hd %512s",
+ &fd_data.q_index,
+ &fd_data.flex_off, &fd_data.pctype,
+ &fd_data.dest_vsi, &fd_data.dest_ctl,
+ &fd_data.fd_status, &fd_data.cnt_index,
+ &fd_data.fd_id, &packet_len, asc_packet);
+ if (cnt != 10) {
+ dev_info(&pf->pdev->dev,
+ "program fd_filter: bad command string, cnt=%d\n",
+ cnt);
+ kfree(asc_packet);
+ asc_packet = NULL;
+ kfree(fd_data.raw_packet);
+ goto command_write_done;
+ }
+
+ /* fix packet length if user entered 0 */
+ if (packet_len == 0)
+ packet_len = I40E_FDIR_MAX_RAW_PACKET_LOOKUP;
+
+ /* make sure to check the max as well */
+ packet_len = min_t(u16,
+ packet_len, I40E_FDIR_MAX_RAW_PACKET_LOOKUP);
+
+ dev_info(&pf->pdev->dev, "FD raw packet:\n");
+ for (i = 0; i < packet_len; i++) {
+ sscanf(&asc_packet[j], "%2hhx ",
+ &fd_data.raw_packet[i]);
+ j += 3;
+			snprintf(print_buf, 4, "%02x ", fd_data.raw_packet[i]);
+ print_buf += 3;
+ if ((i % 16) == 15) {
+				snprintf(print_buf, 2, "\n");
+ print_buf++;
+ }
+ }
+ dev_info(&pf->pdev->dev, "%s\n", print_buf_start);
+ ret = i40e_program_fdir_filter(&fd_data, pf, add);
+ if (!ret) {
+ dev_info(&pf->pdev->dev, "Filter command send Status : Success\n");
+ } else {
+ dev_info(&pf->pdev->dev,
+ "Filter command send failed %d\n", ret);
+ }
+ kfree(fd_data.raw_packet);
+ fd_data.raw_packet = NULL;
+ kfree(asc_packet);
+ asc_packet = NULL;
+ } else if (strncmp(cmd_buf, "lldp", 4) == 0) {
+ if (strncmp(&cmd_buf[5], "stop", 4) == 0) {
+ int ret;
+ ret = i40e_aq_stop_lldp(&pf->hw, false, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Stop LLDP AQ command failed =0x%x\n",
+ pf->hw.aq.asq_last_status);
+ goto command_write_done;
+ }
+ } else if (strncmp(&cmd_buf[5], "start", 5) == 0) {
+ int ret;
+ ret = i40e_aq_start_lldp(&pf->hw, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Start LLDP AQ command failed =0x%x\n",
+ pf->hw.aq.asq_last_status);
+ goto command_write_done;
+ }
+ } else if (strncmp(&cmd_buf[5],
+ "get local", 9) == 0) {
+ int ret, i;
+ u8 *buff;
+ u16 llen, rlen;
+ buff = kzalloc(I40E_LLDPDU_SIZE, GFP_KERNEL);
+ if (!buff)
+ goto command_write_done;
+
+ ret = i40e_aq_get_lldp_mib(&pf->hw, 0,
+ I40E_AQ_LLDP_MIB_LOCAL,
+ buff, I40E_LLDPDU_SIZE,
+ &llen, &rlen, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Get LLDP MIB (local) AQ command failed =0x%x\n",
+ pf->hw.aq.asq_last_status);
+ kfree(buff);
+ buff = NULL;
+ goto command_write_done;
+ }
+ dev_info(&pf->pdev->dev,
+ "Get LLDP MIB (local) AQ buffer written back:\n");
+ for (i = 0; i < I40E_LLDPDU_SIZE; i++) {
+				snprintf(print_buf, 4, "%02x ", buff[i]);
+ print_buf += 3;
+ if ((i % 16) == 15) {
+					snprintf(print_buf, 2, "\n");
+ print_buf++;
+ }
+ }
+ dev_info(&pf->pdev->dev, "%s\n", print_buf_start);
+ kfree(buff);
+ buff = NULL;
+ } else if (strncmp(&cmd_buf[5], "get remote", 10) == 0) {
+ int ret, i;
+ u8 *buff;
+ u16 llen, rlen;
+ buff = kzalloc(I40E_LLDPDU_SIZE, GFP_KERNEL);
+ if (!buff)
+ goto command_write_done;
+
+ ret = i40e_aq_get_lldp_mib(&pf->hw,
+ I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
+					I40E_AQ_LLDP_MIB_REMOTE,
+ buff, I40E_LLDPDU_SIZE,
+ &llen, &rlen, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Get LLDP MIB (remote) AQ command failed =0x%x\n",
+ pf->hw.aq.asq_last_status);
+ kfree(buff);
+ buff = NULL;
+ goto command_write_done;
+ }
+ dev_info(&pf->pdev->dev,
+ "Get LLDP MIB (remote) AQ buffer written back:\n");
+ for (i = 0; i < I40E_LLDPDU_SIZE; i++) {
+				snprintf(print_buf, 4, "%02x ", buff[i]);
+ print_buf += 3;
+ if ((i % 16) == 15) {
+					snprintf(print_buf, 2, "\n");
+ print_buf++;
+ }
+ }
+ dev_info(&pf->pdev->dev, "%s\n", print_buf_start);
+ kfree(buff);
+ buff = NULL;
+ } else if (strncmp(&cmd_buf[5], "event on", 8) == 0) {
+ int ret;
+ ret = i40e_aq_cfg_lldp_mib_change_event(&pf->hw,
+ true, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Config LLDP MIB Change Event (on) AQ command failed =0x%x\n",
+ pf->hw.aq.asq_last_status);
+ goto command_write_done;
+ }
+ } else if (strncmp(&cmd_buf[5], "event off", 9) == 0) {
+ int ret;
+ ret = i40e_aq_cfg_lldp_mib_change_event(&pf->hw,
+ false, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Config LLDP MIB Change Event (off) AQ command failed =0x%x\n",
+ pf->hw.aq.asq_last_status);
+ goto command_write_done;
+ }
+ }
+ } else if (strncmp(cmd_buf, "nvm read", 8) == 0) {
+ u16 buffer_len, i, bytes;
+ u16 module;
+ u32 offset;
+ u16 *buff;
+ int ret;
+
+ cnt = sscanf(&cmd_buf[8], "%hx %x %hx",
+ &module, &offset, &buffer_len);
+ if (cnt == 0) {
+ module = 0;
+ offset = 0;
+ buffer_len = 0;
+ } else if (cnt == 1) {
+ offset = 0;
+ buffer_len = 0;
+ } else if (cnt == 2) {
+ buffer_len = 0;
+ } else if (cnt > 3) {
+ dev_info(&pf->pdev->dev,
+ "nvm read: bad command string, cnt=%d\n", cnt);
+ goto command_write_done;
+ }
+
+		/* default to reading 512 words */
+ if (buffer_len == 0)
+ buffer_len = 512;
+
+ bytes = 2 * buffer_len;
+ buff = kzalloc(bytes, GFP_KERNEL);
+ if (!buff)
+ goto command_write_done;
+
+ ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Failed Acquiring NVM resource for read err=%d status=0x%x\n",
+ ret, pf->hw.aq.asq_last_status);
+ kfree(buff);
+ goto command_write_done;
+ }
+
+ ret = i40e_aq_read_nvm(&pf->hw, module, (2 * offset),
+ bytes, (u8 *)buff, true, NULL);
+ i40e_release_nvm(&pf->hw);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Read NVM AQ failed err=%d status=0x%x\n",
+ ret, pf->hw.aq.asq_last_status);
+ } else {
+ dev_info(&pf->pdev->dev,
+ "Read NVM module=0x%x offset=0x%x words=%d\n",
+ module, offset, buffer_len);
+ for (i = 0; i < buffer_len; i++) {
+ if ((i % 16) == 0) {
+					snprintf(print_buf, 14, "\n0x%08x: ",
+						 offset + i);
+					print_buf += 13;
+ }
+				snprintf(print_buf, 6, "%04x ", buff[i]);
+ print_buf += 5;
+ }
+ dev_info(&pf->pdev->dev, "%s\n", print_buf_start);
+ }
+ kfree(buff);
+ buff = NULL;
+ } else {
+ dev_info(&pf->pdev->dev, "unknown command '%s'\n", cmd_buf);
+ dev_info(&pf->pdev->dev, "available commands\n");
+ dev_info(&pf->pdev->dev, " add vsi [relay_seid]\n");
+ dev_info(&pf->pdev->dev, " del vsi [vsi_seid]\n");
+ dev_info(&pf->pdev->dev, " add relay <uplink_seid> <vsi_seid>\n");
+ dev_info(&pf->pdev->dev, " del relay <relay_seid>\n");
+ dev_info(&pf->pdev->dev, " add macaddr <vsi_seid> <aa:bb:cc:dd:ee:ff> [vlan]\n");
+ dev_info(&pf->pdev->dev, " del macaddr <vsi_seid> <aa:bb:cc:dd:ee:ff> [vlan]\n");
+ dev_info(&pf->pdev->dev, " add pvid <vsi_seid> <vid>\n");
+ dev_info(&pf->pdev->dev, " del pvid <vsi_seid>\n");
+ dev_info(&pf->pdev->dev, " dump switch\n");
+ dev_info(&pf->pdev->dev, " dump vsi [seid]\n");
+ dev_info(&pf->pdev->dev, " dump desc tx <vsi_seid> <ring_id> [<desc_n>]\n");
+ dev_info(&pf->pdev->dev, " dump desc rx <vsi_seid> <ring_id> [<desc_n>]\n");
+ dev_info(&pf->pdev->dev, " dump desc aq\n");
+ dev_info(&pf->pdev->dev, " dump stats\n");
+ dev_info(&pf->pdev->dev, " dump reset stats\n");
+ dev_info(&pf->pdev->dev, " msg_enable [level]\n");
+ dev_info(&pf->pdev->dev, " read <reg>\n");
+ dev_info(&pf->pdev->dev, " write <reg> <value>\n");
+ dev_info(&pf->pdev->dev, " clear_stats vsi [seid]\n");
+ dev_info(&pf->pdev->dev, " clear_stats pf\n");
+ dev_info(&pf->pdev->dev, " pfr\n");
+ dev_info(&pf->pdev->dev, " corer\n");
+ dev_info(&pf->pdev->dev, " globr\n");
+ dev_info(&pf->pdev->dev, " add fd_filter <dest q_index> <flex_off> <pctype> <dest_vsi> <dest_ctl> <fd_status> <cnt_index> <fd_id> <packet_len> <packet>\n");
+ dev_info(&pf->pdev->dev, " rem fd_filter <dest q_index> <flex_off> <pctype> <dest_vsi> <dest_ctl> <fd_status> <cnt_index> <fd_id> <packet_len> <packet>\n");
+ dev_info(&pf->pdev->dev, " lldp start\n");
+ dev_info(&pf->pdev->dev, " lldp stop\n");
+ dev_info(&pf->pdev->dev, " lldp get local\n");
+ dev_info(&pf->pdev->dev, " lldp get remote\n");
+ dev_info(&pf->pdev->dev, " lldp event on\n");
+ dev_info(&pf->pdev->dev, " lldp event off\n");
+ dev_info(&pf->pdev->dev, " nvm read [module] [word_offset] [word_count]\n");
+ }
+
+command_write_done:
+ kfree(cmd_buf);
+ cmd_buf = NULL;
+ kfree(print_buf_start);
+ print_buf = NULL;
+ print_buf_start = NULL;
+ return count;
+}
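For reference, the fd_filter branch above converts the user's space-separated ASCII hex bytes into a raw packet with sscanf("%2hhx"), stepping the read cursor three characters per "xx " group. A minimal standalone sketch of that parsing idiom (ordinary userspace C, not part of this patch):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const char *asc_packet = "de ad be ef";	/* user-style input */
	uint8_t raw_packet[4];
	int i, j = 0;

	for (i = 0; i < 4; i++) {
		/* two hex digits -> one byte; skip digits plus space */
		sscanf(&asc_packet[j], "%2hhx", &raw_packet[i]);
		j += 3;
	}
	for (i = 0; i < 4; i++)
		printf("%02x ", raw_packet[i]);
	printf("\n");
	return 0;
}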
+
+static const struct file_operations i40e_dbg_command_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = i40e_dbg_command_read,
+ .write = i40e_dbg_command_write,
+};
+
+/**************************************************************
+ * netdev_ops
+ * The netdev_ops entry in debugfs is for giving the driver commands
+ * to be executed from the netdev operations.
+ **************************************************************/
+static char i40e_dbg_netdev_ops_buf[256] = "hello world";
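As a usage sketch (the debugfs path is an assumption, not taken from this patch): a command such as `echo "napi 5" > /sys/kernel/debug/i40e/0000:00:01.0/netdev_ops` lands in i40e_dbg_netdev_ops_write() below, which matches the leading token with strncmp() and parses the VSI SEID with sscanf().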
+
+/**
+ * i40e_dbg_netdev_ops - read for netdev_ops datum
+ * @filp: the opened file
+ * @buffer: where to write the data for the user to read
+ * @count: the size of the user's buffer
+ * @ppos: file position offset
+ **/
+static ssize_t i40e_dbg_netdev_ops_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct i40e_pf *pf = filp->private_data;
+ int bytes_not_copied;
+ int buf_size = 256;
+ char *buf;
+ int len;
+
+	/* don't allow partial reads */
+ if (*ppos != 0)
+ return 0;
+ if (count < buf_size)
+ return -ENOSPC;
+
+ buf = kzalloc(buf_size, GFP_KERNEL);
+ if (!buf)
+		return -ENOMEM;
+
+ len = snprintf(buf, buf_size, "%s: %s\n",
+ pf->vsi[pf->lan_vsi]->netdev->name,
+ i40e_dbg_netdev_ops_buf);
+
+ bytes_not_copied = copy_to_user(buffer, buf, len);
+ kfree(buf);
+
+	if (bytes_not_copied)
+		return -EFAULT;
+
+ *ppos = len;
+ return len;
+}
+
+/**
+ * i40e_dbg_netdev_ops_write - write into netdev_ops datum
+ * @filp: the opened file
+ * @buffer: where to find the user's data
+ * @count: the length of the user's data
+ * @ppos: file position offset
+ **/
+static ssize_t i40e_dbg_netdev_ops_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct i40e_pf *pf = filp->private_data;
+ int bytes_not_copied;
+ struct i40e_vsi *vsi;
+ int vsi_seid;
+ int i, cnt;
+
+ /* don't allow partial writes */
+ if (*ppos != 0)
+ return 0;
+ if (count >= sizeof(i40e_dbg_netdev_ops_buf))
+ return -ENOSPC;
+
+ memset(i40e_dbg_netdev_ops_buf, 0, sizeof(i40e_dbg_netdev_ops_buf));
+ bytes_not_copied = copy_from_user(i40e_dbg_netdev_ops_buf,
+ buffer, count);
+	if (bytes_not_copied)
+		return -EFAULT;
+ i40e_dbg_netdev_ops_buf[count] = '\0';
+
+ if (strncmp(i40e_dbg_netdev_ops_buf, "tx_timeout", 10) == 0) {
+ cnt = sscanf(&i40e_dbg_netdev_ops_buf[11], "%i", &vsi_seid);
+ if (cnt != 1) {
+ dev_info(&pf->pdev->dev, "tx_timeout <vsi_seid>\n");
+ goto netdev_ops_write_done;
+ }
+ vsi = i40e_dbg_find_vsi(pf, vsi_seid);
+ if (!vsi) {
+ dev_info(&pf->pdev->dev,
+ "tx_timeout: VSI %d not found\n", vsi_seid);
+ goto netdev_ops_write_done;
+ }
+ if (rtnl_trylock()) {
+ vsi->netdev->netdev_ops->ndo_tx_timeout(vsi->netdev);
+ rtnl_unlock();
+ dev_info(&pf->pdev->dev, "tx_timeout called\n");
+ } else {
+ dev_info(&pf->pdev->dev, "Could not acquire RTNL - please try again\n");
+ }
+ } else if (strncmp(i40e_dbg_netdev_ops_buf, "change_mtu", 10) == 0) {
+ int mtu;
+ cnt = sscanf(&i40e_dbg_netdev_ops_buf[11], "%i %i",
+ &vsi_seid, &mtu);
+ if (cnt != 2) {
+ dev_info(&pf->pdev->dev, "change_mtu <vsi_seid> <mtu>\n");
+ goto netdev_ops_write_done;
+ }
+ vsi = i40e_dbg_find_vsi(pf, vsi_seid);
+ if (!vsi) {
+ dev_info(&pf->pdev->dev,
+ "change_mtu: VSI %d not found\n", vsi_seid);
+ goto netdev_ops_write_done;
+ }
+ if (rtnl_trylock()) {
+ vsi->netdev->netdev_ops->ndo_change_mtu(vsi->netdev,
+ mtu);
+ rtnl_unlock();
+ dev_info(&pf->pdev->dev, "change_mtu called\n");
+ } else {
+ dev_info(&pf->pdev->dev, "Could not acquire RTNL - please try again\n");
+ }
+
+ } else if (strncmp(i40e_dbg_netdev_ops_buf, "set_rx_mode", 11) == 0) {
+ cnt = sscanf(&i40e_dbg_netdev_ops_buf[11], "%i", &vsi_seid);
+ if (cnt != 1) {
+ dev_info(&pf->pdev->dev, "set_rx_mode <vsi_seid>\n");
+ goto netdev_ops_write_done;
+ }
+ vsi = i40e_dbg_find_vsi(pf, vsi_seid);
+ if (!vsi) {
+ dev_info(&pf->pdev->dev,
+ "set_rx_mode: VSI %d not found\n", vsi_seid);
+ goto netdev_ops_write_done;
+ }
+ if (rtnl_trylock()) {
+ vsi->netdev->netdev_ops->ndo_set_rx_mode(vsi->netdev);
+ rtnl_unlock();
+ dev_info(&pf->pdev->dev, "set_rx_mode called\n");
+ } else {
+ dev_info(&pf->pdev->dev, "Could not acquire RTNL - please try again\n");
+ }
+
+ } else if (strncmp(i40e_dbg_netdev_ops_buf, "napi", 4) == 0) {
+ cnt = sscanf(&i40e_dbg_netdev_ops_buf[4], "%i", &vsi_seid);
+ if (cnt != 1) {
+ dev_info(&pf->pdev->dev, "napi <vsi_seid>\n");
+ goto netdev_ops_write_done;
+ }
+ vsi = i40e_dbg_find_vsi(pf, vsi_seid);
+ if (!vsi) {
+ dev_info(&pf->pdev->dev, "napi: VSI %d not found\n",
+ vsi_seid);
+ goto netdev_ops_write_done;
+ }
+ for (i = 0; i < vsi->num_q_vectors; i++)
+ napi_schedule(&vsi->q_vectors[i].napi);
+ dev_info(&pf->pdev->dev, "napi called\n");
+ } else {
+ dev_info(&pf->pdev->dev, "unknown command '%s'\n",
+ i40e_dbg_netdev_ops_buf);
+ dev_info(&pf->pdev->dev, "available commands\n");
+ dev_info(&pf->pdev->dev, " tx_timeout <vsi_seid>\n");
+ dev_info(&pf->pdev->dev, " change_mtu <vsi_seid> <mtu>\n");
+ dev_info(&pf->pdev->dev, " set_rx_mode <vsi_seid>\n");
+ dev_info(&pf->pdev->dev, " napi <vsi_seid>\n");
+ }
+netdev_ops_write_done:
+ return count;
+}
+
+static const struct file_operations i40e_dbg_netdev_ops_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = i40e_dbg_netdev_ops_read,
+ .write = i40e_dbg_netdev_ops_write,
+};
+
+/**
+ * i40e_dbg_pf_init - setup the debugfs directory for the pf
+ * @pf: the pf that is starting up
+ **/
+void i40e_dbg_pf_init(struct i40e_pf *pf)
+{
+ struct dentry *pfile __attribute__((unused));
+ const char *name = pci_name(pf->pdev);
+
+ pf->i40e_dbg_pf = debugfs_create_dir(name, i40e_dbg_root);
+ if (pf->i40e_dbg_pf) {
+ pfile = debugfs_create_file("command", 0600, pf->i40e_dbg_pf,
+ pf, &i40e_dbg_command_fops);
+ pfile = debugfs_create_file("dump", 0600, pf->i40e_dbg_pf, pf,
+ &i40e_dbg_dump_fops);
+ pfile = debugfs_create_file("netdev_ops", 0600, pf->i40e_dbg_pf,
+ pf, &i40e_dbg_netdev_ops_fops);
+ } else {
+ dev_info(&pf->pdev->dev,
+ "debugfs entry for %s failed\n", name);
+ }
+}
+
+/**
+ * i40e_dbg_pf_exit - clear out the pf's debugfs entries
+ * @pf: the pf that is stopping
+ **/
+void i40e_dbg_pf_exit(struct i40e_pf *pf)
+{
+ debugfs_remove_recursive(pf->i40e_dbg_pf);
+ pf->i40e_dbg_pf = NULL;
+
+ kfree(i40e_dbg_dump_buf);
+ i40e_dbg_dump_buf = NULL;
+}
+
+/**
+ * i40e_dbg_init - start up debugfs for the driver
+ **/
+void i40e_dbg_init(void)
+{
+ i40e_dbg_root = debugfs_create_dir(i40e_driver_name, NULL);
+ if (!i40e_dbg_root)
+ pr_info("init of debugfs failed\n");
+}
+
+/**
+ * i40e_dbg_exit - clean out the driver's debugfs entries
+ **/
+void i40e_dbg_exit(void)
+{
+ debugfs_remove_recursive(i40e_dbg_root);
+ i40e_dbg_root = NULL;
+}
+
+#endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_diag.c b/drivers/net/ethernet/intel/i40e/i40e_diag.c
new file mode 100644
index 0000000..de25514
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_diag.c
@@ -0,0 +1,131 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#include "i40e_diag.h"
+#include "i40e_prototype.h"
+
+/**
+ * i40e_diag_reg_pattern_test
+ * @hw: pointer to the hw struct
+ * @reg: reg to be tested
+ * @mask: bits to be touched
+ **/
+static i40e_status i40e_diag_reg_pattern_test(struct i40e_hw *hw,
+ u32 reg, u32 mask)
+{
+ const u32 patterns[] = {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
+ u32 pat, val, orig_val;
+ int i;
+
+ orig_val = rd32(hw, reg);
+ for (i = 0; i < ARRAY_SIZE(patterns); i++) {
+ pat = patterns[i];
+ wr32(hw, reg, (pat & mask));
+ val = rd32(hw, reg);
+ if ((val & mask) != (pat & mask)) {
+ i40e_debug(hw, I40E_DEBUG_DIAG,
+ "%s: reg pattern test failed - reg 0x%08x pat 0x%08x val 0x%08x\n",
+ __func__, reg, pat, val);
+ return I40E_ERR_DIAG_TEST_FAILED;
+ }
+ }
+
+ wr32(hw, reg, orig_val);
+ val = rd32(hw, reg);
+ if (val != orig_val) {
+ i40e_debug(hw, I40E_DEBUG_DIAG,
+ "%s: reg restore test failed - reg 0x%08x orig_val 0x%08x val 0x%08x\n",
+ __func__, reg, orig_val, val);
+ return I40E_ERR_DIAG_TEST_FAILED;
+ }
+
+ return 0;
+}
+
+struct i40e_diag_reg_test_info i40e_reg_list[] = {
+ /* offset mask elements stride */
+ {I40E_QTX_CTL(0), 0x0000FFBF, 64, I40E_QTX_CTL(1) - I40E_QTX_CTL(0)},
+ {I40E_PFINT_ITR0(0), 0x00000FFF, 3, I40E_PFINT_ITR0(1) - I40E_PFINT_ITR0(0)},
+ {I40E_PFINT_ITRN(0, 0), 0x00000FFF, 64, I40E_PFINT_ITRN(0, 1) - I40E_PFINT_ITRN(0, 0)},
+ {I40E_PFINT_ITRN(1, 0), 0x00000FFF, 64, I40E_PFINT_ITRN(1, 1) - I40E_PFINT_ITRN(1, 0)},
+ {I40E_PFINT_ITRN(2, 0), 0x00000FFF, 64, I40E_PFINT_ITRN(2, 1) - I40E_PFINT_ITRN(2, 0)},
+ {I40E_PFINT_STAT_CTL0, 0x0000000C, 1, 0},
+ {I40E_PFINT_LNKLST0, 0x00001FFF, 1, 0},
+ {I40E_PFINT_LNKLSTN(0), 0x000007FF, 511, I40E_PFINT_LNKLSTN(1) - I40E_PFINT_LNKLSTN(0)},
+ {I40E_QINT_TQCTL(0), 0x000000FF, I40E_QINT_TQCTL_MAX_INDEX + 1, I40E_QINT_TQCTL(1) - I40E_QINT_TQCTL(0)},
+ {I40E_QINT_RQCTL(0), 0x000000FF, I40E_QINT_RQCTL_MAX_INDEX + 1, I40E_QINT_RQCTL(1) - I40E_QINT_RQCTL(0)},
+ {I40E_PFINT_ICR0_ENA, 0xF7F20000, 1, 0},
+ { 0 }
+};
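To make the table's encoding concrete, here is a minimal standalone sketch (illustrative values, not the driver's) of how a {offset, mask, elements, stride} row expands into individual register addresses, mirroring the loop in i40e_diag_reg_test() below:

#include <stdint.h>
#include <stdio.h>

struct reg_test_info {
	uint32_t offset;	/* base register address */
	uint32_t mask;		/* writable bits under test */
	uint32_t elements;	/* array length (1 for scalars) */
	uint32_t stride;	/* byte distance between elements */
};

int main(void)
{
	/* hypothetical two-row table in the style of i40e_reg_list */
	const struct reg_test_info list[] = {
		{ 0x00038000, 0x0000FFBF, 4, 0x4 },
		{ 0x00038100, 0x00000FFF, 1, 0x0 },
		{ 0 }
	};
	int i;
	uint32_t j;

	for (i = 0; list[i].offset != 0; i++)
		for (j = 0; j < list[i].elements; j++)
			printf("test reg 0x%08x mask 0x%08x\n",
			       (unsigned)(list[i].offset +
					  j * list[i].stride),
			       (unsigned)list[i].mask);
	return 0;
}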
+
+/**
+ * i40e_diag_reg_test
+ * @hw: pointer to the hw struct
+ *
+ * Perform registers diagnostic test
+ **/
+i40e_status i40e_diag_reg_test(struct i40e_hw *hw)
+{
+ i40e_status ret_code = 0;
+ u32 reg, mask;
+ u32 i, j;
+
+ for (i = 0; (i40e_reg_list[i].offset != 0) && !ret_code; i++) {
+ mask = i40e_reg_list[i].mask;
+ for (j = 0; (j < i40e_reg_list[i].elements) && !ret_code; j++) {
+ reg = i40e_reg_list[i].offset +
+ (j * i40e_reg_list[i].stride);
+ ret_code = i40e_diag_reg_pattern_test(hw, reg, mask);
+ }
+ }
+
+ return ret_code;
+}
+
+/**
+ * i40e_diag_eeprom_test
+ * @hw: pointer to the hw struct
+ *
+ * Perform EEPROM diagnostic test
+ **/
+i40e_status i40e_diag_eeprom_test(struct i40e_hw *hw)
+{
+ i40e_status ret_code;
+ u16 reg_val;
+
+	/* read NVM control word and if NVM valid, validate EEPROM checksum */
+ ret_code = i40e_read_nvm_word(hw, I40E_SR_NVM_CONTROL_WORD, &reg_val);
+ if ((!ret_code) &&
+ ((reg_val & I40E_SR_CONTROL_WORD_1_MASK) ==
+ (0x01 << I40E_SR_CONTROL_WORD_1_SHIFT))) {
+ ret_code = i40e_validate_nvm_checksum(hw, NULL);
+ } else {
+ ret_code = I40E_ERR_DIAG_TEST_FAILED;
+ }
+
+ return ret_code;
+}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_diag.h b/drivers/net/ethernet/intel/i40e/i40e_diag.h
new file mode 100644
index 0000000..3d98277
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_diag.h
@@ -0,0 +1,52 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_DIAG_H_
+#define _I40E_DIAG_H_
+
+#include "i40e_type.h"
+
+enum i40e_lb_mode {
+ I40E_LB_MODE_NONE = 0,
+ I40E_LB_MODE_PHY_LOCAL,
+ I40E_LB_MODE_PHY_REMOTE,
+ I40E_LB_MODE_MAC_LOCAL,
+};
+
+struct i40e_diag_reg_test_info {
+ u32 offset; /* the base register */
+ u32 mask; /* bits that can be tested */
+	u32 elements;  /* number of elements (1 for a scalar register) */
+ u32 stride; /* bytes between each element */
+};
+
+extern struct i40e_diag_reg_test_info i40e_reg_list[];
+
+i40e_status i40e_diag_reg_test(struct i40e_hw *hw);
+i40e_status i40e_diag_eeprom_test(struct i40e_hw *hw);
+
+#endif /* _I40E_DIAG_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
new file mode 100644
index 0000000..9a76b8c
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -0,0 +1,1449 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+/* ethtool support for i40e */
+
+#include "i40e.h"
+#include "i40e_diag.h"
+
+struct i40e_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ int sizeof_stat;
+ int stat_offset;
+};
+
+#define I40E_STAT(_type, _name, _stat) { \
+ .stat_string = _name, \
+ .sizeof_stat = FIELD_SIZEOF(_type, _stat), \
+ .stat_offset = offsetof(_type, _stat) \
+}
+#define I40E_NETDEV_STAT(_net_stat) \
+ I40E_STAT(struct net_device_stats, #_net_stat, _net_stat)
+#define I40E_PF_STAT(_name, _stat) \
+ I40E_STAT(struct i40e_pf, _name, _stat)
+#define I40E_VSI_STAT(_name, _stat) \
+ I40E_STAT(struct i40e_vsi, _name, _stat)
+
+static const struct i40e_stats i40e_gstrings_net_stats[] = {
+ I40E_NETDEV_STAT(rx_packets),
+ I40E_NETDEV_STAT(tx_packets),
+ I40E_NETDEV_STAT(rx_bytes),
+ I40E_NETDEV_STAT(tx_bytes),
+ I40E_NETDEV_STAT(rx_errors),
+ I40E_NETDEV_STAT(tx_errors),
+ I40E_NETDEV_STAT(rx_dropped),
+ I40E_NETDEV_STAT(tx_dropped),
+ I40E_NETDEV_STAT(multicast),
+ I40E_NETDEV_STAT(collisions),
+ I40E_NETDEV_STAT(rx_length_errors),
+ I40E_NETDEV_STAT(rx_crc_errors),
+};
+
+/* These PF_STATs might look like duplicates of some NETDEV_STATs,
+ * but they are separate. This device supports Virtualization, and
+ * as such might have several netdevs supporting VMDq and FCoE going
+ * through a single port. The NETDEV_STATs are for individual netdevs
+ * seen at the top of the stack, and the PF_STATs are for the physical
+ * function at the bottom of the stack hosting those netdevs.
+ *
+ * The PF_STATs are appended to the netdev stats only when ethtool -S
+ * is queried on the base PF netdev, not on the VMDq or FCoE netdev.
+ */
+static struct i40e_stats i40e_gstrings_stats[] = {
+ I40E_PF_STAT("rx_bytes", stats.eth.rx_bytes),
+ I40E_PF_STAT("tx_bytes", stats.eth.tx_bytes),
+ I40E_PF_STAT("rx_errors", stats.eth.rx_errors),
+ I40E_PF_STAT("tx_errors", stats.eth.tx_errors),
+ I40E_PF_STAT("rx_dropped", stats.eth.rx_discards),
+ I40E_PF_STAT("tx_dropped", stats.eth.tx_discards),
+ I40E_PF_STAT("tx_dropped_link_down", stats.tx_dropped_link_down),
+ I40E_PF_STAT("crc_errors", stats.crc_errors),
+ I40E_PF_STAT("illegal_bytes", stats.illegal_bytes),
+ I40E_PF_STAT("mac_local_faults", stats.mac_local_faults),
+ I40E_PF_STAT("mac_remote_faults", stats.mac_remote_faults),
+ I40E_PF_STAT("rx_length_errors", stats.rx_length_errors),
+ I40E_PF_STAT("link_xon_rx", stats.link_xon_rx),
+ I40E_PF_STAT("link_xoff_rx", stats.link_xoff_rx),
+ I40E_PF_STAT("link_xon_tx", stats.link_xon_tx),
+ I40E_PF_STAT("link_xoff_tx", stats.link_xoff_tx),
+ I40E_PF_STAT("rx_size_64", stats.rx_size_64),
+ I40E_PF_STAT("rx_size_127", stats.rx_size_127),
+ I40E_PF_STAT("rx_size_255", stats.rx_size_255),
+ I40E_PF_STAT("rx_size_511", stats.rx_size_511),
+ I40E_PF_STAT("rx_size_1023", stats.rx_size_1023),
+ I40E_PF_STAT("rx_size_1522", stats.rx_size_1522),
+ I40E_PF_STAT("rx_size_big", stats.rx_size_big),
+ I40E_PF_STAT("tx_size_64", stats.tx_size_64),
+ I40E_PF_STAT("tx_size_127", stats.tx_size_127),
+ I40E_PF_STAT("tx_size_255", stats.tx_size_255),
+ I40E_PF_STAT("tx_size_511", stats.tx_size_511),
+ I40E_PF_STAT("tx_size_1023", stats.tx_size_1023),
+ I40E_PF_STAT("tx_size_1522", stats.tx_size_1522),
+ I40E_PF_STAT("tx_size_big", stats.tx_size_big),
+ I40E_PF_STAT("rx_undersize", stats.rx_undersize),
+ I40E_PF_STAT("rx_fragments", stats.rx_fragments),
+ I40E_PF_STAT("rx_oversize", stats.rx_oversize),
+ I40E_PF_STAT("rx_jabber", stats.rx_jabber),
+ I40E_PF_STAT("VF_admin_queue_requests", vf_aq_requests),
+};
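The macro family above boils down to a generic name/size/offset table: i40e_get_ethtool_stats() can then fetch any member with pointer arithmetic and pick u64 vs u32 from the recorded size. A standalone sketch of that technique (stand-in struct and names, not the driver's):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct fake_pf {			/* stand-in for struct i40e_pf */
	uint64_t crc_errors;
	uint32_t vf_aq_requests;
};

struct stat_desc {
	const char *name;
	size_t size;
	size_t offset;
};

#define STAT(type, n, member) \
	{ n, sizeof(((type *)0)->member), offsetof(type, member) }

int main(void)
{
	const struct fake_pf pf = { .crc_errors = 7, .vf_aq_requests = 3 };
	const struct stat_desc stats[] = {
		STAT(struct fake_pf, "crc_errors", crc_errors),
		STAT(struct fake_pf, "VF_admin_queue_requests",
		     vf_aq_requests),
	};
	size_t i;

	for (i = 0; i < sizeof(stats) / sizeof(stats[0]); i++) {
		const char *p = (const char *)&pf + stats[i].offset;
		uint64_t v = (stats[i].size == sizeof(uint64_t)) ?
			     *(const uint64_t *)p : *(const uint32_t *)p;

		printf("%s: %llu\n", stats[i].name, (unsigned long long)v);
	}
	return 0;
}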
+
+#define I40E_QUEUE_STATS_LEN(n) \
+ ((((struct i40e_netdev_priv *)netdev_priv((n)))->vsi->num_queue_pairs + \
+ ((struct i40e_netdev_priv *)netdev_priv((n)))->vsi->num_queue_pairs) * 2)
+#define I40E_GLOBAL_STATS_LEN ARRAY_SIZE(i40e_gstrings_stats)
+#define I40E_NETDEV_STATS_LEN ARRAY_SIZE(i40e_gstrings_net_stats)
+#define I40E_VSI_STATS_LEN(n) (I40E_NETDEV_STATS_LEN + \
+ I40E_QUEUE_STATS_LEN((n)))
+#define I40E_PFC_STATS_LEN ( \
+ (FIELD_SIZEOF(struct i40e_pf, stats.priority_xoff_rx) + \
+ FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_rx) + \
+ FIELD_SIZEOF(struct i40e_pf, stats.priority_xoff_tx) + \
+ FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_tx) + \
+ FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_2_xoff)) \
+ / sizeof(u64))
+#define I40E_PF_STATS_LEN(n) (I40E_GLOBAL_STATS_LEN + \
+ I40E_PFC_STATS_LEN + \
+ I40E_VSI_STATS_LEN((n)))
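Worked out under the assumption that I40E_MAX_USER_PRIORITY is 8 and each priority_* member is a u64[8] array, I40E_PFC_STATS_LEN evaluates to (5 * 8 * sizeof(u64)) / sizeof(u64) = 40 entries, while I40E_QUEUE_STATS_LEN(n) counts two values (packets and bytes) per ring for both Tx and Rx, i.e. four per queue pair.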
+
+enum i40e_ethtool_test_id {
+ I40E_ETH_TEST_REG = 0,
+ I40E_ETH_TEST_EEPROM,
+ I40E_ETH_TEST_INTR,
+ I40E_ETH_TEST_LOOPBACK,
+ I40E_ETH_TEST_LINK,
+};
+
+static const char i40e_gstrings_test[][ETH_GSTRING_LEN] = {
+ "Register test (offline)",
+ "Eeprom test (offline)",
+ "Interrupt test (offline)",
+ "Loopback test (offline)",
+ "Link test (on/offline)"
+};
+
+#define I40E_TEST_LEN (sizeof(i40e_gstrings_test) / ETH_GSTRING_LEN)
+
+/**
+ * i40e_get_settings - Get Link Speed and Duplex settings
+ * @netdev: network interface device structure
+ * @ecmd: ethtool command
+ *
+ * Reports speed/duplex settings based on media_type
+ **/
+static int i40e_get_settings(struct net_device *netdev,
+ struct ethtool_cmd *ecmd)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ struct i40e_link_status *hw_link_info = &hw->phy.link_info;
+ bool link_up = hw_link_info->link_info & I40E_AQ_LINK_UP;
+ u32 link_speed = hw_link_info->link_speed;
+
+ /* hardware is either in 40G mode or 10G mode
+ * NOTE: this section initializes supported and advertising
+ */
+ switch (hw_link_info->phy_type) {
+ case I40E_PHY_TYPE_40GBASE_CR4:
+ case I40E_PHY_TYPE_40GBASE_CR4_CU:
+ ecmd->supported = SUPPORTED_40000baseCR4_Full;
+ ecmd->advertising = ADVERTISED_40000baseCR4_Full;
+ break;
+ case I40E_PHY_TYPE_40GBASE_KR4:
+ ecmd->supported = SUPPORTED_40000baseKR4_Full;
+ ecmd->advertising = ADVERTISED_40000baseKR4_Full;
+ break;
+ case I40E_PHY_TYPE_40GBASE_SR4:
+ ecmd->supported = SUPPORTED_40000baseSR4_Full;
+ ecmd->advertising = ADVERTISED_40000baseSR4_Full;
+ break;
+ case I40E_PHY_TYPE_40GBASE_LR4:
+ ecmd->supported = SUPPORTED_40000baseLR4_Full;
+ ecmd->advertising = ADVERTISED_40000baseLR4_Full;
+ break;
+ case I40E_PHY_TYPE_10GBASE_KX4:
+ ecmd->supported = SUPPORTED_10000baseKX4_Full;
+ ecmd->advertising = ADVERTISED_10000baseKX4_Full;
+ break;
+ case I40E_PHY_TYPE_10GBASE_KR:
+ ecmd->supported = SUPPORTED_10000baseKR_Full;
+ ecmd->advertising = ADVERTISED_10000baseKR_Full;
+ break;
+ case I40E_PHY_TYPE_10GBASE_T:
+ default:
+ ecmd->supported = SUPPORTED_10000baseT_Full;
+ ecmd->advertising = ADVERTISED_10000baseT_Full;
+ break;
+ }
+
+ /* for now just say autoneg all the time */
+ ecmd->supported |= SUPPORTED_Autoneg;
+
+ if (hw->phy.media_type == I40E_MEDIA_TYPE_BACKPLANE) {
+ ecmd->supported |= SUPPORTED_Backplane;
+ ecmd->advertising |= ADVERTISED_Backplane;
+ ecmd->port = PORT_NONE;
+ } else if (hw->phy.media_type == I40E_MEDIA_TYPE_BASET) {
+ ecmd->supported |= SUPPORTED_TP;
+ ecmd->advertising |= ADVERTISED_TP;
+ ecmd->port = PORT_TP;
+ } else {
+ ecmd->supported |= SUPPORTED_FIBRE;
+ ecmd->advertising |= ADVERTISED_FIBRE;
+ ecmd->port = PORT_FIBRE;
+ }
+
+ ecmd->transceiver = XCVR_EXTERNAL;
+
+ if (link_up) {
+ switch (link_speed) {
+ case I40E_LINK_SPEED_40GB:
+ /* need a SPEED_40000 in ethtool.h */
+ ethtool_cmd_speed_set(ecmd, 40000);
+ break;
+ case I40E_LINK_SPEED_10GB:
+ ethtool_cmd_speed_set(ecmd, SPEED_10000);
+ break;
+ default:
+ break;
+ }
+ ecmd->duplex = DUPLEX_FULL;
+ } else {
+ ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
+ ecmd->duplex = DUPLEX_UNKNOWN;
+ }
+
+ return 0;
+}
+
+/**
+ * i40e_get_pauseparam - Get Flow Control status
+ * @netdev: network interface device structure
+ * @pause: ethtool pauseparam structure to report into
+ *
+ * Reports the current Tx/Rx pause settings.
+ **/
+static void i40e_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ struct i40e_link_status *hw_link_info = &hw->phy.link_info;
+
+ pause->autoneg =
+ ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ?
+ AUTONEG_ENABLE : AUTONEG_DISABLE);
+
+ pause->rx_pause = 0;
+ pause->tx_pause = 0;
+ if (hw_link_info->an_info & I40E_AQ_LINK_PAUSE_RX)
+ pause->rx_pause = 1;
+ if (hw_link_info->an_info & I40E_AQ_LINK_PAUSE_TX)
+ pause->tx_pause = 1;
+}
+
+static u32 i40e_get_msglevel(struct net_device *netdev)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+
+ return pf->msg_enable;
+}
+
+static void i40e_set_msglevel(struct net_device *netdev, u32 data)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+
+ if (I40E_DEBUG_USER & data)
+ pf->hw.debug_mask = data;
+ pf->msg_enable = data;
+}
+
+static int i40e_get_regs_len(struct net_device *netdev)
+{
+ int reg_count = 0;
+ int i;
+
+ for (i = 0; i40e_reg_list[i].offset != 0; i++)
+ reg_count += i40e_reg_list[i].elements;
+
+ return reg_count * sizeof(u32);
+}
+
+static void i40e_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
+ void *p)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ u32 *reg_buf = p;
+ int i, j, ri;
+ u32 reg;
+
+ /* Tell ethtool which driver-version-specific regs output we have.
+ *
+ * At some point, if we have ethtool doing special formatting of
+ * this data, it will rely on this version number to know how to
+ * interpret things. Hence, this needs to be updated if/when the
+ * diags register table is changed.
+ */
+ regs->version = 1;
+
+ /* loop through the diags reg table for what to print */
+ ri = 0;
+ for (i = 0; i40e_reg_list[i].offset != 0; i++) {
+ for (j = 0; j < i40e_reg_list[i].elements; j++) {
+ reg = i40e_reg_list[i].offset
+ + (j * i40e_reg_list[i].stride);
+ reg_buf[ri++] = rd32(hw, reg);
+ }
+ }
+}
+
+static int i40e_get_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *eeprom, u8 *bytes)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_hw *hw = &np->vsi->back->hw;
+ int first_word, last_word;
+ u16 i, eeprom_len;
+ u16 *eeprom_buff;
+ int ret_val = 0;
+
+ if (eeprom->len == 0)
+ return -EINVAL;
+
+ eeprom->magic = hw->vendor_id | (hw->device_id << 16);
+
+ first_word = eeprom->offset >> 1;
+ last_word = (eeprom->offset + eeprom->len - 1) >> 1;
+ eeprom_len = last_word - first_word + 1;
+
+ eeprom_buff = kmalloc(sizeof(u16) * eeprom_len, GFP_KERNEL);
+ if (!eeprom_buff)
+ return -ENOMEM;
+
+ ret_val = i40e_read_nvm_buffer(hw, first_word, &eeprom_len,
+ eeprom_buff);
+ if (eeprom_len == 0) {
+ kfree(eeprom_buff);
+ return -EACCES;
+ }
+
+ /* Device's eeprom is always little-endian, word addressable */
+ for (i = 0; i < eeprom_len; i++)
+ le16_to_cpus(&eeprom_buff[i]);
+
+ memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len);
+ kfree(eeprom_buff);
+
+ return ret_val;
+}
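To see the word arithmetic above with concrete numbers: a request with eeprom->offset = 3 and eeprom->len = 4 gives first_word = 3 >> 1 = 1 and last_word = (3 + 4 - 1) >> 1 = 3, so eeprom_len = 3 sixteen-bit words (EEPROM bytes 2..7) are read; the final memcpy then starts at byte (offset & 1) = 1 inside the word buffer and copies exactly the four requested bytes.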
+
+static int i40e_get_eeprom_len(struct net_device *netdev)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_hw *hw = &np->vsi->back->hw;
+
+ return hw->nvm.sr_size * 2;
+}
+
+static void i40e_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+
+ strlcpy(drvinfo->driver, i40e_driver_name, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, i40e_driver_version_str,
+ sizeof(drvinfo->version));
+ strlcpy(drvinfo->fw_version, i40e_fw_version_str(&pf->hw),
+ sizeof(drvinfo->fw_version));
+ strlcpy(drvinfo->bus_info, pci_name(pf->pdev),
+ sizeof(drvinfo->bus_info));
+}
+
+static void i40e_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+ struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+
+ ring->rx_max_pending = I40E_MAX_NUM_DESCRIPTORS;
+ ring->tx_max_pending = I40E_MAX_NUM_DESCRIPTORS;
+ ring->rx_mini_max_pending = 0;
+ ring->rx_jumbo_max_pending = 0;
+ ring->rx_pending = vsi->rx_rings[0].count;
+ ring->tx_pending = vsi->tx_rings[0].count;
+ ring->rx_mini_pending = 0;
+ ring->rx_jumbo_pending = 0;
+}
+
+static int i40e_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct i40e_ring *tx_rings = NULL, *rx_rings = NULL;
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ u32 new_rx_count, new_tx_count;
+ int i, err = 0;
+
+ if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
+ return -EINVAL;
+
+ new_tx_count = clamp_t(u32, ring->tx_pending,
+ I40E_MIN_NUM_DESCRIPTORS,
+ I40E_MAX_NUM_DESCRIPTORS);
+ new_tx_count = ALIGN(new_tx_count, I40E_REQ_DESCRIPTOR_MULTIPLE);
+
+ new_rx_count = clamp_t(u32, ring->rx_pending,
+ I40E_MIN_NUM_DESCRIPTORS,
+ I40E_MAX_NUM_DESCRIPTORS);
+ new_rx_count = ALIGN(new_rx_count, I40E_REQ_DESCRIPTOR_MULTIPLE);
+
+ /* if nothing to do return success */
+ if ((new_tx_count == vsi->tx_rings[0].count) &&
+ (new_rx_count == vsi->rx_rings[0].count))
+ return 0;
+
+ while (test_and_set_bit(__I40E_CONFIG_BUSY, &pf->state))
+ usleep_range(1000, 2000);
+
+ if (!netif_running(vsi->netdev)) {
+ /* simple case - set for the next time the netdev is started */
+ for (i = 0; i < vsi->num_queue_pairs; i++) {
+ vsi->tx_rings[i].count = new_tx_count;
+ vsi->rx_rings[i].count = new_rx_count;
+ }
+ goto done;
+ }
+
+ /* We can't just free everything and then setup again,
+ * because the ISRs in MSI-X mode get passed pointers
+ * to the Tx and Rx ring structs.
+ */
+
+ /* alloc updated Tx resources */
+ if (new_tx_count != vsi->tx_rings[0].count) {
+ netdev_info(netdev,
+ "Changing Tx descriptor count from %d to %d.\n",
+ vsi->tx_rings[0].count, new_tx_count);
+ tx_rings = kcalloc(vsi->alloc_queue_pairs,
+ sizeof(struct i40e_ring), GFP_KERNEL);
+ if (!tx_rings) {
+ err = -ENOMEM;
+ goto done;
+ }
+
+ for (i = 0; i < vsi->num_queue_pairs; i++) {
+ /* clone ring and setup updated count */
+ tx_rings[i] = vsi->tx_rings[i];
+ tx_rings[i].count = new_tx_count;
+ err = i40e_setup_tx_descriptors(&tx_rings[i]);
+ if (err) {
+ while (i) {
+ i--;
+ i40e_free_tx_resources(&tx_rings[i]);
+ }
+ kfree(tx_rings);
+ tx_rings = NULL;
+
+ goto done;
+ }
+ }
+ }
+
+ /* alloc updated Rx resources */
+ if (new_rx_count != vsi->rx_rings[0].count) {
+ netdev_info(netdev,
+ "Changing Rx descriptor count from %d to %d\n",
+ vsi->rx_rings[0].count, new_rx_count);
+ rx_rings = kcalloc(vsi->alloc_queue_pairs,
+ sizeof(struct i40e_ring), GFP_KERNEL);
+ if (!rx_rings) {
+ err = -ENOMEM;
+ goto free_tx;
+ }
+
+ for (i = 0; i < vsi->num_queue_pairs; i++) {
+ /* clone ring and setup updated count */
+ rx_rings[i] = vsi->rx_rings[i];
+ rx_rings[i].count = new_rx_count;
+ err = i40e_setup_rx_descriptors(&rx_rings[i]);
+ if (err) {
+ while (i) {
+ i--;
+ i40e_free_rx_resources(&rx_rings[i]);
+ }
+ kfree(rx_rings);
+ rx_rings = NULL;
+
+ goto free_tx;
+ }
+ }
+ }
+
+ /* Bring interface down, copy in the new ring info,
+ * then restore the interface
+ */
+ i40e_down(vsi);
+
+ if (tx_rings) {
+ for (i = 0; i < vsi->num_queue_pairs; i++) {
+ i40e_free_tx_resources(&vsi->tx_rings[i]);
+ vsi->tx_rings[i] = tx_rings[i];
+ }
+ kfree(tx_rings);
+ tx_rings = NULL;
+ }
+
+ if (rx_rings) {
+ for (i = 0; i < vsi->num_queue_pairs; i++) {
+ i40e_free_rx_resources(&vsi->rx_rings[i]);
+ vsi->rx_rings[i] = rx_rings[i];
+ }
+ kfree(rx_rings);
+ rx_rings = NULL;
+ }
+
+ i40e_up(vsi);
+
+free_tx:
+ /* error cleanup if the Rx allocations failed after getting Tx */
+ if (tx_rings) {
+ for (i = 0; i < vsi->num_queue_pairs; i++)
+ i40e_free_tx_resources(&tx_rings[i]);
+ kfree(tx_rings);
+ tx_rings = NULL;
+ }
+
+done:
+ clear_bit(__I40E_CONFIG_BUSY, &pf->state);
+
+ return err;
+}
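The clone-then-swap strategy explained in the comments above generalizes: build fully initialized replacement rings first, and only free-and-replace the live ones once every allocation has succeeded, so a mid-sequence failure leaves the interface untouched. A standalone sketch of the pattern (plain userspace C, not the driver's types):

#include <stdlib.h>

struct ring {
	void *desc;	/* descriptor memory */
	int count;	/* descriptors per ring */
};

static int ring_alloc(struct ring *r)
{
	r->desc = calloc(r->count, 16);	/* 16-byte stand-in descriptor */
	return r->desc ? 0 : -1;
}

static int resize_rings(struct ring *rings, int n, int new_count)
{
	struct ring *tmp = calloc(n, sizeof(*tmp));
	int i;

	if (!tmp)
		return -1;
	for (i = 0; i < n; i++) {
		tmp[i] = rings[i];		/* clone settings */
		tmp[i].count = new_count;
		if (ring_alloc(&tmp[i])) {	/* unwind on failure */
			while (i--)
				free(tmp[i].desc);
			free(tmp);
			return -1;
		}
	}
	for (i = 0; i < n; i++) {		/* commit: swap in */
		free(rings[i].desc);
		rings[i] = tmp[i];
	}
	free(tmp);
	return 0;
}

int main(void)
{
	struct ring rings[2] = { { NULL, 512 }, { NULL, 512 } };
	int i;

	for (i = 0; i < 2; i++)
		if (ring_alloc(&rings[i]))
			return 1;
	if (resize_rings(rings, 2, 1024))
		return 1;
	for (i = 0; i < 2; i++)
		free(rings[i].desc);
	return 0;
}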
+
+static int i40e_get_sset_count(struct net_device *netdev, int sset)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+
+ switch (sset) {
+ case ETH_SS_TEST:
+ return I40E_TEST_LEN;
+ case ETH_SS_STATS:
+ if (vsi == pf->vsi[pf->lan_vsi])
+ return I40E_PF_STATS_LEN(netdev);
+ else
+ return I40E_VSI_STATS_LEN(netdev);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void i40e_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ int i = 0;
+ char *p;
+ int j;
+ struct rtnl_link_stats64 *net_stats = i40e_get_vsi_stats_struct(vsi);
+
+ i40e_update_stats(vsi);
+
+ for (j = 0; j < I40E_NETDEV_STATS_LEN; j++) {
+ p = (char *)net_stats + i40e_gstrings_net_stats[j].stat_offset;
+ data[i++] = (i40e_gstrings_net_stats[j].sizeof_stat ==
+ sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+ }
+ for (j = 0; j < vsi->num_queue_pairs; j++) {
+ data[i++] = vsi->tx_rings[j].tx_stats.packets;
+ data[i++] = vsi->tx_rings[j].tx_stats.bytes;
+ }
+ for (j = 0; j < vsi->num_queue_pairs; j++) {
+ data[i++] = vsi->rx_rings[j].rx_stats.packets;
+ data[i++] = vsi->rx_rings[j].rx_stats.bytes;
+ }
+ if (vsi == pf->vsi[pf->lan_vsi]) {
+ for (j = 0; j < I40E_GLOBAL_STATS_LEN; j++) {
+ p = (char *)pf + i40e_gstrings_stats[j].stat_offset;
+ data[i++] = (i40e_gstrings_stats[j].sizeof_stat ==
+ sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+ }
+ for (j = 0; j < I40E_MAX_USER_PRIORITY; j++) {
+ data[i++] = pf->stats.priority_xon_tx[j];
+ data[i++] = pf->stats.priority_xoff_tx[j];
+ }
+ for (j = 0; j < I40E_MAX_USER_PRIORITY; j++) {
+ data[i++] = pf->stats.priority_xon_rx[j];
+ data[i++] = pf->stats.priority_xoff_rx[j];
+ }
+ for (j = 0; j < I40E_MAX_USER_PRIORITY; j++)
+ data[i++] = pf->stats.priority_xon_2_xoff[j];
+ }
+}
+
+static void i40e_get_strings(struct net_device *netdev, u32 stringset,
+ u8 *data)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ char *p = (char *)data;
+ int i;
+
+ switch (stringset) {
+ case ETH_SS_TEST:
+ for (i = 0; i < I40E_TEST_LEN; i++) {
+ memcpy(data, i40e_gstrings_test[i], ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ }
+ break;
+ case ETH_SS_STATS:
+ for (i = 0; i < I40E_NETDEV_STATS_LEN; i++) {
+ snprintf(p, ETH_GSTRING_LEN, "%s",
+ i40e_gstrings_net_stats[i].stat_string);
+ p += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < vsi->num_queue_pairs; i++) {
+ snprintf(p, ETH_GSTRING_LEN, "tx-%u.tx_packets", i);
+ p += ETH_GSTRING_LEN;
+ snprintf(p, ETH_GSTRING_LEN, "tx-%u.tx_bytes", i);
+ p += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < vsi->num_queue_pairs; i++) {
+ snprintf(p, ETH_GSTRING_LEN, "rx-%u.rx_packets", i);
+ p += ETH_GSTRING_LEN;
+ snprintf(p, ETH_GSTRING_LEN, "rx-%u.rx_bytes", i);
+ p += ETH_GSTRING_LEN;
+ }
+ if (vsi == pf->vsi[pf->lan_vsi]) {
+ for (i = 0; i < I40E_GLOBAL_STATS_LEN; i++) {
+ snprintf(p, ETH_GSTRING_LEN, "port.%s",
+ i40e_gstrings_stats[i].stat_string);
+ p += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
+ snprintf(p, ETH_GSTRING_LEN,
+ "port.tx_priority_%u_xon", i);
+ p += ETH_GSTRING_LEN;
+ snprintf(p, ETH_GSTRING_LEN,
+ "port.tx_priority_%u_xoff", i);
+ p += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
+ snprintf(p, ETH_GSTRING_LEN,
+ "port.rx_priority_%u_xon", i);
+ p += ETH_GSTRING_LEN;
+ snprintf(p, ETH_GSTRING_LEN,
+ "port.rx_priority_%u_xoff", i);
+ p += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
+ snprintf(p, ETH_GSTRING_LEN,
+ "port.rx_priority_%u_xon_2_xoff", i);
+ p += ETH_GSTRING_LEN;
+ }
+ }
+ /* BUG_ON(p - data != I40E_STATS_LEN * ETH_GSTRING_LEN); */
+ break;
+ }
+}
+
+static int i40e_get_ts_info(struct net_device *dev,
+ struct ethtool_ts_info *info)
+{
+ return ethtool_op_get_ts_info(dev, info);
+}
+
+static int i40e_link_test(struct i40e_pf *pf, u64 *data)
+{
+ if (i40e_get_link_status(&pf->hw))
+ *data = 0;
+ else
+ *data = 1;
+
+ return *data;
+}
+
+static int i40e_reg_test(struct i40e_pf *pf, u64 *data)
+{
+ i40e_status ret;
+
+ ret = i40e_diag_reg_test(&pf->hw);
+ *data = ret;
+
+ return ret;
+}
+
+static int i40e_eeprom_test(struct i40e_pf *pf, u64 *data)
+{
+ i40e_status ret;
+
+ ret = i40e_diag_eeprom_test(&pf->hw);
+ *data = ret;
+
+ return ret;
+}
+
+static int i40e_intr_test(struct i40e_pf *pf, u64 *data)
+{
+ *data = -ENOSYS;
+
+ return *data;
+}
+
+static int i40e_loopback_test(struct i40e_pf *pf, u64 *data)
+{
+ *data = -ENOSYS;
+
+ return *data;
+}
+
+static void i40e_diag_test(struct net_device *netdev,
+ struct ethtool_test *eth_test, u64 *data)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+
+ set_bit(__I40E_TESTING, &pf->state);
+ if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
+ /* Offline tests */
+
+ netdev_info(netdev, "offline testing starting\n");
+
+ /* Link test performed before hardware reset
+ * so autoneg doesn't interfere with test result
+ */
+ netdev_info(netdev, "link test starting\n");
+ if (i40e_link_test(pf, &data[I40E_ETH_TEST_LINK]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ netdev_info(netdev, "register test starting\n");
+ if (i40e_reg_test(pf, &data[I40E_ETH_TEST_REG]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
+ netdev_info(netdev, "eeprom test starting\n");
+ if (i40e_eeprom_test(pf, &data[I40E_ETH_TEST_EEPROM]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
+ netdev_info(netdev, "interrupt test starting\n");
+ if (i40e_intr_test(pf, &data[I40E_ETH_TEST_INTR]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
+ netdev_info(netdev, "loopback test starting\n");
+ if (i40e_loopback_test(pf, &data[I40E_ETH_TEST_LOOPBACK]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ } else {
+ netdev_info(netdev, "online test starting\n");
+ /* Online tests */
+ if (i40e_link_test(pf, &data[I40E_ETH_TEST_LINK]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ /* Offline only tests, not run in online; pass by default */
+ data[I40E_ETH_TEST_REG] = 0;
+ data[I40E_ETH_TEST_EEPROM] = 0;
+ data[I40E_ETH_TEST_INTR] = 0;
+ data[I40E_ETH_TEST_LOOPBACK] = 0;
+
+	}
+	clear_bit(__I40E_TESTING, &pf->state);
+}
+
+static void i40e_get_wol(struct net_device *netdev,
+ struct ethtool_wolinfo *wol)
+{
+ wol->supported = 0;
+ wol->wolopts = 0;
+}
+
+static int i40e_nway_reset(struct net_device *netdev)
+{
+ /* restart autonegotiation */
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ i40e_status ret = 0;
+
+ ret = i40e_aq_set_link_restart_an(hw, NULL);
+ if (ret) {
+ netdev_info(netdev, "link restart failed, aq_err=%d\n",
+ pf->hw.aq.asq_last_status);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int i40e_set_phys_id(struct net_device *netdev,
+ enum ethtool_phys_id_state state)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ int blink_freq = 2;
+
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ pf->led_status = i40e_led_get(hw);
+ return blink_freq;
+ case ETHTOOL_ID_ON:
+ i40e_led_set(hw, 0xF);
+ break;
+ case ETHTOOL_ID_OFF:
+ i40e_led_set(hw, 0x0);
+ break;
+ case ETHTOOL_ID_INACTIVE:
+ i40e_led_set(hw, pf->led_status);
+ break;
+ }
+
+ return 0;
+}
+
+/* NOTE: i40e hardware uses a conversion factor of 2 for the Interrupt
+ * Throttle Rate (ITR), i.e. ITR(1) = 2us, ITR(10) = 20us, and
+ * 125us (8000 interrupts per second) == ITR(62)
+ */
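With that factor of 2, a register value of 62 corresponds to 62 * 2 = 124us per interrupt, which the comment rounds to 125us, i.e. 1s / 125us = 8000 interrupts per second.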
+
+static int i40e_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+
+ ec->tx_max_coalesced_frames_irq = vsi->work_limit;
+ ec->rx_max_coalesced_frames_irq = vsi->work_limit;
+
+ if (ITR_IS_DYNAMIC(vsi->rx_itr_setting))
+ ec->rx_coalesce_usecs = 1;
+ else
+ ec->rx_coalesce_usecs = vsi->rx_itr_setting;
+
+ if (ITR_IS_DYNAMIC(vsi->tx_itr_setting))
+ ec->tx_coalesce_usecs = 1;
+ else
+ ec->tx_coalesce_usecs = vsi->tx_itr_setting;
+
+ return 0;
+}
+
+static int i40e_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_q_vector *q_vector;
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ u16 vector;
+ int i;
+
+ if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq)
+ vsi->work_limit = ec->tx_max_coalesced_frames_irq;
+
+ switch (ec->rx_coalesce_usecs) {
+ case 0:
+ vsi->rx_itr_setting = 0;
+ break;
+ case 1:
+ vsi->rx_itr_setting = (I40E_ITR_DYNAMIC |
+ ITR_REG_TO_USEC(I40E_ITR_RX_DEF));
+ break;
+ default:
+ if ((ec->rx_coalesce_usecs < (I40E_MIN_ITR << 1)) ||
+ (ec->rx_coalesce_usecs > (I40E_MAX_ITR << 1)))
+ return -EINVAL;
+ vsi->rx_itr_setting = ec->rx_coalesce_usecs;
+ break;
+ }
+
+ switch (ec->tx_coalesce_usecs) {
+ case 0:
+ vsi->tx_itr_setting = 0;
+ break;
+ case 1:
+ vsi->tx_itr_setting = (I40E_ITR_DYNAMIC |
+ ITR_REG_TO_USEC(I40E_ITR_TX_DEF));
+ break;
+ default:
+ if ((ec->tx_coalesce_usecs < (I40E_MIN_ITR << 1)) ||
+ (ec->tx_coalesce_usecs > (I40E_MAX_ITR << 1)))
+ return -EINVAL;
+ vsi->tx_itr_setting = ec->tx_coalesce_usecs;
+ break;
+ }
+
+ vector = vsi->base_vector;
+ q_vector = vsi->q_vectors;
+ for (i = 0; i < vsi->num_q_vectors; i++, vector++, q_vector++) {
+ q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
+ wr32(hw, I40E_PFINT_ITRN(0, vector - 1), q_vector->rx.itr);
+ q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
+ wr32(hw, I40E_PFINT_ITRN(1, vector - 1), q_vector->tx.itr);
+ i40e_flush(hw);
+ }
+
+ return 0;
+}
+
+/**
+ * i40e_get_rss_hash_opts - Get RSS hash Input Set for each flow type
+ * @pf: pointer to the physical function struct
+ * @cmd: ethtool rxnfc command
+ *
+ * Returns Success if the flow is supported, else Invalid Input.
+ **/
+static int i40e_get_rss_hash_opts(struct i40e_pf *pf, struct ethtool_rxnfc *cmd)
+{
+ cmd->data = 0;
+
+ /* Report default options for RSS on i40e */
+ switch (cmd->flow_type) {
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ /* fall through to add IP fields */
+ case SCTP_V4_FLOW:
+ case AH_ESP_V4_FLOW:
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ case IPV4_FLOW:
+ cmd->data |= RXH_IP_SRC | RXH_IP_DST;
+ break;
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ /* fall through to add IP fields */
+ case SCTP_V6_FLOW:
+ case AH_ESP_V6_FLOW:
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ case IPV6_FLOW:
+ cmd->data |= RXH_IP_SRC | RXH_IP_DST;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * i40e_get_rxnfc - command to get RX flow classification rules
+ * @netdev: network interface device structure
+ * @cmd: ethtool rxnfc command
+ *
+ * Returns Success if the command is supported.
+ **/
+static int i40e_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ int ret = -EOPNOTSUPP;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_GRXRINGS:
+ cmd->data = vsi->alloc_queue_pairs;
+ ret = 0;
+ break;
+ case ETHTOOL_GRXFH:
+ ret = i40e_get_rss_hash_opts(pf, cmd);
+ break;
+ case ETHTOOL_GRXCLSRLCNT:
+ ret = 0;
+ break;
+ case ETHTOOL_GRXCLSRULE:
+ ret = 0;
+ break;
+ case ETHTOOL_GRXCLSRLALL:
+ cmd->data = 500;
+		ret = 0;
+		break;
+	default:
+ break;
+ }
+
+ return ret;
+}
+
+/**
+ * i40e_set_rss_hash_opt - Enable/Disable flow types for RSS hash
+ * @pf: pointer to the physical function struct
+ * @cmd: ethtool rxnfc command
+ *
+ * Returns Success if the flow input set is supported.
+ **/
+static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
+{
+ struct i40e_hw *hw = &pf->hw;
+ u64 hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) |
+ ((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32);
+
+ /* RSS does not support anything other than hashing
+ * to queues on src and dst IPs and ports
+ */
+ if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3))
+ return -EINVAL;
+
+ /* We need at least the IP SRC and DEST fields for hashing */
+ if (!(nfc->data & RXH_IP_SRC) ||
+ !(nfc->data & RXH_IP_DST))
+ return -EINVAL;
+
+ switch (nfc->flow_type) {
+ case TCP_V4_FLOW:
+ switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ case 0:
+ hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
+ break;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case TCP_V6_FLOW:
+ switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ case 0:
+ hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
+ break;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case UDP_V4_FLOW:
+ switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ case 0:
+ hena &=
+ ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) |
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) |
+ ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
+ break;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ hena |=
+ (((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) |
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) |
+ ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case UDP_V6_FLOW:
+ switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ case 0:
+ hena &=
+ ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) |
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) |
+ ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
+ break;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ hena |=
+ (((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) |
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) |
+ ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case AH_ESP_V4_FLOW:
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ if ((nfc->data & RXH_L4_B_0_1) ||
+ (nfc->data & RXH_L4_B_2_3))
+ return -EINVAL;
+ hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
+ break;
+ case AH_ESP_V6_FLOW:
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ if ((nfc->data & RXH_L4_B_0_1) ||
+ (nfc->data & RXH_L4_B_2_3))
+ return -EINVAL;
+ hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
+ break;
+ case IPV4_FLOW:
+ hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
+ ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4);
+ break;
+ case IPV6_FLOW:
+ hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
+ ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
+ wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
+ i40e_flush(hw);
+
+ return 0;
+}
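The read-modify-write above treats the two 32-bit HENA registers as one 64-bit enable mask with one bit per packet classifier type (PCTYPE). A standalone sketch of that split-register pattern (plain C, with an array standing in for the device registers):

#include <stdint.h>
#include <stdio.h>

static uint32_t reg32[2];	/* stand-ins for HENA(0) and HENA(1) */

static void set_hash_enable(unsigned int pctype_bit, int enable)
{
	/* assemble the 64-bit mask from the two 32-bit halves */
	uint64_t hena = (uint64_t)reg32[0] | ((uint64_t)reg32[1] << 32);

	if (enable)
		hena |= (uint64_t)1 << pctype_bit;
	else
		hena &= ~((uint64_t)1 << pctype_bit);

	/* write the halves back */
	reg32[0] = (uint32_t)hena;
	reg32[1] = (uint32_t)(hena >> 32);
}

int main(void)
{
	set_hash_enable(33, 1);	/* hypothetical PCTYPE bit above 31 */
	printf("HENA(0)=0x%08x HENA(1)=0x%08x\n",
	       (unsigned)reg32[0], (unsigned)reg32[1]);
	return 0;
}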
+
+#define IP_HEADER_OFFSET 14
+/**
+ * i40e_add_del_fdir_udpv4 - Add/Remove UDPv4 Flow Director filters for
+ * a specific flow spec
+ * @vsi: pointer to the targeted VSI
+ * @fd_data: the flow director data required from the FDir descriptor
+ * @fsp: the ethtool flow spec
+ * @add: true adds a filter, false removes it
+ *
+ * Returns 0 if the filters were successfully added or removed
+ **/
+static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
+ struct i40e_fdir_data *fd_data,
+ struct ethtool_rx_flow_spec *fsp, bool add)
+{
+ struct i40e_pf *pf = vsi->back;
+ struct udphdr *udp;
+ struct iphdr *ip;
+ bool err = false;
+ int ret;
+ int i;
+
+ ip = (struct iphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET);
+ udp = (struct udphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET
+ + sizeof(struct iphdr));
+
+ ip->saddr = fsp->h_u.tcp_ip4_spec.ip4src;
+ ip->daddr = fsp->h_u.tcp_ip4_spec.ip4dst;
+ udp->source = fsp->h_u.tcp_ip4_spec.psrc;
+ udp->dest = fsp->h_u.tcp_ip4_spec.pdst;
+
+ for (i = I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP;
+ i <= I40E_FILTER_PCTYPE_NONF_IPV4_UDP; i++) {
+ fd_data->pctype = i;
+ ret = i40e_program_fdir_filter(fd_data, pf, add);
+
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Filter command send failed for PCTYPE %d (ret = %d)\n",
+ fd_data->pctype, ret);
+ err = true;
+ } else {
+ dev_info(&pf->pdev->dev,
+ "Filter OK for PCTYPE %d (ret = %d)\n",
+ fd_data->pctype, ret);
+ }
+ }
+
+ return err ? -EOPNOTSUPP : 0;
+}
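The function above fills a flat raw packet by overlaying protocol header structs at fixed offsets: 14 bytes of Ethernet header, then IPv4, then the L4 header. A standalone sketch of that overlay technique (stand-in header structs, illustrative values; as in the driver, the 14-byte offset leaves the IP header only 2-byte aligned, which common architectures tolerate):

#include <stdint.h>
#include <stdio.h>

#define ETH_HDR_LEN 14

struct ipv4_hdr {
	uint8_t  ver_ihl, tos;
	uint16_t tot_len, id, frag_off;
	uint8_t  ttl, protocol;
	uint16_t check;
	uint32_t saddr, daddr;
};

struct udp_hdr {
	uint16_t source, dest, len, check;
};

int main(void)
{
	uint8_t raw_packet[512] = { 0 };
	struct ipv4_hdr *ip =
		(struct ipv4_hdr *)(raw_packet + ETH_HDR_LEN);
	struct udp_hdr *udp =
		(struct udp_hdr *)(raw_packet + ETH_HDR_LEN + sizeof(*ip));

	/* illustrative values; real code copies the flow spec's
	 * big-endian fields verbatim
	 */
	ip->saddr = 0x0a000001;
	ip->daddr = 0x0a000002;
	udp->source = 12345;
	udp->dest = 80;

	printf("ip header at +%d, udp header at +%zu\n",
	       ETH_HDR_LEN, (size_t)ETH_HDR_LEN + sizeof(*ip));
	return 0;
}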
+
+/**
+ * i40e_add_del_fdir_tcpv4 - Add/Remove TCPv4 Flow Director filters for
+ * a specific flow spec
+ * @vsi: pointer to the targeted VSI
+ * @fd_data: the flow director data required from the FDir descriptor
+ * @fsp: the ethtool flow spec
+ * @add: true adds a filter, false removes it
+ *
+ * Returns 0 if the filters were successfully added or removed
+ **/
+static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
+ struct i40e_fdir_data *fd_data,
+ struct ethtool_rx_flow_spec *fsp, bool add)
+{
+ struct i40e_pf *pf = vsi->back;
+ struct tcphdr *tcp;
+ struct iphdr *ip;
+ bool err = false;
+ int ret;
+
+ ip = (struct iphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET);
+ tcp = (struct tcphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET
+ + sizeof(struct iphdr));
+
+ ip->daddr = fsp->h_u.tcp_ip4_spec.ip4dst;
+ tcp->dest = fsp->h_u.tcp_ip4_spec.pdst;
+
+ fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN;
+ ret = i40e_program_fdir_filter(fd_data, pf, add);
+
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Filter command send failed for PCTYPE %d (ret = %d)\n",
+ fd_data->pctype, ret);
+ err = true;
+ } else {
+ dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d (ret = %d)\n",
+ fd_data->pctype, ret);
+ }
+
+ ip->saddr = fsp->h_u.tcp_ip4_spec.ip4src;
+ tcp->source = fsp->h_u.tcp_ip4_spec.psrc;
+
+ fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
+
+ ret = i40e_program_fdir_filter(fd_data, pf, add);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Filter command send failed for PCTYPE %d (ret = %d)\n",
+ fd_data->pctype, ret);
+ err = true;
+ } else {
+ dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d (ret = %d)\n",
+ fd_data->pctype, ret);
+ }
+
+ return err ? -EOPNOTSUPP : 0;
+}
+
+/**
+ * i40e_add_del_fdir_sctpv4 - Add/Remove SCTPv4 Flow Director filters for
+ * a specific flow spec
+ * @vsi: pointer to the targeted VSI
+ * @fd_data: the flow director data required from the FDir descriptor
+ * @fsp: the ethtool flow spec
+ * @add: true adds a filter, false removes it
+ *
+ * Returns 0 if the filters were successfully added or removed
+ **/
+static int i40e_add_del_fdir_sctpv4(struct i40e_vsi *vsi,
+ struct i40e_fdir_data *fd_data,
+ struct ethtool_rx_flow_spec *fsp, bool add)
+{
+ return -EOPNOTSUPP;
+}
+
+/**
+ * i40e_add_del_fdir_ipv4 - Add/Remove IPv4 Flow Director filters for
+ * a specific flow spec
+ * @vsi: pointer to the targeted VSI
+ * @fd_data: the flow director data required for the FDir descriptor
+ * @fsp: the ethtool flow spec
+ * @add: true adds a filter, false removes it
+ *
+ * Returns 0 if the filters were successfully added or removed
+ **/
+static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
+ struct i40e_fdir_data *fd_data,
+ struct ethtool_rx_flow_spec *fsp, bool add)
+{
+ struct i40e_pf *pf = vsi->back;
+ struct iphdr *ip;
+ bool err = false;
+ int ret;
+ int i;
+
+ ip = (struct iphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET);
+
+ ip->saddr = fsp->h_u.usr_ip4_spec.ip4src;
+ ip->daddr = fsp->h_u.usr_ip4_spec.ip4dst;
+ ip->protocol = fsp->h_u.usr_ip4_spec.proto;
+
+ for (i = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
+ i <= I40E_FILTER_PCTYPE_FRAG_IPV4; i++) {
+ fd_data->pctype = i;
+ ret = i40e_program_fdir_filter(fd_data, pf, add);
+
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Filter command send failed for PCTYPE %d (ret = %d)\n",
+ fd_data->pctype, ret);
+ err = true;
+ } else {
+ dev_info(&pf->pdev->dev,
+ "Filter OK for PCTYPE %d (ret = %d)\n",
+ fd_data->pctype, ret);
+ }
+ }
+
+ return err ? -EOPNOTSUPP : 0;
+}
+
+/**
+ * i40e_add_del_fdir_ethtool - Add/Remove Flow Director filters for
+ * a specific flow spec based on their protocol
+ * @vsi: pointer to the targeted VSI
+ * @cmd: command to get or set RX flow classification rules
+ * @add: true adds a filter, false removes it
+ *
+ * Returns 0 if the filters were successfully added or removed
+ **/
+static int i40e_add_del_fdir_ethtool(struct i40e_vsi *vsi,
+ struct ethtool_rxnfc *cmd, bool add)
+{
+ struct i40e_fdir_data fd_data;
+ int ret = -EINVAL;
+ struct i40e_pf *pf;
+ struct ethtool_rx_flow_spec *fsp =
+ (struct ethtool_rx_flow_spec *)&cmd->fs;
+
+ if (!vsi)
+ return -EINVAL;
+
+ pf = vsi->back;
+
+ if ((fsp->ring_cookie != RX_CLS_FLOW_DISC) &&
+ (fsp->ring_cookie >= vsi->num_queue_pairs))
+ return -EINVAL;
+
+ /* Populate the Flow Director fields that we have at the moment
+ * and allocate the raw packet buffer for the called functions
+ */
+ fd_data.raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_LOOKUP,
+ GFP_KERNEL);
+
+ if (!fd_data.raw_packet) {
+ dev_info(&pf->pdev->dev, "Could not allocate memory\n");
+ return -ENOMEM;
+ }
+
+ fd_data.q_index = fsp->ring_cookie;
+ fd_data.flex_off = 0;
+ fd_data.pctype = 0;
+ fd_data.dest_vsi = vsi->id;
+ fd_data.dest_ctl = 0;
+ fd_data.fd_status = 0;
+ fd_data.cnt_index = 0;
+ fd_data.fd_id = 0;
+
+ switch (fsp->flow_type & ~FLOW_EXT) {
+ case TCP_V4_FLOW:
+ ret = i40e_add_del_fdir_tcpv4(vsi, &fd_data, fsp, add);
+ break;
+ case UDP_V4_FLOW:
+ ret = i40e_add_del_fdir_udpv4(vsi, &fd_data, fsp, add);
+ break;
+ case SCTP_V4_FLOW:
+ ret = i40e_add_del_fdir_sctpv4(vsi, &fd_data, fsp, add);
+ break;
+ case IPV4_FLOW:
+ ret = i40e_add_del_fdir_ipv4(vsi, &fd_data, fsp, add);
+ break;
+ case IP_USER_FLOW:
+ switch (fsp->h_u.usr_ip4_spec.proto) {
+ case IPPROTO_TCP:
+ ret = i40e_add_del_fdir_tcpv4(vsi, &fd_data, fsp, add);
+ break;
+ case IPPROTO_UDP:
+ ret = i40e_add_del_fdir_udpv4(vsi, &fd_data, fsp, add);
+ break;
+ case IPPROTO_SCTP:
+ ret = i40e_add_del_fdir_sctpv4(vsi, &fd_data, fsp, add);
+ break;
+ default:
+ ret = i40e_add_del_fdir_ipv4(vsi, &fd_data, fsp, add);
+ break;
+ }
+ break;
+ default:
+ dev_info(&pf->pdev->dev, "Unsupported flow type\n");
+ ret = -EINVAL;
+ }
+
+ kfree(fd_data.raw_packet);
+ fd_data.raw_packet = NULL;
+
+ return ret;
+}
+
+/**
+ * i40e_set_rxnfc - command to set RX flow classification rules
+ * @netdev: network interface device structure
+ * @cmd: ethtool rxnfc command
+ *
+ * Returns 0 if the command is supported.
+ **/
+static int i40e_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ int ret = -EOPNOTSUPP;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_SRXFH:
+ ret = i40e_set_rss_hash_opt(pf, cmd);
+ break;
+ case ETHTOOL_SRXCLSRLINS:
+ ret = i40e_add_del_fdir_ethtool(vsi, cmd, true);
+ break;
+ case ETHTOOL_SRXCLSRLDEL:
+ ret = i40e_add_del_fdir_ethtool(vsi, cmd, false);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static const struct ethtool_ops i40e_ethtool_ops = {
+ .get_settings = i40e_get_settings,
+ .get_drvinfo = i40e_get_drvinfo,
+ .get_regs_len = i40e_get_regs_len,
+ .get_regs = i40e_get_regs,
+ .nway_reset = i40e_nway_reset,
+ .get_link = ethtool_op_get_link,
+ .get_wol = i40e_get_wol,
+ .get_eeprom_len = i40e_get_eeprom_len,
+ .get_eeprom = i40e_get_eeprom,
+ .get_ringparam = i40e_get_ringparam,
+ .set_ringparam = i40e_set_ringparam,
+ .get_pauseparam = i40e_get_pauseparam,
+ .get_msglevel = i40e_get_msglevel,
+ .set_msglevel = i40e_set_msglevel,
+ .get_rxnfc = i40e_get_rxnfc,
+ .set_rxnfc = i40e_set_rxnfc,
+ .self_test = i40e_diag_test,
+ .get_strings = i40e_get_strings,
+ .set_phys_id = i40e_set_phys_id,
+ .get_sset_count = i40e_get_sset_count,
+ .get_ethtool_stats = i40e_get_ethtool_stats,
+ .get_coalesce = i40e_get_coalesce,
+ .set_coalesce = i40e_set_coalesce,
+ .get_ts_info = i40e_get_ts_info,
+};
+
+void i40e_set_ethtool_ops(struct net_device *netdev)
+{
+ SET_ETHTOOL_OPS(netdev, &i40e_ethtool_ops);
+}
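
For context, a minimal userspace sketch of how a classification rule
reaches i40e_set_rxnfc() through the SIOCETHTOOL ioctl; this is
illustrative only and not part of the patch. The interface name, port
and queue values are made up, and the equivalent ethtool invocation
would be roughly "ethtool -U <dev> flow-type tcp4 dst-port 80 action 2".

	#include <string.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <net/if.h>
	#include <linux/ethtool.h>
	#include <linux/sockios.h>
	#include <arpa/inet.h>
	#include <unistd.h>

	/* Insert a rule steering TCP/IPv4 traffic with dst port 80 to Rx
	 * queue 2; the kernel dispatches ETHTOOL_SRXCLSRLINS to
	 * i40e_add_del_fdir_ethtool(vsi, cmd, true).
	 */
	static int add_tcp4_rule(const char *ifname)
	{
		struct ethtool_rxnfc nfc;
		struct ifreq ifr;
		int fd, ret;

		fd = socket(AF_INET, SOCK_DGRAM, 0);
		if (fd < 0)
			return -1;
		memset(&nfc, 0, sizeof(nfc));
		nfc.cmd = ETHTOOL_SRXCLSRLINS;
		nfc.fs.flow_type = TCP_V4_FLOW;
		nfc.fs.h_u.tcp_ip4_spec.pdst = htons(80);
		nfc.fs.ring_cookie = 2;		/* target Rx queue */
		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
		ifr.ifr_data = (void *)&nfc;
		ret = ioctl(fd, SIOCETHTOOL, &ifr);
		close(fd);
		return ret;
	}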
diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_hmc.c
new file mode 100644
index 0000000..901804a
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.c
@@ -0,0 +1,366 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#include "i40e_osdep.h"
+#include "i40e_register.h"
+#include "i40e_status.h"
+#include "i40e_alloc.h"
+#include "i40e_hmc.h"
+#include "i40e_type.h"
+
+/**
+ * i40e_add_sd_table_entry - Adds a segment descriptor to the table
+ * @hw: pointer to our hw struct
+ * @hmc_info: pointer to the HMC configuration information struct
+ * @sd_index: segment descriptor index to manipulate
+ * @type: what type of segment descriptor we're manipulating
+ * @direct_mode_sz: size to alloc in direct mode
+ **/
+i40e_status i40e_add_sd_table_entry(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 sd_index,
+ enum i40e_sd_entry_type type,
+ u64 direct_mode_sz)
+{
+ enum i40e_memory_type mem_type __attribute__((unused));
+ i40e_status ret_code = 0;
+ struct i40e_hmc_sd_entry *sd_entry;
+ bool dma_mem_alloc_done = false;
+ struct i40e_dma_mem mem;
+ u64 alloc_len;
+
+ if (NULL == hmc_info->sd_table.sd_entry) {
+ ret_code = I40E_ERR_BAD_PTR;
+ hw_dbg(hw, "i40e_add_sd_table_entry: bad sd_entry\n");
+ goto exit;
+ }
+
+ if (sd_index >= hmc_info->sd_table.sd_cnt) {
+ ret_code = I40E_ERR_INVALID_SD_INDEX;
+ hw_dbg(hw, "i40e_add_sd_table_entry: bad sd_index\n");
+ goto exit;
+ }
+
+ sd_entry = &hmc_info->sd_table.sd_entry[sd_index];
+ if (!sd_entry->valid) {
+ if (I40E_SD_TYPE_PAGED == type) {
+ mem_type = i40e_mem_pd;
+ alloc_len = I40E_HMC_PAGED_BP_SIZE;
+ } else {
+ mem_type = i40e_mem_bp_jumbo;
+ alloc_len = direct_mode_sz;
+ }
+
+ /* allocate a 4K pd page or 2M backing page */
+ ret_code = i40e_allocate_dma_mem(hw, &mem, mem_type, alloc_len,
+ I40E_HMC_PD_BP_BUF_ALIGNMENT);
+ if (ret_code)
+ goto exit;
+ dma_mem_alloc_done = true;
+ if (I40E_SD_TYPE_PAGED == type) {
+ ret_code = i40e_allocate_virt_mem(hw,
+ &sd_entry->u.pd_table.pd_entry_virt_mem,
+ sizeof(struct i40e_hmc_pd_entry) * 512);
+ if (ret_code)
+ goto exit;
+ sd_entry->u.pd_table.pd_entry =
+ (struct i40e_hmc_pd_entry *)
+ sd_entry->u.pd_table.pd_entry_virt_mem.va;
+ memcpy(&sd_entry->u.pd_table.pd_page_addr, &mem,
+ sizeof(struct i40e_dma_mem));
+ } else {
+ memcpy(&sd_entry->u.bp.addr, &mem,
+ sizeof(struct i40e_dma_mem));
+ sd_entry->u.bp.sd_pd_index = sd_index;
+ }
+ /* initialize the sd entry */
+ hmc_info->sd_table.sd_entry[sd_index].entry_type = type;
+
+ /* increment the ref count */
+ I40E_INC_SD_REFCNT(&hmc_info->sd_table);
+ }
+ /* Increment backing page reference count */
+ if (I40E_SD_TYPE_DIRECT == sd_entry->entry_type)
+ I40E_INC_BP_REFCNT(&sd_entry->u.bp);
+exit:
+ if (ret_code && dma_mem_alloc_done)
+ i40e_free_dma_mem(hw, &mem);
+
+ return ret_code;
+}
+
+/**
+ * i40e_add_pd_table_entry - Adds page descriptor to the specified table
+ * @hw: pointer to our HW structure
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @pd_index: which page descriptor index to manipulate
+ *
+ * This function:
+ * 1. Initializes the pd entry
+ * 2. Adds pd_entry in the pd_table
+ * 3. Marks the entry valid in the i40e_hmc_pd_entry structure
+ * 4. Initializes the pd_entry's ref count to 1
+ * assumptions:
+ * 1. The memory for the pd should be pinned down, physically contiguous,
+ *    aligned on a 4K boundary and zeroed.
+ * 2. It should be 4K in size.
+ **/
+i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 pd_index)
+{
+ i40e_status ret_code = 0;
+ struct i40e_hmc_pd_table *pd_table;
+ struct i40e_hmc_pd_entry *pd_entry;
+ struct i40e_dma_mem mem;
+ u32 sd_idx, rel_pd_idx;
+ u64 *pd_addr;
+ u64 page_desc;
+
+ if (pd_index / I40E_HMC_PD_CNT_IN_SD >= hmc_info->sd_table.sd_cnt) {
+ ret_code = I40E_ERR_INVALID_PAGE_DESC_INDEX;
+ hw_dbg(hw, "i40e_add_pd_table_entry: bad pd_index\n");
+ goto exit;
+ }
+
+ /* find corresponding sd */
+ sd_idx = (pd_index / I40E_HMC_PD_CNT_IN_SD);
+ if (I40E_SD_TYPE_PAGED !=
+ hmc_info->sd_table.sd_entry[sd_idx].entry_type)
+ goto exit;
+
+ rel_pd_idx = (pd_index % I40E_HMC_PD_CNT_IN_SD);
+ pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
+ pd_entry = &pd_table->pd_entry[rel_pd_idx];
+ if (!pd_entry->valid) {
+ /* allocate a 4K backing page */
+ ret_code = i40e_allocate_dma_mem(hw, &mem, i40e_mem_bp,
+ I40E_HMC_PAGED_BP_SIZE,
+ I40E_HMC_PD_BP_BUF_ALIGNMENT);
+ if (ret_code)
+ goto exit;
+
+ memcpy(&pd_entry->bp.addr, &mem, sizeof(struct i40e_dma_mem));
+ pd_entry->bp.sd_pd_index = pd_index;
+ pd_entry->bp.entry_type = I40E_SD_TYPE_PAGED;
+ /* Set page address and valid bit */
+ page_desc = mem.pa | 0x1;
+
+ pd_addr = (u64 *)pd_table->pd_page_addr.va;
+ pd_addr += rel_pd_idx;
+
+ /* Add the backing page physical address in the pd entry */
+ memcpy(pd_addr, &page_desc, sizeof(u64));
+
+ pd_entry->sd_index = sd_idx;
+ pd_entry->valid = true;
+ I40E_INC_PD_REFCNT(pd_table);
+ }
+ I40E_INC_BP_REFCNT(&pd_entry->bp);
+exit:
+ return ret_code;
+}
+
+/**
+ * i40e_remove_pd_bp - remove a backing page from a page descriptor
+ * @hw: pointer to our HW structure
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @idx: the page index
+ * @is_pf: distinguishes a VF from a PF
+ *
+ * This function:
+ * 1. Marks the entry in the pd table (for paged address mode) or in the
+ *    sd table (for direct address mode) invalid.
+ * 2. Writes to register PMPDINV to invalidate the backing page in FV cache
+ * 3. Decrements the ref count for the pd_entry
+ * assumptions:
+ * 1. Caller can deallocate the memory used by backing storage after this
+ * function returns.
+ **/
+i40e_status i40e_remove_pd_bp(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx, bool is_pf)
+{
+ i40e_status ret_code = 0;
+ struct i40e_hmc_pd_entry *pd_entry;
+ struct i40e_hmc_pd_table *pd_table;
+ struct i40e_hmc_sd_entry *sd_entry;
+ u32 sd_idx, rel_pd_idx;
+ u64 *pd_addr;
+
+ /* calculate index */
+ sd_idx = idx / I40E_HMC_PD_CNT_IN_SD;
+ rel_pd_idx = idx % I40E_HMC_PD_CNT_IN_SD;
+ if (sd_idx >= hmc_info->sd_table.sd_cnt) {
+ ret_code = I40E_ERR_INVALID_PAGE_DESC_INDEX;
+ hw_dbg(hw, "i40e_remove_pd_bp: bad idx\n");
+ goto exit;
+ }
+ sd_entry = &hmc_info->sd_table.sd_entry[sd_idx];
+ if (I40E_SD_TYPE_PAGED != sd_entry->entry_type) {
+ ret_code = I40E_ERR_INVALID_SD_TYPE;
+ hw_dbg(hw, "i40e_remove_pd_bp: wrong sd_entry type\n");
+ goto exit;
+ }
+ /* get the entry and decrease its ref counter */
+ pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
+ pd_entry = &pd_table->pd_entry[rel_pd_idx];
+ I40E_DEC_BP_REFCNT(&pd_entry->bp);
+ if (pd_entry->bp.ref_cnt)
+ goto exit;
+
+ /* mark the entry invalid */
+ pd_entry->valid = false;
+ I40E_DEC_PD_REFCNT(pd_table);
+ pd_addr = (u64 *)pd_table->pd_page_addr.va;
+ pd_addr += rel_pd_idx;
+ memset(pd_addr, 0, sizeof(u64));
+ if (is_pf)
+ I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, idx);
+ else
+ I40E_INVALIDATE_VF_HMC_PD(hw, sd_idx, idx, hmc_info->hmc_fn_id);
+
+ /* free memory here */
+ ret_code = i40e_free_dma_mem(hw, &(pd_entry->bp.addr));
+ if (ret_code)
+ goto exit;
+ if (!pd_table->ref_cnt)
+ i40e_free_virt_mem(hw, &pd_table->pd_entry_virt_mem);
+exit:
+ return ret_code;
+}
+
+/**
+ * i40e_prep_remove_sd_bp - Prepares to remove a backing page from an sd entry
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @idx: the page index
+ **/
+i40e_status i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info,
+ u32 idx)
+{
+ i40e_status ret_code = 0;
+ struct i40e_hmc_sd_entry *sd_entry;
+
+ /* get the entry and decrease its ref counter */
+ sd_entry = &hmc_info->sd_table.sd_entry[idx];
+ I40E_DEC_BP_REFCNT(&sd_entry->u.bp);
+ if (sd_entry->u.bp.ref_cnt) {
+ ret_code = I40E_ERR_NOT_READY;
+ goto exit;
+ }
+ I40E_DEC_SD_REFCNT(&hmc_info->sd_table);
+
+ /* mark the entry invalid */
+ sd_entry->valid = false;
+exit:
+ return ret_code;
+}
+
+/**
+ * i40e_remove_sd_bp_new - Removes a backing page from a segment descriptor
+ * @hw: pointer to our hw struct
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @idx: the page index
+ * @is_pf: used to distinguish between VF and PF
+ **/
+i40e_status i40e_remove_sd_bp_new(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx, bool is_pf)
+{
+ struct i40e_hmc_sd_entry *sd_entry;
+ i40e_status ret_code = 0;
+
+ /* get the entry and decrease its ref counter */
+ sd_entry = &hmc_info->sd_table.sd_entry[idx];
+ if (is_pf) {
+ I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_DIRECT);
+ } else {
+ ret_code = I40E_NOT_SUPPORTED;
+ goto exit;
+ }
+ ret_code = i40e_free_dma_mem(hw, &(sd_entry->u.bp.addr));
+ if (ret_code)
+ goto exit;
+exit:
+ return ret_code;
+}
+
+/**
+ * i40e_prep_remove_pd_page - Prepares to remove a PD page from an sd entry.
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @idx: segment descriptor index to find the relevant page descriptor
+ **/
+i40e_status i40e_prep_remove_pd_page(struct i40e_hmc_info *hmc_info,
+ u32 idx)
+{
+ i40e_status ret_code = 0;
+ struct i40e_hmc_sd_entry *sd_entry;
+
+ sd_entry = &hmc_info->sd_table.sd_entry[idx];
+
+ if (sd_entry->u.pd_table.ref_cnt) {
+ ret_code = I40E_ERR_NOT_READY;
+ goto exit;
+ }
+
+ /* mark the entry invalid */
+ sd_entry->valid = false;
+
+ I40E_DEC_SD_REFCNT(&hmc_info->sd_table);
+exit:
+ return ret_code;
+}
+
+/**
+ * i40e_remove_pd_page_new - Removes a PD page from an sd entry.
+ * @hw: pointer to our hw struct
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @idx: segment descriptor index to find the relevant page descriptor
+ * @is_pf: used to distinguish between VF and PF
+ **/
+i40e_status i40e_remove_pd_page_new(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx, bool is_pf)
+{
+ i40e_status ret_code = 0;
+ struct i40e_hmc_sd_entry *sd_entry;
+
+ sd_entry = &hmc_info->sd_table.sd_entry[idx];
+ if (is_pf) {
+ I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_PAGED);
+ } else {
+ ret_code = I40E_NOT_SUPPORTED;
+ goto exit;
+ }
+ /* free memory here */
+ ret_code = i40e_free_dma_mem(hw, &(sd_entry->u.pd_table.pd_page_addr));
+ if (ret_code)
+ goto exit;
+exit:
+ return ret_code;
+}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.h b/drivers/net/ethernet/intel/i40e/i40e_hmc.h
new file mode 100644
index 0000000..aacd42a
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.h
@@ -0,0 +1,245 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_HMC_H_
+#define _I40E_HMC_H_
+
+#define I40E_HMC_MAX_BP_COUNT 512
+
+/* forward-declare the HW struct for the compiler */
+struct i40e_hw;
+
+#define I40E_HMC_INFO_SIGNATURE 0x484D5347 /* HMSG */
+#define I40E_HMC_PD_CNT_IN_SD 512
+#define I40E_HMC_DIRECT_BP_SIZE 0x200000 /* 2M */
+#define I40E_HMC_PAGED_BP_SIZE 4096
+#define I40E_HMC_PD_BP_BUF_ALIGNMENT 4096
+#define I40E_FIRST_VF_FPM_ID 16
+
+struct i40e_hmc_obj_info {
+ u64 base; /* base addr in FPM */
+ u32 max_cnt; /* max count available for this hmc func */
+ u32 cnt; /* count of objects driver actually wants to create */
+ u64 size; /* size in bytes of one object */
+};
+
+enum i40e_sd_entry_type {
+ I40E_SD_TYPE_INVALID = 0,
+ I40E_SD_TYPE_PAGED = 1,
+ I40E_SD_TYPE_DIRECT = 2
+};
+
+struct i40e_hmc_bp {
+ enum i40e_sd_entry_type entry_type;
+ struct i40e_dma_mem addr; /* populate to be used by hw */
+ u32 sd_pd_index;
+ u32 ref_cnt;
+};
+
+struct i40e_hmc_pd_entry {
+ struct i40e_hmc_bp bp;
+ u32 sd_index;
+ bool valid;
+};
+
+struct i40e_hmc_pd_table {
+ struct i40e_dma_mem pd_page_addr; /* populate to be used by hw */
+ struct i40e_hmc_pd_entry *pd_entry; /* [512] for sw book keeping */
+ struct i40e_virt_mem pd_entry_virt_mem; /* virt mem for pd_entry */
+
+ u32 ref_cnt;
+ u32 sd_index;
+};
+
+struct i40e_hmc_sd_entry {
+ enum i40e_sd_entry_type entry_type;
+ bool valid;
+
+ union {
+ struct i40e_hmc_pd_table pd_table;
+ struct i40e_hmc_bp bp;
+ } u;
+};
+
+struct i40e_hmc_sd_table {
+ struct i40e_virt_mem addr; /* used to track sd_entry allocations */
+ u32 sd_cnt;
+ u32 ref_cnt;
+ struct i40e_hmc_sd_entry *sd_entry; /* (sd_cnt*512) entries max */
+};
+
+struct i40e_hmc_info {
+ u32 signature;
+ /* equals to pci func num for PF and dynamically allocated for VFs */
+ u8 hmc_fn_id;
+ u16 first_sd_index; /* index of the first available SD */
+
+ /* hmc objects */
+ struct i40e_hmc_obj_info *hmc_obj;
+ struct i40e_virt_mem hmc_obj_virt_mem;
+ struct i40e_hmc_sd_table sd_table;
+};
+
+#define I40E_INC_SD_REFCNT(sd_table) ((sd_table)->ref_cnt++)
+#define I40E_INC_PD_REFCNT(pd_table) ((pd_table)->ref_cnt++)
+#define I40E_INC_BP_REFCNT(bp) ((bp)->ref_cnt++)
+
+#define I40E_DEC_SD_REFCNT(sd_table) ((sd_table)->ref_cnt--)
+#define I40E_DEC_PD_REFCNT(pd_table) ((pd_table)->ref_cnt--)
+#define I40E_DEC_BP_REFCNT(bp) ((bp)->ref_cnt--)
+
+/**
+ * I40E_SET_PF_SD_ENTRY - marks the sd entry as valid in the hardware
+ * @hw: pointer to our hw struct
+ * @pa: physical address of the backing page
+ * @sd_index: segment descriptor index
+ * @type: if sd entry is direct or paged
+ **/
+#define I40E_SET_PF_SD_ENTRY(hw, pa, sd_index, type) \
+{ \
+ u32 val1, val2, val3; \
+ val1 = (u32)(upper_32_bits(pa)); \
+ val2 = (u32)(pa) | (I40E_HMC_MAX_BP_COUNT << \
+ I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | \
+ ((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) << \
+ I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT) | \
+ (1 << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT); \
+ val3 = (sd_index) | (1 << I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \
+ wr32((hw), I40E_PFHMC_SDDATAHIGH, val1); \
+ wr32((hw), I40E_PFHMC_SDDATALOW, val2); \
+ wr32((hw), I40E_PFHMC_SDCMD, val3); \
+}
+
+/**
+ * I40E_CLEAR_PF_SD_ENTRY - marks the sd entry as invalid in the hardware
+ * @hw: pointer to our hw struct
+ * @sd_index: segment descriptor index
+ * @type: if sd entry is direct or paged
+ **/
+#define I40E_CLEAR_PF_SD_ENTRY(hw, sd_index, type) \
+{ \
+ u32 val2, val3; \
+ val2 = (I40E_HMC_MAX_BP_COUNT << \
+ I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | \
+ ((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) << \
+ I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT); \
+ val3 = (sd_index) | (1 << I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \
+ wr32((hw), I40E_PFHMC_SDDATAHIGH, 0); \
+ wr32((hw), I40E_PFHMC_SDDATALOW, val2); \
+ wr32((hw), I40E_PFHMC_SDCMD, val3); \
+}
+
+/**
+ * I40E_INVALIDATE_PF_HMC_PD - Invalidates the pd cache in the hardware
+ * @hw: pointer to our hw struct
+ * @sd_idx: segment descriptor index
+ * @pd_idx: page descriptor index
+ **/
+#define I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, pd_idx) \
+ wr32((hw), I40E_PFHMC_PDINV, \
+ (((sd_idx) << I40E_PFHMC_PDINV_PMSDIDX_SHIFT) | \
+ ((pd_idx) << I40E_PFHMC_PDINV_PMPDIDX_SHIFT)))
+
+#define I40E_INVALIDATE_VF_HMC_PD(hw, sd_idx, pd_idx, hmc_fn_id) \
+ wr32((hw), I40E_GLHMC_VFPDINV((hmc_fn_id) - I40E_FIRST_VF_FPM_ID), \
+ (((sd_idx) << I40E_PFHMC_PDINV_PMSDIDX_SHIFT) | \
+ ((pd_idx) << I40E_PFHMC_PDINV_PMPDIDX_SHIFT)))
+
+/**
+ * I40E_FIND_SD_INDEX_LIMIT - finds segment descriptor index limit
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @type: type of HMC resources we're searching
+ * @index: starting index for the object
+ * @cnt: number of objects we're trying to create
+ * @sd_idx: pointer to return index of the segment descriptor in question
+ * @sd_limit: pointer to return the maximum number of segment descriptors
+ *
+ * This function calculates the segment descriptor index and index limit
+ * for the resource defined by i40e_hmc_rsrc_type.
+ **/
+#define I40E_FIND_SD_INDEX_LIMIT(hmc_info, type, index, cnt, sd_idx, sd_limit)\
+{ \
+ u64 fpm_addr, fpm_limit; \
+ fpm_addr = (hmc_info)->hmc_obj[(type)].base + \
+ (hmc_info)->hmc_obj[(type)].size * (index); \
+ fpm_limit = fpm_addr + (hmc_info)->hmc_obj[(type)].size * (cnt);\
+ *(sd_idx) = (u32)(fpm_addr / I40E_HMC_DIRECT_BP_SIZE); \
+ *(sd_limit) = (u32)((fpm_limit - 1) / I40E_HMC_DIRECT_BP_SIZE); \
+ /* add one more to the limit to correct our range */ \
+ *(sd_limit) += 1; \
+}
+
+/**
+ * I40E_FIND_PD_INDEX_LIMIT - finds page descriptor index limit
+ * @hmc_info: pointer to the HMC configuration information struct
+ * @type: HMC resource type we're examining
+ * @idx: starting index for the object
+ * @cnt: number of objects we're trying to create
+ * @pd_index: pointer to return page descriptor index
+ * @pd_limit: pointer to return page descriptor index limit
+ *
+ * Calculates the page descriptor index and index limit for the resource
+ * defined by i40e_hmc_rsrc_type.
+ **/
+#define I40E_FIND_PD_INDEX_LIMIT(hmc_info, type, idx, cnt, pd_index, pd_limit)\
+{ \
+ u64 fpm_adr, fpm_limit; \
+ fpm_adr = (hmc_info)->hmc_obj[(type)].base + \
+ (hmc_info)->hmc_obj[(type)].size * (idx); \
+ fpm_limit = fpm_adr + (hmc_info)->hmc_obj[(type)].size * (cnt); \
+ *(pd_index) = (u32)(fpm_adr / I40E_HMC_PAGED_BP_SIZE); \
+ *(pd_limit) = (u32)((fpm_limit - 1) / I40E_HMC_PAGED_BP_SIZE); \
+ /* add one more to the limit to correct our range */ \
+ *(pd_limit) += 1; \
+}
+
+i40e_status i40e_add_sd_table_entry(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 sd_index,
+ enum i40e_sd_entry_type type,
+ u64 direct_mode_sz);
+
+i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 pd_index);
+i40e_status i40e_remove_pd_bp(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx, bool is_pf);
+i40e_status i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info,
+ u32 idx);
+i40e_status i40e_remove_sd_bp_new(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx, bool is_pf);
+i40e_status i40e_prep_remove_pd_page(struct i40e_hmc_info *hmc_info,
+ u32 idx);
+i40e_status i40e_remove_pd_page_new(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx, bool is_pf);
+
+#endif /* _I40E_HMC_H_ */
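
As a stand-alone illustration of the index arithmetic in
I40E_FIND_SD_INDEX_LIMIT and I40E_FIND_PD_INDEX_LIMIT above; the base,
object size and index below are made-up values rather than a real
hmc_obj layout:

	#include <stdio.h>
	#include <stdint.h>

	#define DIRECT_BP_SIZE 0x200000ULL	/* I40E_HMC_DIRECT_BP_SIZE, 2M */
	#define PAGED_BP_SIZE  4096ULL		/* I40E_HMC_PAGED_BP_SIZE */

	int main(void)
	{
		uint64_t base = 0, size = 128;		/* assumed layout */
		uint64_t index = 40000, cnt = 1;
		uint64_t fpm_addr = base + size * index;	/* 5120000 */
		uint64_t fpm_limit = fpm_addr + size * cnt;
		/* one-past-the-end limits, exactly as the macros compute */
		uint32_t sd_idx = fpm_addr / DIRECT_BP_SIZE;		  /* 2 */
		uint32_t sd_limit = (fpm_limit - 1) / DIRECT_BP_SIZE + 1; /* 3 */
		uint32_t pd_idx = fpm_addr / PAGED_BP_SIZE;		  /* 1250 */
		uint32_t pd_limit = (fpm_limit - 1) / PAGED_BP_SIZE + 1;  /* 1251 */

		printf("sd %u..%u, pd %u..%u\n", sd_idx, sd_limit,
		       pd_idx, pd_limit);
		return 0;
	}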
diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
new file mode 100644
index 0000000..a695b91
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
@@ -0,0 +1,1006 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#include "i40e_osdep.h"
+#include "i40e_register.h"
+#include "i40e_type.h"
+#include "i40e_hmc.h"
+#include "i40e_lan_hmc.h"
+#include "i40e_prototype.h"
+
+/* lan specific interface functions */
+
+/**
+ * i40e_align_l2obj_base - aligns base object pointer to 512 bytes
+ * @offset: base address offset needing alignment
+ *
+ * Aligns the layer 2 function private memory to a 512-byte boundary.
+ **/
+static u64 i40e_align_l2obj_base(u64 offset)
+{
+ u64 aligned_offset = offset;
+
+ if ((offset % I40E_HMC_L2OBJ_BASE_ALIGNMENT) > 0)
+ aligned_offset += (I40E_HMC_L2OBJ_BASE_ALIGNMENT -
+ (offset % I40E_HMC_L2OBJ_BASE_ALIGNMENT));
+
+ return aligned_offset;
+}
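
As a worked example of the rounding above: with the 512-byte alignment
used here, an offset of 1000 leaves a remainder of 488, so it is
advanced by 512 - 488 = 24 to the next boundary at 1024.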
+
+/**
+ * i40e_calculate_l2fpm_size - calculates layer 2 FPM memory size
+ * @txq_num: number of Tx queues needing backing context
+ * @rxq_num: number of Rx queues needing backing context
+ * @fcoe_cntx_num: number of FCoE stateful contexts needing backing context
+ * @fcoe_filt_num: number of FCoE filters needing backing context
+ *
+ * Calculates the maximum amount of memory the function requires, based
+ * on the number of resources it must provide context for.
+ **/
+static u64 i40e_calculate_l2fpm_size(u32 txq_num, u32 rxq_num,
+ u32 fcoe_cntx_num, u32 fcoe_filt_num)
+{
+ u64 fpm_size = 0;
+
+ fpm_size = txq_num * I40E_HMC_OBJ_SIZE_TXQ;
+ fpm_size = i40e_align_l2obj_base(fpm_size);
+
+ fpm_size += (rxq_num * I40E_HMC_OBJ_SIZE_RXQ);
+ fpm_size = i40e_align_l2obj_base(fpm_size);
+
+ fpm_size += (fcoe_cntx_num * I40E_HMC_OBJ_SIZE_FCOE_CNTX);
+ fpm_size = i40e_align_l2obj_base(fpm_size);
+
+ fpm_size += (fcoe_filt_num * I40E_HMC_OBJ_SIZE_FCOE_FILT);
+ fpm_size = i40e_align_l2obj_base(fpm_size);
+
+ return fpm_size;
+}
+
+/**
+ * i40e_init_lan_hmc - initialize i40e_hmc_info struct
+ * @hw: pointer to the HW structure
+ * @txq_num: number of Tx queues needing backing context
+ * @rxq_num: number of Rx queues needing backing context
+ * @fcoe_cntx_num: number of FCoE stateful contexts needing backing context
+ * @fcoe_filt_num: number of FCoE filters needing backing context
+ *
+ * This function will be called once per physical function initialization.
+ * It will fill out the i40e_hmc_obj_info structure for LAN objects based on
+ * the driver's provided input, as well as information from the HMC itself
+ * loaded from NVRAM.
+ *
+ * Assumptions:
+ * - HMC Resource Profile has been selected before calling this function.
+ **/
+i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
+ u32 rxq_num, u32 fcoe_cntx_num,
+ u32 fcoe_filt_num)
+{
+ struct i40e_hmc_obj_info *obj, *full_obj;
+ i40e_status ret_code = 0;
+ u64 l2fpm_size;
+ u32 size_exp;
+
+ hw->hmc.signature = I40E_HMC_INFO_SIGNATURE;
+ hw->hmc.hmc_fn_id = hw->pf_id;
+
+ /* allocate memory for hmc_obj */
+ ret_code = i40e_allocate_virt_mem(hw, &hw->hmc.hmc_obj_virt_mem,
+ sizeof(struct i40e_hmc_obj_info) * I40E_HMC_LAN_MAX);
+ if (ret_code)
+ goto init_lan_hmc_out;
+ hw->hmc.hmc_obj = (struct i40e_hmc_obj_info *)
+ hw->hmc.hmc_obj_virt_mem.va;
+
+ /* The full object will be used to create the LAN HMC SD */
+ full_obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_FULL];
+ full_obj->max_cnt = 0;
+ full_obj->cnt = 0;
+ full_obj->base = 0;
+ full_obj->size = 0;
+
+ /* Tx queue context information */
+ obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_TX];
+ obj->max_cnt = rd32(hw, I40E_GLHMC_LANQMAX);
+ obj->cnt = txq_num;
+ obj->base = 0;
+ size_exp = rd32(hw, I40E_GLHMC_LANTXOBJSZ);
+ obj->size = (u64)1 << size_exp;
+
+ /* validate values requested by driver don't exceed HMC capacity */
+ if (txq_num > obj->max_cnt) {
+ ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
+ hw_dbg(hw, "i40e_init_lan_hmc: Tx context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
+ txq_num, obj->max_cnt, ret_code);
+ goto init_lan_hmc_out;
+ }
+
+ /* aggregate values into the full LAN object for later */
+ full_obj->max_cnt += obj->max_cnt;
+ full_obj->cnt += obj->cnt;
+
+ /* Rx queue context information */
+ obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_RX];
+ obj->max_cnt = rd32(hw, I40E_GLHMC_LANQMAX);
+ obj->cnt = rxq_num;
+ obj->base = hw->hmc.hmc_obj[I40E_HMC_LAN_TX].base +
+ (hw->hmc.hmc_obj[I40E_HMC_LAN_TX].cnt *
+ hw->hmc.hmc_obj[I40E_HMC_LAN_TX].size);
+ obj->base = i40e_align_l2obj_base(obj->base);
+ size_exp = rd32(hw, I40E_GLHMC_LANRXOBJSZ);
+ obj->size = (u64)1 << size_exp;
+
+ /* validate values requested by driver don't exceed HMC capacity */
+ if (rxq_num > obj->max_cnt) {
+ ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
+ hw_dbg(hw, "i40e_init_lan_hmc: Rx context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
+ rxq_num, obj->max_cnt, ret_code);
+ goto init_lan_hmc_out;
+ }
+
+ /* aggregate values into the full LAN object for later */
+ full_obj->max_cnt += obj->max_cnt;
+ full_obj->cnt += obj->cnt;
+
+ /* FCoE context information */
+ obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX];
+ obj->max_cnt = rd32(hw, I40E_GLHMC_FCOEMAX);
+ obj->cnt = fcoe_cntx_num;
+ obj->base = hw->hmc.hmc_obj[I40E_HMC_LAN_RX].base +
+ (hw->hmc.hmc_obj[I40E_HMC_LAN_RX].cnt *
+ hw->hmc.hmc_obj[I40E_HMC_LAN_RX].size);
+ obj->base = i40e_align_l2obj_base(obj->base);
+ size_exp = rd32(hw, I40E_GLHMC_FCOEDDPOBJSZ);
+ obj->size = (u64)1 << size_exp;
+
+ /* validate values requested by driver don't exceed HMC capacity */
+ if (fcoe_cntx_num > obj->max_cnt) {
+ ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
+ hw_dbg(hw, "i40e_init_lan_hmc: FCoE context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
+ fcoe_cntx_num, obj->max_cnt, ret_code);
+ goto init_lan_hmc_out;
+ }
+
+ /* aggregate values into the full LAN object for later */
+ full_obj->max_cnt += obj->max_cnt;
+ full_obj->cnt += obj->cnt;
+
+ /* FCoE filter information */
+ obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_FILT];
+ obj->max_cnt = rd32(hw, I40E_GLHMC_FCOEFMAX);
+ obj->cnt = fcoe_filt_num;
+ obj->base = hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].base +
+ (hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].cnt *
+ hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].size);
+ obj->base = i40e_align_l2obj_base(obj->base);
+ size_exp = rd32(hw, I40E_GLHMC_FCOEFOBJSZ);
+ obj->size = (u64)1 << size_exp;
+
+ /* validate values requested by driver don't exceed HMC capacity */
+ if (fcoe_filt_num > obj->max_cnt) {
+ ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
+ hw_dbg(hw, "i40e_init_lan_hmc: FCoE filter: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
+ fcoe_filt_num, obj->max_cnt, ret_code);
+ goto init_lan_hmc_out;
+ }
+
+ /* aggregate values into the full LAN object for later */
+ full_obj->max_cnt += obj->max_cnt;
+ full_obj->cnt += obj->cnt;
+
+ hw->hmc.first_sd_index = 0;
+ hw->hmc.sd_table.ref_cnt = 0;
+ l2fpm_size = i40e_calculate_l2fpm_size(txq_num, rxq_num, fcoe_cntx_num,
+ fcoe_filt_num);
+ if (NULL == hw->hmc.sd_table.sd_entry) {
+ hw->hmc.sd_table.sd_cnt = (u32)
+ (l2fpm_size + I40E_HMC_DIRECT_BP_SIZE - 1) /
+ I40E_HMC_DIRECT_BP_SIZE;
+
+ /* allocate the sd_entry members in the sd_table */
+ ret_code = i40e_allocate_virt_mem(hw, &hw->hmc.sd_table.addr,
+ (sizeof(struct i40e_hmc_sd_entry) *
+ hw->hmc.sd_table.sd_cnt));
+ if (ret_code)
+ goto init_lan_hmc_out;
+ hw->hmc.sd_table.sd_entry =
+ (struct i40e_hmc_sd_entry *)hw->hmc.sd_table.addr.va;
+ }
+ /* store in the LAN full object for later */
+ full_obj->size = l2fpm_size;
+
+init_lan_hmc_out:
+ return ret_code;
+}
+
+/**
+ * i40e_remove_pd_page - Remove a page from the page descriptor table
+ * @hw: pointer to the HW structure
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @idx: segment descriptor index to find the relevant page descriptor
+ *
+ * This function:
+ * 1. Marks the entry in pd table (for paged address mode) invalid
+ * 2. write to register PMPDINV to invalidate the backing page in FV cache
+ * 3. Decrement the ref count for pd_entry
+ * assumptions:
+ * 1. caller can deallocate the memory used by pd after this function
+ * returns.
+ **/
+static i40e_status i40e_remove_pd_page(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx)
+{
+ i40e_status ret_code = 0;
+
+ if (!i40e_prep_remove_pd_page(hmc_info, idx))
+ ret_code = i40e_remove_pd_page_new(hw, hmc_info, idx, true);
+
+ return ret_code;
+}
+
+/**
+ * i40e_remove_sd_bp - remove a backing page from a segment descriptor
+ * @hw: pointer to our HW structure
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @idx: the page index
+ *
+ * This function:
+ * 1. Marks the entry in sd table (for direct address mode) invalid
+ * 2. write to register PMSDCMD, PMSDDATALOW(PMSDDATALOW.PMSDVALID set
+ * to 0) and PMSDDATAHIGH to invalidate the sd page
+ * 3. Decrement the ref count for the sd_entry
+ * assumptions:
+ * 1. caller can deallocate the memory used by backing storage after this
+ * function returns.
+ **/
+static i40e_status i40e_remove_sd_bp(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx)
+{
+ i40e_status ret_code = 0;
+
+ if (!i40e_prep_remove_sd_bp(hmc_info, idx))
+ ret_code = i40e_remove_sd_bp_new(hw, hmc_info, idx, true);
+
+ return ret_code;
+}
+
+/**
+ * i40e_create_lan_hmc_object - allocate backing store for hmc objects
+ * @hw: pointer to the HW structure
+ * @info: pointer to i40e_hmc_lan_create_obj_info struct
+ *
+ * This will allocate memory for PDs and backing pages and populate
+ * the sd and pd entries.
+ **/
+static i40e_status i40e_create_lan_hmc_object(struct i40e_hw *hw,
+ struct i40e_hmc_lan_create_obj_info *info)
+{
+ i40e_status ret_code = 0;
+ struct i40e_hmc_sd_entry *sd_entry;
+ u32 pd_idx1 = 0, pd_lmt1 = 0;
+ u32 pd_idx = 0, pd_lmt = 0;
+ bool pd_error = false;
+ u32 sd_idx, sd_lmt;
+ u64 sd_size;
+ u32 i, j;
+
+ if (NULL == info) {
+ ret_code = I40E_ERR_BAD_PTR;
+ hw_dbg(hw, "i40e_create_lan_hmc_object: bad info ptr\n");
+ goto exit;
+ }
+ if (NULL == info->hmc_info) {
+ ret_code = I40E_ERR_BAD_PTR;
+ hw_dbg(hw, "i40e_create_lan_hmc_object: bad hmc_info ptr\n");
+ goto exit;
+ }
+ if (I40E_HMC_INFO_SIGNATURE != info->hmc_info->signature) {
+ ret_code = I40E_ERR_BAD_PTR;
+ hw_dbg(hw, "i40e_create_lan_hmc_object: bad signature\n");
+ goto exit;
+ }
+
+ if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
+ ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX;
+ hw_dbg(hw, "i40e_create_lan_hmc_object: returns error %d\n",
+ ret_code);
+ goto exit;
+ }
+ if ((info->start_idx + info->count) >
+ info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
+ ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
+ hw_dbg(hw, "i40e_create_lan_hmc_object: returns error %d\n",
+ ret_code);
+ goto exit;
+ }
+
+ /* find sd index and limit */
+ I40E_FIND_SD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
+ info->start_idx, info->count,
+ &sd_idx, &sd_lmt);
+ if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
+ sd_lmt > info->hmc_info->sd_table.sd_cnt) {
+ ret_code = I40E_ERR_INVALID_SD_INDEX;
+ goto exit;
+ }
+ /* find pd index */
+ I40E_FIND_PD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
+ info->start_idx, info->count, &pd_idx,
+ &pd_lmt);
+
+ /* This covers cases where the caller wants an SD smaller than the
+ * full 2M of memory. If no size is given, the SD size defaults
+ * to 2M.
+ */
+ if (info->direct_mode_sz == 0)
+ sd_size = I40E_HMC_DIRECT_BP_SIZE;
+ else
+ sd_size = info->direct_mode_sz;
+
+ /* check if all the sds are valid. If not, allocate a page and
+ * initialize it.
+ */
+ for (j = sd_idx; j < sd_lmt; j++) {
+ /* update the sd table entry */
+ ret_code = i40e_add_sd_table_entry(hw, info->hmc_info, j,
+ info->entry_type,
+ sd_size);
+ if (ret_code)
+ goto exit_sd_error;
+ sd_entry = &info->hmc_info->sd_table.sd_entry[j];
+ if (I40E_SD_TYPE_PAGED == sd_entry->entry_type) {
+ /* check if all the pds in this sd are valid. If not,
+ * allocate a page and initialize it.
+ */
+
+ /* find pd_idx and pd_lmt in this sd */
+ pd_idx1 = max(pd_idx, (j * I40E_HMC_MAX_BP_COUNT));
+ pd_lmt1 = min(pd_lmt,
+ ((j + 1) * I40E_HMC_MAX_BP_COUNT));
+ for (i = pd_idx1; i < pd_lmt1; i++) {
+ /* update the pd table entry */
+ ret_code = i40e_add_pd_table_entry(hw,
+ info->hmc_info,
+ i);
+ if (ret_code) {
+ pd_error = true;
+ break;
+ }
+ }
+ if (pd_error) {
+ /* remove the backing pages from pd_idx1 to i */
+ while (i && (i > pd_idx1)) {
+ i40e_remove_pd_bp(hw, info->hmc_info,
+ (i - 1), true);
+ i--;
+ }
+ }
+ }
+ if (!sd_entry->valid) {
+ sd_entry->valid = true;
+ switch (sd_entry->entry_type) {
+ case I40E_SD_TYPE_PAGED:
+ I40E_SET_PF_SD_ENTRY(hw,
+ sd_entry->u.pd_table.pd_page_addr.pa,
+ j, sd_entry->entry_type);
+ break;
+ case I40E_SD_TYPE_DIRECT:
+ I40E_SET_PF_SD_ENTRY(hw, sd_entry->u.bp.addr.pa,
+ j, sd_entry->entry_type);
+ break;
+ default:
+ ret_code = I40E_ERR_INVALID_SD_TYPE;
+ goto exit;
+ }
+ }
+ }
+ goto exit;
+
+exit_sd_error:
+ /* cleanup for sd entries from j to sd_idx */
+ while (j && (j > sd_idx)) {
+ sd_entry = &info->hmc_info->sd_table.sd_entry[j - 1];
+ switch (sd_entry->entry_type) {
+ case I40E_SD_TYPE_PAGED:
+ pd_idx1 = max(pd_idx,
+ ((j - 1) * I40E_HMC_MAX_BP_COUNT));
+ pd_lmt1 = min(pd_lmt, (j * I40E_HMC_MAX_BP_COUNT));
+ for (i = pd_idx1; i < pd_lmt1; i++) {
+ i40e_remove_pd_bp(hw, info->hmc_info,
+ i, true);
+ }
+ i40e_remove_pd_page(hw, info->hmc_info, (j - 1));
+ break;
+ case I40E_SD_TYPE_DIRECT:
+ i40e_remove_sd_bp(hw, info->hmc_info, (j - 1));
+ break;
+ default:
+ ret_code = I40E_ERR_INVALID_SD_TYPE;
+ break;
+ }
+ j--;
+ }
+exit:
+ return ret_code;
+}
+
+/**
+ * i40e_configure_lan_hmc - prepare the HMC backing store
+ * @hw: pointer to the hw structure
+ * @model: the model for the layout of the SD/PD tables
+ *
+ * - This function will be called once per physical function initialization.
+ * - This function will be called after i40e_init_lan_hmc() and before
+ * any LAN/FCoE HMC objects can be created.
+ **/
+i40e_status i40e_configure_lan_hmc(struct i40e_hw *hw,
+ enum i40e_hmc_model model)
+{
+ struct i40e_hmc_lan_create_obj_info info;
+ i40e_status ret_code = 0;
+ u8 hmc_fn_id = hw->hmc.hmc_fn_id;
+ struct i40e_hmc_obj_info *obj;
+
+ /* Initialize part of the create object info struct */
+ info.hmc_info = &hw->hmc;
+ info.rsrc_type = I40E_HMC_LAN_FULL;
+ info.start_idx = 0;
+ info.direct_mode_sz = hw->hmc.hmc_obj[I40E_HMC_LAN_FULL].size;
+
+ /* Build the SD entry for the LAN objects */
+ switch (model) {
+ case I40E_HMC_MODEL_DIRECT_PREFERRED:
+ case I40E_HMC_MODEL_DIRECT_ONLY:
+ info.entry_type = I40E_SD_TYPE_DIRECT;
+ /* Make one big object, a single SD */
+ info.count = 1;
+ ret_code = i40e_create_lan_hmc_object(hw, &info);
+ if ((ret_code) &&
+ (model == I40E_HMC_MODEL_DIRECT_PREFERRED))
+ goto try_type_paged;
+ else if (ret_code)
+ goto configure_lan_hmc_out;
+ /* else fall through to the break */
+ break;
+ case I40E_HMC_MODEL_PAGED_ONLY:
+try_type_paged:
+ info.entry_type = I40E_SD_TYPE_PAGED;
+ /* Make one big object in the PD table */
+ info.count = 1;
+ ret_code = i40e_create_lan_hmc_object(hw, &info);
+ if (ret_code)
+ goto configure_lan_hmc_out;
+ break;
+ default:
+ /* unsupported type */
+ ret_code = I40E_ERR_INVALID_SD_TYPE;
+ hw_dbg(hw, "i40e_configure_lan_hmc: Unknown SD type: %d\n",
+ ret_code);
+ goto configure_lan_hmc_out;
+ }
+
+ /* Configure and program the FPM registers so objects can be created */
+
+ /* Tx contexts */
+ obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_TX];
+ wr32(hw, I40E_GLHMC_LANTXBASE(hmc_fn_id),
+ (u32)((obj->base & I40E_GLHMC_LANTXBASE_FPMLANTXBASE_MASK) / 512));
+ wr32(hw, I40E_GLHMC_LANTXCNT(hmc_fn_id), obj->cnt);
+
+ /* Rx contexts */
+ obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_RX];
+ wr32(hw, I40E_GLHMC_LANRXBASE(hmc_fn_id),
+ (u32)((obj->base & I40E_GLHMC_LANRXBASE_FPMLANRXBASE_MASK) / 512));
+ wr32(hw, I40E_GLHMC_LANRXCNT(hmc_fn_id), obj->cnt);
+
+ /* FCoE contexts */
+ obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX];
+ wr32(hw, I40E_GLHMC_FCOEDDPBASE(hmc_fn_id),
+ (u32)((obj->base & I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_MASK) / 512));
+ wr32(hw, I40E_GLHMC_FCOEDDPCNT(hmc_fn_id), obj->cnt);
+
+ /* FCoE filters */
+ obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_FILT];
+ wr32(hw, I40E_GLHMC_FCOEFBASE(hmc_fn_id),
+ (u32)((obj->base & I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_MASK) / 512));
+ wr32(hw, I40E_GLHMC_FCOEFCNT(hmc_fn_id), obj->cnt);
+
+configure_lan_hmc_out:
+ return ret_code;
+}
+
+/**
+ * i40e_delete_lan_hmc_object - remove hmc objects
+ * @hw: pointer to the HW structure
+ * @info: pointer to i40e_hmc_lan_delete_obj_info struct
+ *
+ * This will de-populate the SDs and PDs. It frees
+ * the memory for PDs and backing storage. After this function returns,
+ * the caller should deallocate the memory previously allocated for
+ * book-keeping information about PDs and backing storage.
+ **/
+static i40e_status i40e_delete_lan_hmc_object(struct i40e_hw *hw,
+ struct i40e_hmc_lan_delete_obj_info *info)
+{
+ i40e_status ret_code = 0;
+ struct i40e_hmc_pd_table *pd_table;
+ u32 pd_idx, pd_lmt, rel_pd_idx;
+ u32 sd_idx, sd_lmt;
+ u32 i, j;
+
+ if (NULL == info) {
+ ret_code = I40E_ERR_BAD_PTR;
+ hw_dbg(hw, "i40e_delete_hmc_object: bad info ptr\n");
+ goto exit;
+ }
+ if (NULL == info->hmc_info) {
+ ret_code = I40E_ERR_BAD_PTR;
+ hw_dbg(hw, "i40e_delete_hmc_object: bad info->hmc_info ptr\n");
+ goto exit;
+ }
+ if (I40E_HMC_INFO_SIGNATURE != info->hmc_info->signature) {
+ ret_code = I40E_ERR_BAD_PTR;
+ hw_dbg(hw, "i40e_delete_hmc_object: bad hmc_info->signature\n");
+ goto exit;
+ }
+
+ if (NULL == info->hmc_info->sd_table.sd_entry) {
+ ret_code = I40E_ERR_BAD_PTR;
+ hw_dbg(hw, "i40e_delete_hmc_object: bad sd_entry\n");
+ goto exit;
+ }
+
+ if (NULL == info->hmc_info->hmc_obj) {
+ ret_code = I40E_ERR_BAD_PTR;
+ hw_dbg(hw, "i40e_delete_hmc_object: bad hmc_info->hmc_obj\n");
+ goto exit;
+ }
+ if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
+ ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX;
+ hw_dbg(hw, "i40e_delete_hmc_object: returns error %d\n",
+ ret_code);
+ goto exit;
+ }
+
+ if ((info->start_idx + info->count) >
+ info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
+ ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
+ hw_dbg(hw, "i40e_delete_hmc_object: returns error %d\n",
+ ret_code);
+ goto exit;
+ }
+
+ I40E_FIND_PD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
+ info->start_idx, info->count, &pd_idx,
+ &pd_lmt);
+
+ for (j = pd_idx; j < pd_lmt; j++) {
+ sd_idx = j / I40E_HMC_PD_CNT_IN_SD;
+
+ if (I40E_SD_TYPE_PAGED !=
+ info->hmc_info->sd_table.sd_entry[sd_idx].entry_type)
+ continue;
+
+ rel_pd_idx = j % I40E_HMC_PD_CNT_IN_SD;
+
+ pd_table =
+ &info->hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
+ if (pd_table->pd_entry[rel_pd_idx].valid) {
+ ret_code = i40e_remove_pd_bp(hw, info->hmc_info,
+ j, true);
+ if (ret_code)
+ goto exit;
+ }
+ }
+
+ /* find sd index and limit */
+ I40E_FIND_SD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
+ info->start_idx, info->count,
+ &sd_idx, &sd_lmt);
+ if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
+ sd_lmt > info->hmc_info->sd_table.sd_cnt) {
+ ret_code = I40E_ERR_INVALID_SD_INDEX;
+ goto exit;
+ }
+
+ for (i = sd_idx; i < sd_lmt; i++) {
+ if (!info->hmc_info->sd_table.sd_entry[i].valid)
+ continue;
+ switch (info->hmc_info->sd_table.sd_entry[i].entry_type) {
+ case I40E_SD_TYPE_DIRECT:
+ ret_code = i40e_remove_sd_bp(hw, info->hmc_info, i);
+ if (ret_code)
+ goto exit;
+ break;
+ case I40E_SD_TYPE_PAGED:
+ ret_code = i40e_remove_pd_page(hw, info->hmc_info, i);
+ if (ret_code)
+ goto exit;
+ break;
+ default:
+ break;
+ }
+ }
+exit:
+ return ret_code;
+}
+
+/**
+ * i40e_shutdown_lan_hmc - Remove HMC backing store, free allocated memory
+ * @hw: pointer to the hw structure
+ *
+ * This must be called by drivers as they are shutting down and being
+ * removed from the OS.
+ **/
+i40e_status i40e_shutdown_lan_hmc(struct i40e_hw *hw)
+{
+ struct i40e_hmc_lan_delete_obj_info info;
+ i40e_status ret_code;
+
+ info.hmc_info = &hw->hmc;
+ info.rsrc_type = I40E_HMC_LAN_FULL;
+ info.start_idx = 0;
+ info.count = 1;
+
+ /* delete the object */
+ ret_code = i40e_delete_lan_hmc_object(hw, &info);
+
+ /* free the SD table entry for LAN */
+ i40e_free_virt_mem(hw, &hw->hmc.sd_table.addr);
+ hw->hmc.sd_table.sd_cnt = 0;
+ hw->hmc.sd_table.sd_entry = NULL;
+
+ /* free memory used for hmc_obj */
+ i40e_free_virt_mem(hw, &hw->hmc.hmc_obj_virt_mem);
+ hw->hmc.hmc_obj = NULL;
+
+ return ret_code;
+}
+
+#define I40E_HMC_STORE(_struct, _ele) \
+ offsetof(struct _struct, _ele), \
+ FIELD_SIZEOF(struct _struct, _ele)
+
+struct i40e_context_ele {
+ u16 offset;
+ u16 size_of;
+ u16 width;
+ u16 lsb;
+};
+
+/* LAN Tx Queue Context */
+static struct i40e_context_ele i40e_hmc_txq_ce_info[] = {
+ /* Field Width LSB */
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, head), 13, 0 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, new_context), 1, 30 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, base), 57, 32 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, fc_ena), 1, 89 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, timesync_ena), 1, 90 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, fd_ena), 1, 91 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, alt_vlan_ena), 1, 92 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, cpuid), 8, 96 },
+/* line 1 */
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, thead_wb), 13, 0 + 128 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, head_wb_ena), 1, 32 + 128 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, qlen), 13, 33 + 128 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, tphrdesc_ena), 1, 46 + 128 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, tphrpacket_ena), 1, 47 + 128 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, tphwdesc_ena), 1, 48 + 128 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, head_wb_addr), 64, 64 + 128 },
+/* line 7 */
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, crc), 32, 0 + (7 * 128) },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, rdylist), 10, 84 + (7 * 128) },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, rdylist_act), 1, 94 + (7 * 128) },
+ { 0 }
+};
+
+/* LAN Rx Queue Context */
+static struct i40e_context_ele i40e_hmc_rxq_ce_info[] = {
+ /* Field Width LSB */
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, head), 13, 0 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, cpuid), 8, 13 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, base), 57, 32 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, qlen), 13, 89 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, dbuff), 7, 102 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, hbuff), 5, 109 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, dtype), 2, 114 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, dsize), 1, 116 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, crcstrip), 1, 117 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, fc_ena), 1, 118 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, l2tsel), 1, 119 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, hsplit_0), 4, 120 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, hsplit_1), 2, 124 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, showiv), 1, 127 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, rxmax), 14, 174 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, tphrdesc_ena), 1, 193 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, tphwdesc_ena), 1, 194 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, tphdata_ena), 1, 195 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, tphhead_ena), 1, 196 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, lrxqthresh), 3, 198 },
+ { 0 }
+};
+
+/**
+ * i40e_clear_hmc_context - zero out the HMC context bits
+ * @hw: the hardware struct
+ * @context_bytes: pointer to the context bit array (DMA memory)
+ * @hmc_type: the type of HMC resource
+ **/
+static i40e_status i40e_clear_hmc_context(struct i40e_hw *hw,
+ u8 *context_bytes,
+ enum i40e_hmc_lan_rsrc_type hmc_type)
+{
+ /* clean the bit array */
+ memset(context_bytes, 0, (u32)hw->hmc.hmc_obj[hmc_type].size);
+
+ return 0;
+}
+
+/**
+ * i40e_set_hmc_context - replace HMC context bits
+ * @context_bytes: pointer to the context bit array
+ * @ce_info: a description of the layout of the source struct
+ * @dest: the source struct to read the context fields from
+ **/
+static i40e_status i40e_set_hmc_context(u8 *context_bytes,
+ struct i40e_context_ele *ce_info,
+ u8 *dest)
+{
+ u16 shift_width;
+ u64 bitfield;
+ u8 hi_byte;
+ u8 hi_mask;
+ u64 t_bits;
+ u64 mask;
+ u8 *p;
+ int f;
+
+ for (f = 0; ce_info[f].width != 0; f++) {
+ /* clear out the field */
+ bitfield = 0;
+
+ /* copy from the next struct field */
+ p = dest + ce_info[f].offset;
+ switch (ce_info[f].size_of) {
+ case 1:
+ bitfield = *p;
+ break;
+ case 2:
+ bitfield = cpu_to_le16(*(u16 *)p);
+ break;
+ case 4:
+ bitfield = cpu_to_le32(*(u32 *)p);
+ break;
+ case 8:
+ bitfield = cpu_to_le64(*(u64 *)p);
+ break;
+ }
+
+ /* prepare the bits and mask */
+ shift_width = ce_info[f].lsb % 8;
+ mask = ((u64)1 << ce_info[f].width) - 1;
+
+ /* save upper bytes for special case */
+ hi_mask = (u8)((mask >> 56) & 0xff);
+ hi_byte = (u8)((bitfield >> 56) & 0xff);
+
+ /* shift to correct alignment */
+ mask <<= shift_width;
+ bitfield <<= shift_width;
+
+ /* get the current bits from the target bit string */
+ p = context_bytes + (ce_info[f].lsb / 8);
+ memcpy(&t_bits, p, sizeof(u64));
+
+ t_bits &= ~mask; /* get the bits not changing */
+ t_bits |= bitfield; /* add in the new bits */
+
+ /* put it all back */
+ memcpy(p, &t_bits, sizeof(u64));
+
+ /* deal with the special case if needed
+ * example: 62 bit field that starts in bit 5 of first byte
+ * will overlap 3 bits into byte 9
+ */
+ if ((shift_width + ce_info[f].width) > 64) {
+ u8 byte;
+
+ hi_mask >>= (8 - shift_width);
+ hi_byte >>= (8 - shift_width);
+ byte = p[8] & ~hi_mask; /* get the bits not changing */
+ byte |= hi_byte; /* add in the new bits */
+ p[8] = byte; /* put it back */
+ }
+ }
+
+ return 0;
+}
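
A minimal stand-alone sketch of the packing step above for a single
field, assuming a little-endian host (as the memcpy-based
read-modify-write does): a 13-bit value whose LSB sits at context
bit 161, i.e. byte 20 with an intra-byte shift of 1. The packed value
is illustrative.

	#include <stdio.h>
	#include <string.h>
	#include <stdint.h>

	int main(void)
	{
		uint8_t ctx[64] = { 0 };
		uint16_t lsb = 161, width = 13;	/* e.g. qlen in Tx line 1 */
		uint64_t bitfield = 0x1234;	/* value to pack */
		uint16_t shift = lsb % 8;	/* 1 */
		uint64_t mask = (((uint64_t)1 << width) - 1) << shift;
		uint64_t t;

		memcpy(&t, ctx + lsb / 8, sizeof(t));	/* read current bits */
		t = (t & ~mask) | ((bitfield << shift) & mask);
		memcpy(ctx + lsb / 8, &t, sizeof(t));	/* write back */
		printf("bytes 20..21: %02x %02x\n", ctx[20], ctx[21]);
		/* prints 68 24: 0x1234 << 1 == 0x2468, little-endian */
		return 0;
	}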
+
+/**
+ * i40e_hmc_get_object_va - retrieves an object's virtual address
+ * @hmc_info: pointer to i40e_hmc_info struct
+ * @object_base: pointer used to return the object's virtual address
+ * @rsrc_type: the hmc resource type
+ * @obj_idx: hmc object index
+ *
+ * This function retrieves the object's virtual address from the object
+ * base pointer. This function is used for LAN Queue contexts.
+ **/
+static
+i40e_status i40e_hmc_get_object_va(struct i40e_hmc_info *hmc_info,
+ u8 **object_base,
+ enum i40e_hmc_lan_rsrc_type rsrc_type,
+ u32 obj_idx)
+{
+ u32 obj_offset_in_sd, obj_offset_in_pd;
+ i40e_status ret_code = 0;
+ struct i40e_hmc_sd_entry *sd_entry;
+ struct i40e_hmc_pd_entry *pd_entry;
+ u32 pd_idx, pd_lmt, rel_pd_idx;
+ u64 obj_offset_in_fpm;
+ u32 sd_idx, sd_lmt;
+
+ if (NULL == hmc_info) {
+ ret_code = I40E_ERR_BAD_PTR;
+ hw_dbg(hw, "i40e_hmc_get_object_va: bad hmc_info ptr\n");
+ goto exit;
+ }
+ if (NULL == hmc_info->hmc_obj) {
+ ret_code = I40E_ERR_BAD_PTR;
+ hw_dbg(hw, "i40e_hmc_get_object_va: bad hmc_info->hmc_obj ptr\n");
+ goto exit;
+ }
+ if (NULL == object_base) {
+ ret_code = I40E_ERR_BAD_PTR;
+ hw_dbg(hw, "i40e_hmc_get_object_va: bad object_base ptr\n");
+ goto exit;
+ }
+ if (I40E_HMC_INFO_SIGNATURE != hmc_info->signature) {
+ ret_code = I40E_ERR_BAD_PTR;
+ hw_dbg(hw, "i40e_hmc_get_object_va: bad hmc_info->signature\n");
+ goto exit;
+ }
+ if (obj_idx >= hmc_info->hmc_obj[rsrc_type].cnt) {
+ ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX;
+ hw_dbg(hw, "i40e_hmc_get_object_va: returns error %d\n",
+ ret_code);
+ goto exit;
+ }
+ /* find sd index and limit */
+ I40E_FIND_SD_INDEX_LIMIT(hmc_info, rsrc_type, obj_idx, 1,
+ &sd_idx, &sd_lmt);
+
+ sd_entry = &hmc_info->sd_table.sd_entry[sd_idx];
+ obj_offset_in_fpm = hmc_info->hmc_obj[rsrc_type].base +
+ hmc_info->hmc_obj[rsrc_type].size * obj_idx;
+
+ if (I40E_SD_TYPE_PAGED == sd_entry->entry_type) {
+ I40E_FIND_PD_INDEX_LIMIT(hmc_info, rsrc_type, obj_idx, 1,
+ &pd_idx, &pd_lmt);
+ rel_pd_idx = pd_idx % I40E_HMC_PD_CNT_IN_SD;
+ pd_entry = &sd_entry->u.pd_table.pd_entry[rel_pd_idx];
+ obj_offset_in_pd = (u32)(obj_offset_in_fpm %
+ I40E_HMC_PAGED_BP_SIZE);
+ *object_base = (u8 *)pd_entry->bp.addr.va + obj_offset_in_pd;
+ } else {
+ obj_offset_in_sd = (u32)(obj_offset_in_fpm %
+ I40E_HMC_DIRECT_BP_SIZE);
+ *object_base = (u8 *)sd_entry->u.bp.addr.va + obj_offset_in_sd;
+ }
+exit:
+ return ret_code;
+}
+
+/**
+ * i40e_clear_lan_tx_queue_context - clear the HMC context for the queue
+ * @hw: the hardware struct
+ * @queue: the queue we care about
+ **/
+i40e_status i40e_clear_lan_tx_queue_context(struct i40e_hw *hw,
+ u16 queue)
+{
+ i40e_status err;
+ u8 *context_bytes;
+
+ err = i40e_hmc_get_object_va(&hw->hmc, &context_bytes,
+ I40E_HMC_LAN_TX, queue);
+ if (err < 0)
+ return err;
+
+ return i40e_clear_hmc_context(hw, context_bytes, I40E_HMC_LAN_TX);
+}
+
+/**
+ * i40e_set_lan_tx_queue_context - set the HMC context for the queue
+ * @hw: the hardware struct
+ * @queue: the queue we care about
+ * @s: the struct to be filled
+ **/
+i40e_status i40e_set_lan_tx_queue_context(struct i40e_hw *hw,
+ u16 queue,
+ struct i40e_hmc_obj_txq *s)
+{
+ i40e_status err;
+ u8 *context_bytes;
+
+ err = i40e_hmc_get_object_va(&hw->hmc, &context_bytes,
+ I40E_HMC_LAN_TX, queue);
+ if (err < 0)
+ return err;
+
+ return i40e_set_hmc_context(context_bytes,
+ i40e_hmc_txq_ce_info, (u8 *)s);
+}
+
+/**
+ * i40e_clear_lan_rx_queue_context - clear the HMC context for the queue
+ * @hw: the hardware struct
+ * @queue: the queue we care about
+ **/
+i40e_status i40e_clear_lan_rx_queue_context(struct i40e_hw *hw,
+ u16 queue)
+{
+ i40e_status err;
+ u8 *context_bytes;
+
+ err = i40e_hmc_get_object_va(&hw->hmc, &context_bytes,
+ I40E_HMC_LAN_RX, queue);
+ if (err < 0)
+ return err;
+
+ return i40e_clear_hmc_context(hw, context_bytes, I40E_HMC_LAN_RX);
+}
+
+/**
+ * i40e_set_lan_rx_queue_context - set the HMC context for the queue
+ * @hw: the hardware struct
+ * @queue: the queue we care about
+ * @s: the struct to be filled
+ **/
+i40e_status i40e_set_lan_rx_queue_context(struct i40e_hw *hw,
+ u16 queue,
+ struct i40e_hmc_obj_rxq *s)
+{
+ i40e_status err;
+ u8 *context_bytes;
+
+ err = i40e_hmc_get_object_va(&hw->hmc, &context_bytes,
+ I40E_HMC_LAN_RX, queue);
+ if (err < 0)
+ return err;
+
+ return i40e_set_hmc_context(context_bytes,
+ i40e_hmc_rxq_ce_info, (u8 *)s);
+}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h
new file mode 100644
index 0000000..00ff35006
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h
@@ -0,0 +1,169 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_LAN_HMC_H_
+#define _I40E_LAN_HMC_H_
+
+/* forward-declare the HW struct for the compiler */
+struct i40e_hw;
+
+/* HMC element context information */
+
+/* Rx queue context data */
+struct i40e_hmc_obj_rxq {
+ u16 head;
+ u8 cpuid;
+ u64 base;
+ u16 qlen;
+#define I40E_RXQ_CTX_DBUFF_SHIFT 7
+ u8 dbuff;
+#define I40E_RXQ_CTX_HBUFF_SHIFT 6
+ u8 hbuff;
+ u8 dtype;
+ u8 dsize;
+ u8 crcstrip;
+ u8 fc_ena;
+ u8 l2tsel;
+ u8 hsplit_0;
+ u8 hsplit_1;
+ u8 showiv;
+ u16 rxmax;
+ u8 tphrdesc_ena;
+ u8 tphwdesc_ena;
+ u8 tphdata_ena;
+ u8 tphhead_ena;
+ u8 lrxqthresh;
+};
+
+/* Tx queue context data */
+struct i40e_hmc_obj_txq {
+ u16 head;
+ u8 new_context;
+ u64 base;
+ u8 fc_ena;
+ u8 timesync_ena;
+ u8 fd_ena;
+ u8 alt_vlan_ena;
+ u16 thead_wb;
+ u16 cpuid;
+ u8 head_wb_ena;
+ u16 qlen;
+ u8 tphrdesc_ena;
+ u8 tphrpacket_ena;
+ u8 tphwdesc_ena;
+ u64 head_wb_addr;
+ u32 crc;
+ u16 rdylist;
+ u8 rdylist_act;
+};
+
+/* for hsplit_0 field of Rx HMC context */
+enum i40e_hmc_obj_rx_hsplit_0 {
+ I40E_HMC_OBJ_RX_HSPLIT_0_NO_SPLIT = 0,
+ I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_L2 = 1,
+ I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_IP = 2,
+ I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_TCP_UDP = 4,
+ I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_SCTP = 8,
+};
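+
+/* the split-type values above are one-hot bits, so they can presumably
+ * be OR'd together to request a split on more than one header type
+ */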
+
+/* fcoe_cntx and fcoe_filt are for debugging purpose only */
+struct i40e_hmc_obj_fcoe_cntx {
+ u32 rsv[32];
+};
+
+struct i40e_hmc_obj_fcoe_filt {
+ u32 rsv[8];
+};
+
+/* Context sizes for LAN objects */
+enum i40e_hmc_lan_object_size {
+ I40E_HMC_LAN_OBJ_SZ_8 = 0x3,
+ I40E_HMC_LAN_OBJ_SZ_16 = 0x4,
+ I40E_HMC_LAN_OBJ_SZ_32 = 0x5,
+ I40E_HMC_LAN_OBJ_SZ_64 = 0x6,
+ I40E_HMC_LAN_OBJ_SZ_128 = 0x7,
+ I40E_HMC_LAN_OBJ_SZ_256 = 0x8,
+ I40E_HMC_LAN_OBJ_SZ_512 = 0x9,
+};
+
+#define I40E_HMC_L2OBJ_BASE_ALIGNMENT 512
+#define I40E_HMC_OBJ_SIZE_TXQ 128
+#define I40E_HMC_OBJ_SIZE_RXQ 32
+#define I40E_HMC_OBJ_SIZE_FCOE_CNTX 128
+#define I40E_HMC_OBJ_SIZE_FCOE_FILT 32
+
+enum i40e_hmc_lan_rsrc_type {
+ I40E_HMC_LAN_FULL = 0,
+ I40E_HMC_LAN_TX = 1,
+ I40E_HMC_LAN_RX = 2,
+ I40E_HMC_FCOE_CTX = 3,
+ I40E_HMC_FCOE_FILT = 4,
+ I40E_HMC_LAN_MAX = 5
+};
+
+enum i40e_hmc_model {
+ I40E_HMC_MODEL_DIRECT_PREFERRED = 0,
+ I40E_HMC_MODEL_DIRECT_ONLY = 1,
+ I40E_HMC_MODEL_PAGED_ONLY = 2,
+ I40E_HMC_MODEL_UNKNOWN,
+};
+
+struct i40e_hmc_lan_create_obj_info {
+ struct i40e_hmc_info *hmc_info;
+ u32 rsrc_type;
+ u32 start_idx;
+ u32 count;
+ enum i40e_sd_entry_type entry_type;
+ u64 direct_mode_sz;
+};
+
+struct i40e_hmc_lan_delete_obj_info {
+ struct i40e_hmc_info *hmc_info;
+ u32 rsrc_type;
+ u32 start_idx;
+ u32 count;
+};
+
+i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
+ u32 rxq_num, u32 fcoe_cntx_num,
+ u32 fcoe_filt_num);
+i40e_status i40e_configure_lan_hmc(struct i40e_hw *hw,
+ enum i40e_hmc_model model);
+i40e_status i40e_shutdown_lan_hmc(struct i40e_hw *hw);
+
+i40e_status i40e_clear_lan_tx_queue_context(struct i40e_hw *hw,
+ u16 queue);
+i40e_status i40e_set_lan_tx_queue_context(struct i40e_hw *hw,
+ u16 queue,
+ struct i40e_hmc_obj_txq *s);
+i40e_status i40e_clear_lan_rx_queue_context(struct i40e_hw *hw,
+ u16 queue);
+i40e_status i40e_set_lan_rx_queue_context(struct i40e_hw *hw,
+ u16 queue,
+ struct i40e_hmc_obj_rxq *s);
+
+#endif /* _I40E_LAN_HMC_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
new file mode 100644
index 0000000..601d482
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -0,0 +1,7375 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+/* Local includes */
+#include "i40e.h"
+
+const char i40e_driver_name[] = "i40e";
+static const char i40e_driver_string[] =
+ "Intel(R) Ethernet Connection XL710 Network Driver";
+
+#define DRV_KERN "-k"
+
+#define DRV_VERSION_MAJOR 0
+#define DRV_VERSION_MINOR 3
+#define DRV_VERSION_BUILD 9
+#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
+ __stringify(DRV_VERSION_MINOR) "." \
+ __stringify(DRV_VERSION_BUILD) DRV_KERN
+const char i40e_driver_version_str[] = DRV_VERSION;
+static const char i40e_copyright[] = "Copyright (c) 2013 Intel Corporation.";
+
+/* a few forward declarations */
+static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
+static void i40e_handle_reset_warning(struct i40e_pf *pf);
+static int i40e_add_vsi(struct i40e_vsi *vsi);
+static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
+static int i40e_setup_pf_switch(struct i40e_pf *pf);
+static int i40e_setup_misc_vector(struct i40e_pf *pf);
+static void i40e_determine_queue_usage(struct i40e_pf *pf);
+static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
+
+/* i40e_pci_tbl - PCI Device ID Table
+ *
+ * Last entry must be all 0s
+ *
+ * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
+ * Class, Class Mask, private data (not used) }
+ */
+static DEFINE_PCI_DEVICE_TABLE(i40e_pci_tbl) = {
+ {PCI_VDEVICE(INTEL, I40E_SFP_XL710_DEVICE_ID), 0},
+ {PCI_VDEVICE(INTEL, I40E_SFP_X710_DEVICE_ID), 0},
+ {PCI_VDEVICE(INTEL, I40E_QEMU_DEVICE_ID), 0},
+ {PCI_VDEVICE(INTEL, I40E_KX_A_DEVICE_ID), 0},
+ {PCI_VDEVICE(INTEL, I40E_KX_B_DEVICE_ID), 0},
+ {PCI_VDEVICE(INTEL, I40E_KX_C_DEVICE_ID), 0},
+ {PCI_VDEVICE(INTEL, I40E_KX_D_DEVICE_ID), 0},
+ {PCI_VDEVICE(INTEL, I40E_QSFP_A_DEVICE_ID), 0},
+ {PCI_VDEVICE(INTEL, I40E_QSFP_B_DEVICE_ID), 0},
+ {PCI_VDEVICE(INTEL, I40E_QSFP_C_DEVICE_ID), 0},
+ /* required last entry */
+ {0, }
+};
+MODULE_DEVICE_TABLE(pci, i40e_pci_tbl);
+
+#define I40E_MAX_VF_COUNT 128
+static int debug = -1;
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
+
+MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
+MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+/**
+ * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code
+ * @hw: pointer to the HW structure
+ * @mem: ptr to mem struct to fill out
+ * @size: size of memory requested
+ * @alignment: what to align the allocation to
+ **/
+int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem,
+ u64 size, u32 alignment)
+{
+ struct i40e_pf *pf = (struct i40e_pf *)hw->back;
+
+ mem->size = ALIGN(size, alignment);
+ mem->va = dma_zalloc_coherent(&pf->pdev->dev, mem->size,
+ &mem->pa, GFP_KERNEL);
+ if (mem->va)
+ return 0;
+
+ return -ENOMEM;
+}
+
+/**
+ * i40e_free_dma_mem_d - OS specific memory free for shared code
+ * @hw: pointer to the HW structure
+ * @mem: ptr to mem struct to free
+ **/
+int i40e_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
+{
+ struct i40e_pf *pf = (struct i40e_pf *)hw->back;
+
+ dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa);
+ mem->va = NULL;
+ mem->pa = 0;
+ mem->size = 0;
+
+ return 0;
+}
+
+/**
+ * i40e_allocate_virt_mem_d - OS specific memory alloc for shared code
+ * @hw: pointer to the HW structure
+ * @mem: ptr to mem struct to fill out
+ * @size: size of memory requested
+ **/
+int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem,
+ u32 size)
+{
+ mem->size = size;
+ mem->va = kzalloc(size, GFP_KERNEL);
+
+ if (mem->va)
+ return 0;
+
+ return -ENOMEM;
+}
+
+/**
+ * i40e_free_virt_mem_d - OS specific memory free for shared code
+ * @hw: pointer to the HW structure
+ * @mem: ptr to mem struct to free
+ **/
+int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem)
+{
+ /* it's ok to kfree a NULL pointer */
+ kfree(mem->va);
+ mem->va = NULL;
+ mem->size = 0;
+
+ return 0;
+}
+
+/**
+ * i40e_get_lump - find a lump of free generic resource
+ * @pf: board private structure
+ * @pile: the pile of resource to search
+ * @needed: the number of items needed
+ * @id: an owner id to stick on the items assigned
+ *
+ * Returns the base item index of the lump, or negative for error
+ *
+ * The search_hint trick and lack of advanced fit-finding only work
+ * because we're highly likely to have all the same size lump requests.
+ * Linear search time and any fragmentation should be minimal.
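+ *
+ * For example, with needed = 4 the search starts at search_hint, skips
+ * entries already tagged with I40E_PILE_VALID_BIT, and claims the first
+ * run of 4 free entries by marking each with (id | I40E_PILE_VALID_BIT).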
+ **/
+static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
+ u16 needed, u16 id)
+{
+ int ret = -ENOMEM;
+ int i = 0;
+ int j = 0;
+
+ if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) {
+ dev_info(&pf->pdev->dev,
+ "param err: pile=%p needed=%d id=0x%04x\n",
+ pile, needed, id);
+ return -EINVAL;
+ }
+
+ /* start the linear search with an imperfect hint */
+ i = pile->search_hint;
+ while (i < pile->num_entries && ret < 0) {
+ /* skip already allocated entries */
+ if (pile->list[i] & I40E_PILE_VALID_BIT) {
+ i++;
+ continue;
+ }
+
+ /* do we have enough in this lump? */
+ for (j = 0; (j < needed) && ((i+j) < pile->num_entries); j++) {
+ if (pile->list[i+j] & I40E_PILE_VALID_BIT)
+ break;
+ }
+
+ if (j == needed) {
+ /* there was enough, so assign it to the requestor */
+ for (j = 0; j < needed; j++)
+ pile->list[i+j] = id | I40E_PILE_VALID_BIT;
+ ret = i;
+ pile->search_hint = i + j;
+ } else {
+ /* not enough, so skip over it and continue looking */
+ i += j;
+ }
+ }
+
+ return ret;
+}
+
+/**
+ * i40e_put_lump - return a lump of generic resource
+ * @pile: the pile of resource to search
+ * @index: the base item index
+ * @id: the owner id of the items assigned
+ *
+ * Returns the count of items in the lump
+ **/
+static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
+{
+ int valid_id = (id | I40E_PILE_VALID_BIT);
+ int count = 0;
+ int i;
+
+ if (!pile || index >= pile->num_entries)
+ return -EINVAL;
+
+ for (i = index;
+ i < pile->num_entries && pile->list[i] == valid_id;
+ i++) {
+ pile->list[i] = 0;
+ count++;
+ }
+
+ if (count && index < pile->search_hint)
+ pile->search_hint = index;
+
+ return count;
+}
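+
+/* Usage sketch (names as used elsewhere in this driver): a VSI's queue
+ * pairs are carved out with i40e_get_lump(pf, pf->qp_pile, n, vsi->idx)
+ * and released again with i40e_put_lump(pf->qp_pile, base, vsi->idx).
+ */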
+
+/**
+ * i40e_service_event_schedule - Schedule the service task to wake up
+ * @pf: board private structure
+ *
+ * If not already scheduled, this puts the task into the work queue
+ **/
+static void i40e_service_event_schedule(struct i40e_pf *pf)
+{
+ if (!test_bit(__I40E_DOWN, &pf->state) &&
+ !test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) &&
+ !test_and_set_bit(__I40E_SERVICE_SCHED, &pf->state))
+ schedule_work(&pf->service_task);
+}
+
+/**
+ * i40e_tx_timeout - Respond to a Tx Hang
+ * @netdev: network interface device structure
+ *
+ * If any port has noticed a Tx timeout, it is likely that the whole
+ * device is munged, not just the one netdev port, so go for the full
+ * reset.
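+ *
+ * Recovery escalates with each timeout inside the 20 second window:
+ * reinit the VSI queues, then PF reset, core reset, global reset, and
+ * finally take the VSI down.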
+ **/
+static void i40e_tx_timeout(struct net_device *netdev)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+
+ pf->tx_timeout_count++;
+
+ if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20)))
+ pf->tx_timeout_recovery_level = 0;
+ pf->tx_timeout_last_recovery = jiffies;
+ netdev_info(netdev, "tx_timeout recovery level %d\n",
+ pf->tx_timeout_recovery_level);
+
+ switch (pf->tx_timeout_recovery_level) {
+ case 0:
+ /* disable and re-enable queues for the VSI */
+ if (in_interrupt()) {
+ set_bit(__I40E_REINIT_REQUESTED, &pf->state);
+ set_bit(__I40E_REINIT_REQUESTED, &vsi->state);
+ } else {
+ i40e_vsi_reinit_locked(vsi);
+ }
+ break;
+ case 1:
+ set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
+ break;
+ case 2:
+ set_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
+ break;
+ case 3:
+ set_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
+ break;
+ default:
+ netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
+ i40e_down(vsi);
+ break;
+ }
+ i40e_service_event_schedule(pf);
+ pf->tx_timeout_recovery_level++;
+}
+
+/**
+ * i40e_release_rx_desc - Store the new tail value
+ * @rx_ring: ring to bump
+ * @val: new tail index
+ **/
+static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
+{
+ rx_ring->next_to_use = val;
+
+ /* Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64).
+ */
+ wmb();
+ writel(val, rx_ring->tail);
+}
+
+/**
+ * i40e_get_vsi_stats_struct - Get System Network Statistics
+ * @vsi: the VSI we care about
+ *
+ * Returns the address of the device statistics structure.
+ * The statistics are actually updated from the service task.
+ **/
+struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
+{
+ return &vsi->net_stats;
+}
+
+/**
+ * i40e_get_netdev_stats_struct - Get statistics for netdev interface
+ * @netdev: network interface device structure
+ * @storage: stats structure to be filled in and returned
+ *
+ * Copies the VSI statistics into @storage and returns its address.
+ * The statistics are actually updated from the service task.
+ **/
+static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
+ struct net_device *netdev,
+ struct rtnl_link_stats64 *storage)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+
+ *storage = *i40e_get_vsi_stats_struct(vsi);
+
+ return storage;
+}
+
+/**
+ * i40e_vsi_reset_stats - Resets all stats of the given vsi
+ * @vsi: the VSI to have its stats reset
+ **/
+void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
+{
+ struct rtnl_link_stats64 *ns;
+ int i;
+
+ if (!vsi)
+ return;
+
+ ns = i40e_get_vsi_stats_struct(vsi);
+ memset(ns, 0, sizeof(*ns));
+ memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets));
+ memset(&vsi->eth_stats, 0, sizeof(vsi->eth_stats));
+ memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
+ if (vsi->rx_rings)
+ for (i = 0; i < vsi->num_queue_pairs; i++) {
+			memset(&vsi->rx_rings[i].rx_stats, 0,
+ sizeof(vsi->rx_rings[i].rx_stats));
+ memset(&vsi->tx_rings[i].tx_stats, 0,
+ sizeof(vsi->tx_rings[i].tx_stats));
+ }
+ vsi->stat_offsets_loaded = false;
+}
+
+/**
+ * i40e_pf_reset_stats - Reset all of the stats for the given pf
+ * @pf: the PF to be reset
+ **/
+void i40e_pf_reset_stats(struct i40e_pf *pf)
+{
+ memset(&pf->stats, 0, sizeof(pf->stats));
+ memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets));
+ pf->stat_offsets_loaded = false;
+}
+
+/**
+ * i40e_stat_update48 - read and update a 48 bit stat from the chip
+ * @hw: ptr to the hardware info
+ * @hireg: the high 32 bit reg to read
+ * @loreg: the low 32 bit reg to read
+ * @offset_loaded: has the initial offset been loaded yet
+ * @offset: ptr to current offset value
+ * @stat: ptr to the stat
+ *
+ * Since the device stats are not reset at PFReset, they likely will not
+ * be zeroed when the driver starts. We'll save the first values read
+ * and use them as offsets to be subtracted from the raw values in order
+ * to report stats that count from zero. In the process, we also manage
+ * the potential roll-over.
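+ *
+ * Rollover example: with *offset = 0xFFFFFFFFFFF0 and a new raw reading
+ * of 0x10, the reported stat is (0x10 + (1ULL << 48)) - 0xFFFFFFFFFFF0,
+ * i.e. 0x20 counts accumulated across the 48-bit wrap.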
+ **/
+static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
+ bool offset_loaded, u64 *offset, u64 *stat)
+{
+ u64 new_data;
+
+ if (hw->device_id == I40E_QEMU_DEVICE_ID) {
+ new_data = rd32(hw, loreg);
+ new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
+ } else {
+ new_data = rd64(hw, loreg);
+ }
+ if (!offset_loaded)
+ *offset = new_data;
+ if (likely(new_data >= *offset))
+ *stat = new_data - *offset;
+ else
+ *stat = (new_data + ((u64)1 << 48)) - *offset;
+ *stat &= 0xFFFFFFFFFFFFULL;
+}
+
+/**
+ * i40e_stat_update32 - read and update a 32 bit stat from the chip
+ * @hw: ptr to the hardware info
+ * @reg: the hw reg to read
+ * @offset_loaded: has the initial offset been loaded yet
+ * @offset: ptr to current offset value
+ * @stat: ptr to the stat
+ **/
+static void i40e_stat_update32(struct i40e_hw *hw, u32 reg,
+ bool offset_loaded, u64 *offset, u64 *stat)
+{
+ u32 new_data;
+
+ new_data = rd32(hw, reg);
+ if (!offset_loaded)
+ *offset = new_data;
+ if (likely(new_data >= *offset))
+ *stat = (u32)(new_data - *offset);
+ else
+ *stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
+}
+
+/**
+ * i40e_update_eth_stats - Update VSI-specific ethernet statistics counters.
+ * @vsi: the VSI to be updated
+ **/
+void i40e_update_eth_stats(struct i40e_vsi *vsi)
+{
+ int stat_idx = le16_to_cpu(vsi->info.stat_counter_idx);
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ struct i40e_eth_stats *oes;
+ struct i40e_eth_stats *es; /* device's eth stats */
+
+ es = &vsi->eth_stats;
+ oes = &vsi->eth_stats_offsets;
+
+ /* Gather up the stats that the hw collects */
+ i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
+ vsi->stat_offsets_loaded,
+ &oes->tx_errors, &es->tx_errors);
+ i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
+ vsi->stat_offsets_loaded,
+ &oes->rx_discards, &es->rx_discards);
+
+ i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
+ I40E_GLV_GORCL(stat_idx),
+ vsi->stat_offsets_loaded,
+ &oes->rx_bytes, &es->rx_bytes);
+ i40e_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
+ I40E_GLV_UPRCL(stat_idx),
+ vsi->stat_offsets_loaded,
+ &oes->rx_unicast, &es->rx_unicast);
+ i40e_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
+ I40E_GLV_MPRCL(stat_idx),
+ vsi->stat_offsets_loaded,
+ &oes->rx_multicast, &es->rx_multicast);
+ i40e_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
+ I40E_GLV_BPRCL(stat_idx),
+ vsi->stat_offsets_loaded,
+ &oes->rx_broadcast, &es->rx_broadcast);
+
+ i40e_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
+ I40E_GLV_GOTCL(stat_idx),
+ vsi->stat_offsets_loaded,
+ &oes->tx_bytes, &es->tx_bytes);
+ i40e_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
+ I40E_GLV_UPTCL(stat_idx),
+ vsi->stat_offsets_loaded,
+ &oes->tx_unicast, &es->tx_unicast);
+ i40e_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
+ I40E_GLV_MPTCL(stat_idx),
+ vsi->stat_offsets_loaded,
+ &oes->tx_multicast, &es->tx_multicast);
+ i40e_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
+ I40E_GLV_BPTCL(stat_idx),
+ vsi->stat_offsets_loaded,
+ &oes->tx_broadcast, &es->tx_broadcast);
+ vsi->stat_offsets_loaded = true;
+}
+
+/**
+ * i40e_update_veb_stats - Update Switch component statistics
+ * @veb: the VEB being updated
+ **/
+static void i40e_update_veb_stats(struct i40e_veb *veb)
+{
+ struct i40e_pf *pf = veb->pf;
+ struct i40e_hw *hw = &pf->hw;
+ struct i40e_eth_stats *oes;
+ struct i40e_eth_stats *es; /* device's eth stats */
+ int idx = 0;
+
+ idx = veb->stats_idx;
+ es = &veb->stats;
+ oes = &veb->stats_offsets;
+
+ /* Gather up the stats that the hw collects */
+ i40e_stat_update32(hw, I40E_GLSW_TDPC(idx),
+ veb->stat_offsets_loaded,
+ &oes->tx_discards, &es->tx_discards);
+ i40e_stat_update32(hw, I40E_GLSW_RUPP(idx),
+ veb->stat_offsets_loaded,
+ &oes->rx_unknown_protocol, &es->rx_unknown_protocol);
+
+ i40e_stat_update48(hw, I40E_GLSW_GORCH(idx), I40E_GLSW_GORCL(idx),
+ veb->stat_offsets_loaded,
+ &oes->rx_bytes, &es->rx_bytes);
+ i40e_stat_update48(hw, I40E_GLSW_UPRCH(idx), I40E_GLSW_UPRCL(idx),
+ veb->stat_offsets_loaded,
+ &oes->rx_unicast, &es->rx_unicast);
+ i40e_stat_update48(hw, I40E_GLSW_MPRCH(idx), I40E_GLSW_MPRCL(idx),
+ veb->stat_offsets_loaded,
+ &oes->rx_multicast, &es->rx_multicast);
+ i40e_stat_update48(hw, I40E_GLSW_BPRCH(idx), I40E_GLSW_BPRCL(idx),
+ veb->stat_offsets_loaded,
+ &oes->rx_broadcast, &es->rx_broadcast);
+
+ i40e_stat_update48(hw, I40E_GLSW_GOTCH(idx), I40E_GLSW_GOTCL(idx),
+ veb->stat_offsets_loaded,
+ &oes->tx_bytes, &es->tx_bytes);
+ i40e_stat_update48(hw, I40E_GLSW_UPTCH(idx), I40E_GLSW_UPTCL(idx),
+ veb->stat_offsets_loaded,
+ &oes->tx_unicast, &es->tx_unicast);
+ i40e_stat_update48(hw, I40E_GLSW_MPTCH(idx), I40E_GLSW_MPTCL(idx),
+ veb->stat_offsets_loaded,
+ &oes->tx_multicast, &es->tx_multicast);
+ i40e_stat_update48(hw, I40E_GLSW_BPTCH(idx), I40E_GLSW_BPTCL(idx),
+ veb->stat_offsets_loaded,
+ &oes->tx_broadcast, &es->tx_broadcast);
+ veb->stat_offsets_loaded = true;
+}
+
+/**
+ * i40e_update_link_xoff_rx - Update XOFF received in link flow control mode
+ * @pf: the corresponding PF
+ *
+ * Update the Rx XOFF counter (PAUSE frames) in link flow control mode
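+ *
+ * A received XOFF legitimately stalls the Tx queues, so the Tx hang
+ * detection is disarmed on all rings to avoid a spurious hang report.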
+ **/
+static void i40e_update_link_xoff_rx(struct i40e_pf *pf)
+{
+ struct i40e_hw_port_stats *osd = &pf->stats_offsets;
+ struct i40e_hw_port_stats *nsd = &pf->stats;
+ struct i40e_hw *hw = &pf->hw;
+ u64 xoff = 0;
+ u16 i, v;
+
+ if ((hw->fc.current_mode != I40E_FC_FULL) &&
+ (hw->fc.current_mode != I40E_FC_RX_PAUSE))
+ return;
+
+ xoff = nsd->link_xoff_rx;
+ i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->link_xoff_rx, &nsd->link_xoff_rx);
+
+ /* No new LFC xoff rx */
+ if (!(nsd->link_xoff_rx - xoff))
+ return;
+
+ /* Clear the __I40E_HANG_CHECK_ARMED bit for all Tx rings */
+ for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+ struct i40e_vsi *vsi = pf->vsi[v];
+
+ if (!vsi)
+ continue;
+
+ for (i = 0; i < vsi->num_queue_pairs; i++) {
+			struct i40e_ring *ring = &vsi->tx_rings[i];
+
+			clear_bit(__I40E_HANG_CHECK_ARMED, &ring->state);
+ }
+ }
+}
+
+/**
+ * i40e_update_prio_xoff_rx - Update XOFF received in PFC mode
+ * @pf: the corresponding PF
+ *
+ * Update the Rx XOFF counter (PAUSE frames) in PFC mode
+ **/
+static void i40e_update_prio_xoff_rx(struct i40e_pf *pf)
+{
+ struct i40e_hw_port_stats *osd = &pf->stats_offsets;
+ struct i40e_hw_port_stats *nsd = &pf->stats;
+ bool xoff[I40E_MAX_TRAFFIC_CLASS] = {false};
+ struct i40e_dcbx_config *dcb_cfg;
+ struct i40e_hw *hw = &pf->hw;
+ u16 i, v;
+ u8 tc;
+
+ dcb_cfg = &hw->local_dcbx_config;
+
+ /* See if DCB enabled with PFC TC */
+ if (!(pf->flags & I40E_FLAG_DCB_ENABLED) ||
+ !(dcb_cfg->pfc.pfcenable)) {
+ i40e_update_link_xoff_rx(pf);
+ return;
+ }
+
+ for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
+		u64 prio_xoff = nsd->priority_xoff_rx[i];
+
+ i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
+ pf->stat_offsets_loaded,
+ &osd->priority_xoff_rx[i],
+ &nsd->priority_xoff_rx[i]);
+
+ /* No new PFC xoff rx */
+ if (!(nsd->priority_xoff_rx[i] - prio_xoff))
+ continue;
+ /* Get the TC for given priority */
+ tc = dcb_cfg->etscfg.prioritytable[i];
+ xoff[tc] = true;
+ }
+
+ /* Clear the __I40E_HANG_CHECK_ARMED bit for Tx rings */
+ for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+ struct i40e_vsi *vsi = pf->vsi[v];
+
+ if (!vsi)
+ continue;
+
+ for (i = 0; i < vsi->num_queue_pairs; i++) {
+ struct i40e_ring *ring = &vsi->tx_rings[i];
+
+ tc = ring->dcb_tc;
+ if (xoff[tc])
+ clear_bit(__I40E_HANG_CHECK_ARMED,
+ &ring->state);
+ }
+ }
+}
+
+/**
+ * i40e_update_stats - Update the board statistics counters.
+ * @vsi: the VSI to be updated
+ *
+ * There are a few instances where we store the same stat in a
+ * couple of different structs. This is partly because we have
+ * the netdev stats that need to be filled out, which is slightly
+ * different from the "eth_stats" defined by the chip and used in
+ * VF communications. We sort it all out here in a central place.
+ **/
+void i40e_update_stats(struct i40e_vsi *vsi)
+{
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ struct rtnl_link_stats64 *ons;
+ struct rtnl_link_stats64 *ns; /* netdev stats */
+ struct i40e_eth_stats *oes;
+ struct i40e_eth_stats *es; /* device's eth stats */
+ u32 tx_restart, tx_busy;
+ u32 rx_page, rx_buf;
+ u64 rx_p, rx_b;
+ u64 tx_p, tx_b;
+ int i;
+ u16 q;
+
+ if (test_bit(__I40E_DOWN, &vsi->state) ||
+ test_bit(__I40E_CONFIG_BUSY, &pf->state))
+ return;
+
+ ns = i40e_get_vsi_stats_struct(vsi);
+ ons = &vsi->net_stats_offsets;
+ es = &vsi->eth_stats;
+ oes = &vsi->eth_stats_offsets;
+
+ /* Gather up the netdev and vsi stats that the driver collects
+ * on the fly during packet processing
+ */
+ rx_b = rx_p = 0;
+ tx_b = tx_p = 0;
+ tx_restart = tx_busy = 0;
+ rx_page = 0;
+ rx_buf = 0;
+ for (q = 0; q < vsi->num_queue_pairs; q++) {
+ struct i40e_ring *p;
+
+ p = &vsi->rx_rings[q];
+ rx_b += p->rx_stats.bytes;
+ rx_p += p->rx_stats.packets;
+ rx_buf += p->rx_stats.alloc_rx_buff_failed;
+ rx_page += p->rx_stats.alloc_rx_page_failed;
+
+ p = &vsi->tx_rings[q];
+ tx_b += p->tx_stats.bytes;
+ tx_p += p->tx_stats.packets;
+ tx_restart += p->tx_stats.restart_queue;
+ tx_busy += p->tx_stats.tx_busy;
+ }
+ vsi->tx_restart = tx_restart;
+ vsi->tx_busy = tx_busy;
+ vsi->rx_page_failed = rx_page;
+ vsi->rx_buf_failed = rx_buf;
+
+ ns->rx_packets = rx_p;
+ ns->rx_bytes = rx_b;
+ ns->tx_packets = tx_p;
+ ns->tx_bytes = tx_b;
+
+ i40e_update_eth_stats(vsi);
+ /* update netdev stats from eth stats */
+ ons->rx_errors = oes->rx_errors;
+ ns->rx_errors = es->rx_errors;
+ ons->tx_errors = oes->tx_errors;
+ ns->tx_errors = es->tx_errors;
+ ons->multicast = oes->rx_multicast;
+ ns->multicast = es->rx_multicast;
+ ons->tx_dropped = oes->tx_discards;
+ ns->tx_dropped = es->tx_discards;
+
+ /* Get the port data only if this is the main PF VSI */
+ if (vsi == pf->vsi[pf->lan_vsi]) {
+ struct i40e_hw_port_stats *nsd = &pf->stats;
+ struct i40e_hw_port_stats *osd = &pf->stats_offsets;
+
+ i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
+ I40E_GLPRT_GORCL(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
+ i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
+ I40E_GLPRT_GOTCL(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
+ i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->eth.rx_discards,
+ &nsd->eth.rx_discards);
+ i40e_stat_update32(hw, I40E_GLPRT_TDPC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->eth.tx_discards,
+ &nsd->eth.tx_discards);
+ i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
+ I40E_GLPRT_MPRCL(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->eth.rx_multicast,
+ &nsd->eth.rx_multicast);
+
+ i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->tx_dropped_link_down,
+ &nsd->tx_dropped_link_down);
+
+ i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->crc_errors, &nsd->crc_errors);
+ ns->rx_crc_errors = nsd->crc_errors;
+
+ i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->illegal_bytes, &nsd->illegal_bytes);
+		ns->rx_errors = nsd->crc_errors +
+				nsd->illegal_bytes;
+
+ i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->mac_local_faults,
+ &nsd->mac_local_faults);
+ i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->mac_remote_faults,
+ &nsd->mac_remote_faults);
+
+ i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_length_errors,
+ &nsd->rx_length_errors);
+ ns->rx_length_errors = nsd->rx_length_errors;
+
+ i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->link_xon_rx, &nsd->link_xon_rx);
+ i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->link_xon_tx, &nsd->link_xon_tx);
+ i40e_update_prio_xoff_rx(pf); /* handles I40E_GLPRT_LXOFFRXC */
+ i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->link_xoff_tx, &nsd->link_xoff_tx);
+
+ for (i = 0; i < 8; i++) {
+ i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
+ pf->stat_offsets_loaded,
+ &osd->priority_xon_rx[i],
+ &nsd->priority_xon_rx[i]);
+ i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
+ pf->stat_offsets_loaded,
+ &osd->priority_xon_tx[i],
+ &nsd->priority_xon_tx[i]);
+ i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
+ pf->stat_offsets_loaded,
+ &osd->priority_xoff_tx[i],
+ &nsd->priority_xoff_tx[i]);
+ i40e_stat_update32(hw,
+ I40E_GLPRT_RXON2OFFCNT(hw->port, i),
+ pf->stat_offsets_loaded,
+ &osd->priority_xon_2_xoff[i],
+ &nsd->priority_xon_2_xoff[i]);
+ }
+
+ i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
+ I40E_GLPRT_PRC64L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_size_64, &nsd->rx_size_64);
+ i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
+ I40E_GLPRT_PRC127L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_size_127, &nsd->rx_size_127);
+ i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
+ I40E_GLPRT_PRC255L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_size_255, &nsd->rx_size_255);
+ i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
+ I40E_GLPRT_PRC511L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_size_511, &nsd->rx_size_511);
+ i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
+ I40E_GLPRT_PRC1023L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_size_1023, &nsd->rx_size_1023);
+ i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
+ I40E_GLPRT_PRC1522L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_size_1522, &nsd->rx_size_1522);
+ i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
+ I40E_GLPRT_PRC9522L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_size_big, &nsd->rx_size_big);
+
+ i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
+ I40E_GLPRT_PTC64L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->tx_size_64, &nsd->tx_size_64);
+ i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
+ I40E_GLPRT_PTC127L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->tx_size_127, &nsd->tx_size_127);
+ i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
+ I40E_GLPRT_PTC255L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->tx_size_255, &nsd->tx_size_255);
+ i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
+ I40E_GLPRT_PTC511L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->tx_size_511, &nsd->tx_size_511);
+ i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
+ I40E_GLPRT_PTC1023L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->tx_size_1023, &nsd->tx_size_1023);
+ i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
+ I40E_GLPRT_PTC1522L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->tx_size_1522, &nsd->tx_size_1522);
+ i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
+ I40E_GLPRT_PTC9522L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->tx_size_big, &nsd->tx_size_big);
+
+ i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_undersize, &nsd->rx_undersize);
+ i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_fragments, &nsd->rx_fragments);
+ i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_oversize, &nsd->rx_oversize);
+ i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_jabber, &nsd->rx_jabber);
+ }
+
+ pf->stat_offsets_loaded = true;
+}
+
+/**
+ * i40e_find_filter - Search VSI filter list for specific mac/vlan filter
+ * @vsi: the VSI to be searched
+ * @macaddr: the MAC address
+ * @vlan: the vlan
+ * @is_vf: make sure it's a vf filter, else doesn't matter
+ * @is_netdev: make sure it's a netdev filter, else doesn't matter
+ *
+ * Returns ptr to the filter object or NULL
+ **/
+static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
+ u8 *macaddr, s16 vlan,
+ bool is_vf, bool is_netdev)
+{
+ struct i40e_mac_filter *f;
+
+ if (!vsi || !macaddr)
+ return NULL;
+
+ list_for_each_entry(f, &vsi->mac_filter_list, list) {
+ if ((ether_addr_equal(macaddr, f->macaddr)) &&
+ (vlan == f->vlan) &&
+ (!is_vf || f->is_vf) &&
+ (!is_netdev || f->is_netdev))
+ return f;
+ }
+ return NULL;
+}
+
+/**
+ * i40e_find_mac - Find a mac addr in the macvlan filters list
+ * @vsi: the VSI to be searched
+ * @macaddr: the MAC address we are searching for
+ * @is_vf: make sure it's a vf filter, else doesn't matter
+ * @is_netdev: make sure it's a netdev filter, else doesn't matter
+ *
+ * Returns the first filter with the provided MAC address or NULL if
+ * MAC address was not found
+ **/
+struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr,
+ bool is_vf, bool is_netdev)
+{
+ struct i40e_mac_filter *f;
+
+ if (!vsi || !macaddr)
+ return NULL;
+
+ list_for_each_entry(f, &vsi->mac_filter_list, list) {
+ if ((ether_addr_equal(macaddr, f->macaddr)) &&
+ (!is_vf || f->is_vf) &&
+ (!is_netdev || f->is_netdev))
+ return f;
+ }
+ return NULL;
+}
+
+/**
+ * i40e_is_vsi_in_vlan - Check if VSI is in vlan mode
+ * @vsi: the VSI to be searched
+ *
+ * Returns true if VSI is in vlan mode or false otherwise
+ **/
+bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi)
+{
+ struct i40e_mac_filter *f;
+
+	/* Only when every filter has vlan == -1 is the VSI not in vlan
+	 * mode, so we have to walk the whole list to be sure
+	 */
+ list_for_each_entry(f, &vsi->mac_filter_list, list) {
+ if (f->vlan >= 0)
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * i40e_put_mac_in_vlan - Make macvlan filters from macaddrs and vlans
+ * @vsi: the VSI to be searched
+ * @macaddr: the mac address to be filtered
+ * @is_vf: true if it is a vf
+ * @is_netdev: true if it is a netdev
+ *
+ * Goes through all the macvlan filters and adds a
+ * macvlan filter for each unique vlan that already exists
+ *
+ * Returns first filter found on success, else NULL
+ **/
+struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr,
+ bool is_vf, bool is_netdev)
+{
+ struct i40e_mac_filter *f;
+
+ list_for_each_entry(f, &vsi->mac_filter_list, list) {
+ if (!i40e_find_filter(vsi, macaddr, f->vlan,
+ is_vf, is_netdev)) {
+ if (!i40e_add_filter(vsi, macaddr, f->vlan,
+ is_vf, is_netdev))
+ return NULL;
+ }
+ }
+
+ return list_first_entry_or_null(&vsi->mac_filter_list,
+ struct i40e_mac_filter, list);
+}
+
+/**
+ * i40e_add_filter - Add a mac/vlan filter to the VSI
+ * @vsi: the VSI to be searched
+ * @macaddr: the MAC address
+ * @vlan: the vlan
+ * @is_vf: make sure it's a vf filter, else doesn't matter
+ * @is_netdev: make sure it's a netdev filter, else doesn't matter
+ *
+ * Returns ptr to the filter object or NULL when no memory available.
+ **/
+struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
+ u8 *macaddr, s16 vlan,
+ bool is_vf, bool is_netdev)
+{
+ struct i40e_mac_filter *f;
+
+ if (!vsi || !macaddr)
+ return NULL;
+
+ f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);
+ if (!f) {
+ f = kzalloc(sizeof(*f), GFP_ATOMIC);
+ if (!f)
+ goto add_filter_out;
+
+ memcpy(f->macaddr, macaddr, ETH_ALEN);
+ f->vlan = vlan;
+ f->changed = true;
+
+ INIT_LIST_HEAD(&f->list);
+ list_add(&f->list, &vsi->mac_filter_list);
+ }
+
+ /* increment counter and add a new flag if needed */
+ if (is_vf) {
+ if (!f->is_vf) {
+ f->is_vf = true;
+ f->counter++;
+ }
+ } else if (is_netdev) {
+ if (!f->is_netdev) {
+ f->is_netdev = true;
+ f->counter++;
+ }
+ } else {
+ f->counter++;
+ }
+
+ /* changed tells sync_filters_subtask to
+ * push the filter down to the firmware
+ */
+ if (f->changed) {
+ vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
+ vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
+ }
+
+add_filter_out:
+ return f;
+}
+
+/**
+ * i40e_del_filter - Remove a mac/vlan filter from the VSI
+ * @vsi: the VSI to be searched
+ * @macaddr: the MAC address
+ * @vlan: the vlan
+ * @is_vf: make sure it's a vf filter, else doesn't matter
+ * @is_netdev: make sure it's a netdev filter, else doesn't matter
+ **/
+void i40e_del_filter(struct i40e_vsi *vsi,
+ u8 *macaddr, s16 vlan,
+ bool is_vf, bool is_netdev)
+{
+ struct i40e_mac_filter *f;
+
+ if (!vsi || !macaddr)
+ return;
+
+ f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);
+ if (!f || f->counter == 0)
+ return;
+
+ if (is_vf) {
+ if (f->is_vf) {
+ f->is_vf = false;
+ f->counter--;
+ }
+ } else if (is_netdev) {
+ if (f->is_netdev) {
+ f->is_netdev = false;
+ f->counter--;
+ }
+ } else {
+ /* make sure we don't remove a filter in use by vf or netdev */
+ int min_f = 0;
+ min_f += (f->is_vf ? 1 : 0);
+ min_f += (f->is_netdev ? 1 : 0);
+
+ if (f->counter > min_f)
+ f->counter--;
+ }
+
+ /* counter == 0 tells sync_filters_subtask to
+ * remove the filter from the firmware's list
+ */
+ if (f->counter == 0) {
+ f->changed = true;
+ vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
+ vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
+ }
+}
+
+/**
+ * i40e_set_mac - NDO callback to set mac address
+ * @netdev: network interface device structure
+ * @p: pointer to an address structure
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int i40e_set_mac(struct net_device *netdev, void *p)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct sockaddr *addr = p;
+ struct i40e_mac_filter *f;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ netdev_info(netdev, "set mac address=%pM\n", addr->sa_data);
+
+ if (ether_addr_equal(netdev->dev_addr, addr->sa_data))
+ return 0;
+
+ if (vsi->type == I40E_VSI_MAIN) {
+ i40e_status ret;
+ ret = i40e_aq_mac_address_write(&vsi->back->hw,
+ I40E_AQC_WRITE_TYPE_LAA_ONLY,
+ addr->sa_data, NULL);
+ if (ret) {
+ netdev_info(netdev,
+ "Addr change for Main VSI failed: %d\n",
+ ret);
+ return -EADDRNOTAVAIL;
+ }
+
+ memcpy(vsi->back->hw.mac.addr, addr->sa_data, netdev->addr_len);
+ }
+
+ /* In order to be sure to not drop any packets, add the new address
+ * then delete the old one.
+ */
+ f = i40e_add_filter(vsi, addr->sa_data, I40E_VLAN_ANY, false, false);
+ if (!f)
+ return -ENOMEM;
+
+ i40e_sync_vsi_filters(vsi);
+ i40e_del_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY, false, false);
+ i40e_sync_vsi_filters(vsi);
+
+ memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+
+ return 0;
+}
+
+/**
+ * i40e_vsi_setup_queue_map - Setup a VSI queue map based on enabled_tc
+ * @vsi: the VSI being setup
+ * @ctxt: VSI context structure
+ * @enabled_tc: Enabled TCs bitmap
+ * @is_add: True if called before Add VSI
+ *
+ * Setup VSI queue mapping for enabled traffic classes.
+ **/
+static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
+ struct i40e_vsi_context *ctxt,
+ u8 enabled_tc,
+ bool is_add)
+{
+ struct i40e_pf *pf = vsi->back;
+ u16 sections = 0;
+ u8 netdev_tc = 0;
+ u16 numtc = 0;
+ u16 qcount;
+ u8 offset;
+ u16 qmap;
+ int i;
+
+ sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
+ offset = 0;
+
+ if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
+ /* Find numtc from enabled TC bitmap */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (enabled_tc & (1 << i)) /* TC is enabled */
+ numtc++;
+ }
+ if (!numtc) {
+ dev_warn(&pf->pdev->dev, "DCB is enabled but no TC enabled, forcing TC0\n");
+ numtc = 1;
+ }
+ } else {
+ /* At least TC0 is enabled in case of non-DCB case */
+ numtc = 1;
+ }
+
+ vsi->tc_config.numtc = numtc;
+ vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
+
+ /* Setup queue offset/count for all TCs for given VSI */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ /* See if the given TC is enabled for the given VSI */
+ if (vsi->tc_config.enabled_tc & (1 << i)) { /* TC is enabled */
+ int pow, num_qps;
+
+ vsi->tc_config.tc_info[i].qoffset = offset;
+ switch (vsi->type) {
+ case I40E_VSI_MAIN:
+ if (i == 0)
+ qcount = pf->rss_size;
+ else
+ qcount = pf->num_tc_qps;
+ vsi->tc_config.tc_info[i].qcount = qcount;
+ break;
+ case I40E_VSI_FDIR:
+ case I40E_VSI_SRIOV:
+ case I40E_VSI_VMDQ2:
+ default:
+ qcount = vsi->alloc_queue_pairs;
+ vsi->tc_config.tc_info[i].qcount = qcount;
+ WARN_ON(i != 0);
+ break;
+ }
+
+ /* find the power-of-2 of the number of queue pairs */
+ num_qps = vsi->tc_config.tc_info[i].qcount;
+ pow = 0;
+ while (num_qps &&
+ ((1 << pow) < vsi->tc_config.tc_info[i].qcount)) {
+ pow++;
+ num_qps >>= 1;
+ }
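+			/* e.g. qcount = 6 gives pow = 3, so the qmap
+			 * advertises the next power of two (8) queues
+			 * for this TC
+			 */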
+
+ vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
+ qmap =
+ (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
+ (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
+
+ offset += vsi->tc_config.tc_info[i].qcount;
+ } else {
+ /* TC is not enabled so set the offset to
+ * default queue and allocate one queue
+ * for the given TC.
+ */
+ vsi->tc_config.tc_info[i].qoffset = 0;
+ vsi->tc_config.tc_info[i].qcount = 1;
+ vsi->tc_config.tc_info[i].netdev_tc = 0;
+
+ qmap = 0;
+ }
+ ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
+ }
+
+ /* Set actual Tx/Rx queue pairs */
+ vsi->num_queue_pairs = offset;
+
+ /* Scheduler section valid can only be set for ADD VSI */
+ if (is_add) {
+ sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
+
+ ctxt->info.up_enable_bits = enabled_tc;
+ }
+ if (vsi->type == I40E_VSI_SRIOV) {
+ ctxt->info.mapping_flags |=
+ cpu_to_le16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
+ for (i = 0; i < vsi->num_queue_pairs; i++)
+ ctxt->info.queue_mapping[i] =
+ cpu_to_le16(vsi->base_queue + i);
+ } else {
+ ctxt->info.mapping_flags |=
+ cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
+ ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
+ }
+ ctxt->info.valid_sections |= cpu_to_le16(sections);
+}
+
+/**
+ * i40e_set_rx_mode - NDO callback to set the netdev filters
+ * @netdev: network interface device structure
+ **/
+static void i40e_set_rx_mode(struct net_device *netdev)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_mac_filter *f, *ftmp;
+ struct i40e_vsi *vsi = np->vsi;
+ struct netdev_hw_addr *uca;
+ struct netdev_hw_addr *mca;
+ struct netdev_hw_addr *ha;
+
+ /* add addr if not already in the filter list */
+ netdev_for_each_uc_addr(uca, netdev) {
+ if (!i40e_find_mac(vsi, uca->addr, false, true)) {
+ if (i40e_is_vsi_in_vlan(vsi))
+ i40e_put_mac_in_vlan(vsi, uca->addr,
+ false, true);
+ else
+ i40e_add_filter(vsi, uca->addr, I40E_VLAN_ANY,
+ false, true);
+ }
+ }
+
+ netdev_for_each_mc_addr(mca, netdev) {
+ if (!i40e_find_mac(vsi, mca->addr, false, true)) {
+ if (i40e_is_vsi_in_vlan(vsi))
+ i40e_put_mac_in_vlan(vsi, mca->addr,
+ false, true);
+ else
+ i40e_add_filter(vsi, mca->addr, I40E_VLAN_ANY,
+ false, true);
+ }
+ }
+
+ /* remove filter if not in netdev list */
+ list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
+ bool found = false;
+
+ if (!f->is_netdev)
+ continue;
+
+ if (is_multicast_ether_addr(f->macaddr)) {
+ netdev_for_each_mc_addr(mca, netdev) {
+ if (ether_addr_equal(mca->addr, f->macaddr)) {
+ found = true;
+ break;
+ }
+ }
+ } else {
+ netdev_for_each_uc_addr(uca, netdev) {
+ if (ether_addr_equal(uca->addr, f->macaddr)) {
+ found = true;
+ break;
+ }
+ }
+
+ for_each_dev_addr(netdev, ha) {
+ if (ether_addr_equal(ha->addr, f->macaddr)) {
+ found = true;
+ break;
+ }
+ }
+ }
+ if (!found)
+ i40e_del_filter(
+ vsi, f->macaddr, I40E_VLAN_ANY, false, true);
+ }
+
+ /* check for other flag changes */
+ if (vsi->current_netdev_flags != vsi->netdev->flags) {
+ vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
+ vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
+ }
+}
+
+/**
+ * i40e_sync_vsi_filters - Update the VSI filter list to the HW
+ * @vsi: ptr to the VSI
+ *
+ * Push any outstanding VSI filter changes through the AdminQ.
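+ *
+ * Deletions are flushed to the firmware first and additions second,
+ * each batched into AdminQ buffers of at most filter_list_len entries.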
+ *
+ * Returns 0 or error value
+ **/
+int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
+{
+ struct i40e_mac_filter *f, *ftmp;
+ bool promisc_forced_on = false;
+ bool add_happened = false;
+ int filter_list_len = 0;
+ u32 changed_flags = 0;
+ i40e_status ret = 0;
+ struct i40e_pf *pf;
+ int num_add = 0;
+ int num_del = 0;
+ u16 cmd_flags;
+
+	/* pointers to the AQ element arrays, allocated by kcalloc below */
+ struct i40e_aqc_add_macvlan_element_data *add_list;
+ struct i40e_aqc_remove_macvlan_element_data *del_list;
+
+ while (test_and_set_bit(__I40E_CONFIG_BUSY, &vsi->state))
+ usleep_range(1000, 2000);
+ pf = vsi->back;
+
+ if (vsi->netdev) {
+ changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
+ vsi->current_netdev_flags = vsi->netdev->flags;
+ }
+
+ if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) {
+ vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED;
+
+ filter_list_len = pf->hw.aq.asq_buf_size /
+ sizeof(struct i40e_aqc_remove_macvlan_element_data);
+ del_list = kcalloc(filter_list_len,
+ sizeof(struct i40e_aqc_remove_macvlan_element_data),
+ GFP_KERNEL);
+		if (!del_list) {
+			clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
+			return -ENOMEM;
+		}
+
+ list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
+ if (!f->changed)
+ continue;
+
+ if (f->counter != 0)
+ continue;
+ f->changed = false;
+ cmd_flags = 0;
+
+ /* add to delete list */
+ memcpy(del_list[num_del].mac_addr,
+ f->macaddr, ETH_ALEN);
+ del_list[num_del].vlan_tag =
+ cpu_to_le16((u16)(f->vlan ==
+ I40E_VLAN_ANY ? 0 : f->vlan));
+
+ /* vlan0 as wild card to allow packets from all vlans */
+ if (f->vlan == I40E_VLAN_ANY ||
+ (vsi->netdev && !(vsi->netdev->features &
+ NETIF_F_HW_VLAN_CTAG_FILTER)))
+ cmd_flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
+ cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
+ del_list[num_del].flags = cmd_flags;
+ num_del++;
+
+ /* unlink from filter list */
+ list_del(&f->list);
+ kfree(f);
+
+ /* flush a full buffer */
+ if (num_del == filter_list_len) {
+ ret = i40e_aq_remove_macvlan(&pf->hw,
+ vsi->seid, del_list, num_del,
+ NULL);
+ num_del = 0;
+ memset(del_list, 0, sizeof(*del_list));
+
+ if (ret)
+ dev_info(&pf->pdev->dev,
+ "ignoring delete macvlan error, err %d, aq_err %d while flushing a full buffer\n",
+ ret,
+ pf->hw.aq.asq_last_status);
+ }
+ }
+ if (num_del) {
+ ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid,
+ del_list, num_del, NULL);
+ num_del = 0;
+
+ if (ret)
+ dev_info(&pf->pdev->dev,
+ "ignoring delete macvlan error, err %d, aq_err %d\n",
+ ret, pf->hw.aq.asq_last_status);
+ }
+
+ kfree(del_list);
+ del_list = NULL;
+
+ /* do all the adds now */
+ filter_list_len = pf->hw.aq.asq_buf_size /
+			   sizeof(struct i40e_aqc_add_macvlan_element_data);
+ add_list = kcalloc(filter_list_len,
+ sizeof(struct i40e_aqc_add_macvlan_element_data),
+ GFP_KERNEL);
+		if (!add_list) {
+			clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
+			return -ENOMEM;
+		}
+
+ list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
+ if (!f->changed)
+ continue;
+
+ if (f->counter == 0)
+ continue;
+ f->changed = false;
+ add_happened = true;
+ cmd_flags = 0;
+
+ /* add to add array */
+ memcpy(add_list[num_add].mac_addr,
+ f->macaddr, ETH_ALEN);
+ add_list[num_add].vlan_tag =
+ cpu_to_le16(
+ (u16)(f->vlan == I40E_VLAN_ANY ? 0 : f->vlan));
+ add_list[num_add].queue_number = 0;
+
+ cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
+
+ /* vlan0 as wild card to allow packets from all vlans */
+ if (f->vlan == I40E_VLAN_ANY || (vsi->netdev &&
+ !(vsi->netdev->features &
+ NETIF_F_HW_VLAN_CTAG_FILTER)))
+ cmd_flags |= I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
+ add_list[num_add].flags = cpu_to_le16(cmd_flags);
+ num_add++;
+
+ /* flush a full buffer */
+ if (num_add == filter_list_len) {
+ ret = i40e_aq_add_macvlan(&pf->hw,
+ vsi->seid,
+ add_list,
+ num_add,
+ NULL);
+ num_add = 0;
+
+ if (ret)
+ break;
+ memset(add_list, 0, sizeof(*add_list));
+ }
+ }
+ if (num_add) {
+ ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
+ add_list, num_add, NULL);
+ num_add = 0;
+ }
+ kfree(add_list);
+ add_list = NULL;
+
+ if (add_happened && (!ret)) {
+ /* do nothing */;
+ } else if (add_happened && (ret)) {
+ dev_info(&pf->pdev->dev,
+ "add filter failed, err %d, aq_err %d\n",
+ ret, pf->hw.aq.asq_last_status);
+ if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOSPC) &&
+ !test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
+ &vsi->state)) {
+ promisc_forced_on = true;
+ set_bit(__I40E_FILTER_OVERFLOW_PROMISC,
+ &vsi->state);
+ dev_info(&pf->pdev->dev, "promiscuous mode forced on\n");
+ }
+ }
+ }
+
+ /* check for changes in promiscuous modes */
+ if (changed_flags & IFF_ALLMULTI) {
+ bool cur_multipromisc;
+ cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI);
+ ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
+ vsi->seid,
+ cur_multipromisc,
+ NULL);
+ if (ret)
+ dev_info(&pf->pdev->dev,
+ "set multi promisc failed, err %d, aq_err %d\n",
+ ret, pf->hw.aq.asq_last_status);
+ }
+ if ((changed_flags & IFF_PROMISC) || promisc_forced_on) {
+ bool cur_promisc;
+ cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
+ test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
+ &vsi->state));
+ ret = i40e_aq_set_vsi_unicast_promiscuous(&vsi->back->hw,
+ vsi->seid,
+ cur_promisc,
+ NULL);
+ if (ret)
+ dev_info(&pf->pdev->dev,
+ "set uni promisc failed, err %d, aq_err %d\n",
+ ret, pf->hw.aq.asq_last_status);
+ }
+
+ clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
+ return 0;
+}
+
+/**
+ * i40e_sync_filters_subtask - Sync the VSI filter list with HW
+ * @pf: board private structure
+ **/
+static void i40e_sync_filters_subtask(struct i40e_pf *pf)
+{
+ int v;
+
+ if (!pf || !(pf->flags & I40E_FLAG_FILTER_SYNC))
+ return;
+ pf->flags &= ~I40E_FLAG_FILTER_SYNC;
+
+ for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+ if (pf->vsi[v] &&
+ (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED))
+ i40e_sync_vsi_filters(pf->vsi[v]);
+ }
+}
+
+/**
+ * i40e_change_mtu - NDO callback to change the Maximum Transfer Unit
+ * @netdev: network interface device structure
+ * @new_mtu: new value for maximum frame size
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
+ struct i40e_vsi *vsi = np->vsi;
+
+ /* MTU < 68 is an error and causes problems on some kernels */
+ if ((new_mtu < 68) || (max_frame > I40E_MAX_RXBUFFER))
+ return -EINVAL;
+
+ netdev_info(netdev, "changing MTU from %d to %d\n",
+ netdev->mtu, new_mtu);
+ netdev->mtu = new_mtu;
+ if (netif_running(netdev))
+ i40e_vsi_reinit_locked(vsi);
+
+ return 0;
+}
+
+/**
+ * i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI
+ * @vsi: the vsi being adjusted
+ **/
+void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
+{
+ struct i40e_vsi_context ctxt;
+ i40e_status ret;
+
+ if ((vsi->info.valid_sections &
+ cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
+ ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0))
+ return; /* already enabled */
+
+ vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
+ vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
+ I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
+
+ ctxt.seid = vsi->seid;
+ memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
+ ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+ if (ret) {
+ dev_info(&vsi->back->pdev->dev,
+ "%s: update vsi failed, aq_err=%d\n",
+ __func__, vsi->back->hw.aq.asq_last_status);
+ }
+}
+
+/**
+ * i40e_vlan_stripping_disable - Turn off vlan stripping for the VSI
+ * @vsi: the vsi being adjusted
+ **/
+void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
+{
+ struct i40e_vsi_context ctxt;
+ i40e_status ret;
+
+ if ((vsi->info.valid_sections &
+ cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
+ ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
+ I40E_AQ_VSI_PVLAN_EMOD_MASK))
+ return; /* already disabled */
+
+ vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
+ vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
+ I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
+
+ ctxt.seid = vsi->seid;
+ memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
+ ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+ if (ret) {
+ dev_info(&vsi->back->pdev->dev,
+ "%s: update vsi failed, aq_err=%d\n",
+ __func__, vsi->back->hw.aq.asq_last_status);
+ }
+}
+
+/**
+ * i40e_vlan_rx_register - Setup or shutdown vlan offload
+ * @netdev: network interface to be adjusted
+ * @features: netdev features to test if VLAN offload is enabled or not
+ **/
+static void i40e_vlan_rx_register(struct net_device *netdev, u32 features)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+
+ if (features & NETIF_F_HW_VLAN_CTAG_RX)
+ i40e_vlan_stripping_enable(vsi);
+ else
+ i40e_vlan_stripping_disable(vsi);
+}
+
+/**
+ * i40e_vsi_add_vlan - Add vsi membership for given vlan
+ * @vsi: the vsi being configured
+ * @vid: vlan id to be added (0 = untagged only, -1 = any)
+ **/
+int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
+{
+ struct i40e_mac_filter *f, *add_f;
+ bool is_netdev, is_vf;
+ int ret;
+
+ is_vf = (vsi->type == I40E_VSI_SRIOV);
+ is_netdev = !!(vsi->netdev);
+
+ if (is_netdev) {
+ add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, vid,
+ is_vf, is_netdev);
+ if (!add_f) {
+ dev_info(&vsi->back->pdev->dev,
+ "Could not add vlan filter %d for %pM\n",
+ vid, vsi->netdev->dev_addr);
+ return -ENOMEM;
+ }
+ }
+
+ list_for_each_entry(f, &vsi->mac_filter_list, list) {
+ add_f = i40e_add_filter(vsi, f->macaddr, vid, is_vf, is_netdev);
+ if (!add_f) {
+ dev_info(&vsi->back->pdev->dev,
+ "Could not add vlan filter %d for %pM\n",
+ vid, f->macaddr);
+ return -ENOMEM;
+ }
+ }
+
+ ret = i40e_sync_vsi_filters(vsi);
+ if (ret) {
+ dev_info(&vsi->back->pdev->dev,
+ "Could not sync filters for vid %d\n", vid);
+ return ret;
+ }
+
+	/* When adding a vlan tag, check whether it is the first real tag
+	 * (i.e. an I40E_VLAN_ANY (-1) filter still exists) and, if so,
+	 * replace the -1 filter with vlan 0 so that we accept untagged
+	 * plus the specified tagged traffic (and no longer any tagged
+	 * and untagged)
+	 */
+ if (vid > 0) {
+ if (is_netdev && i40e_find_filter(vsi, vsi->netdev->dev_addr,
+ I40E_VLAN_ANY,
+ is_vf, is_netdev)) {
+ i40e_del_filter(vsi, vsi->netdev->dev_addr,
+ I40E_VLAN_ANY, is_vf, is_netdev);
+ add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, 0,
+ is_vf, is_netdev);
+ if (!add_f) {
+ dev_info(&vsi->back->pdev->dev,
+ "Could not add filter 0 for %pM\n",
+ vsi->netdev->dev_addr);
+ return -ENOMEM;
+ }
+ }
+
+ list_for_each_entry(f, &vsi->mac_filter_list, list) {
+ if (i40e_find_filter(vsi, f->macaddr, I40E_VLAN_ANY,
+ is_vf, is_netdev)) {
+ i40e_del_filter(vsi, f->macaddr, I40E_VLAN_ANY,
+ is_vf, is_netdev);
+ add_f = i40e_add_filter(vsi, f->macaddr,
+ 0, is_vf, is_netdev);
+ if (!add_f) {
+ dev_info(&vsi->back->pdev->dev,
+ "Could not add filter 0 for %pM\n",
+ f->macaddr);
+ return -ENOMEM;
+ }
+ }
+ }
+ ret = i40e_sync_vsi_filters(vsi);
+ }
+
+ return ret;
+}
+
+/**
+ * i40e_vsi_kill_vlan - Remove vsi membership for given vlan
+ * @vsi: the vsi being configured
+ * @vid: vlan id to be removed (0 = untagged only, -1 = any)
+ **/
+int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
+{
+ struct net_device *netdev = vsi->netdev;
+ struct i40e_mac_filter *f, *add_f;
+ bool is_vf, is_netdev;
+ int filter_count = 0;
+ int ret;
+
+ is_vf = (vsi->type == I40E_VSI_SRIOV);
+ is_netdev = !!(netdev);
+
+ if (is_netdev)
+ i40e_del_filter(vsi, netdev->dev_addr, vid, is_vf, is_netdev);
+
+ list_for_each_entry(f, &vsi->mac_filter_list, list)
+ i40e_del_filter(vsi, f->macaddr, vid, is_vf, is_netdev);
+
+ ret = i40e_sync_vsi_filters(vsi);
+ if (ret) {
+ dev_info(&vsi->back->pdev->dev, "Could not sync filters\n");
+ return ret;
+ }
+
+	/* go through all the filters for this VSI and if only vid == 0
+	 * filters remain, replace vid 0 with -1 to signify that from now
+	 * on we accept any traffic (with any tag present, or untagged)
+	 */
+ list_for_each_entry(f, &vsi->mac_filter_list, list) {
+ if (is_netdev) {
+ if (f->vlan &&
+ ether_addr_equal(netdev->dev_addr, f->macaddr))
+ filter_count++;
+ }
+
+ if (f->vlan)
+ filter_count++;
+ }
+
+ if (!filter_count && is_netdev) {
+ i40e_del_filter(vsi, netdev->dev_addr, 0, is_vf, is_netdev);
+ f = i40e_add_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY,
+ is_vf, is_netdev);
+ if (!f) {
+ dev_info(&vsi->back->pdev->dev,
+ "Could not add filter %d for %pM\n",
+ I40E_VLAN_ANY, netdev->dev_addr);
+ return -ENOMEM;
+ }
+ }
+
+ if (!filter_count) {
+ list_for_each_entry(f, &vsi->mac_filter_list, list) {
+ i40e_del_filter(vsi, f->macaddr, 0, is_vf, is_netdev);
+ add_f = i40e_add_filter(vsi, f->macaddr, I40E_VLAN_ANY,
+ is_vf, is_netdev);
+ if (!add_f) {
+ dev_info(&vsi->back->pdev->dev,
+ "Could not add filter %d for %pM\n",
+ I40E_VLAN_ANY, f->macaddr);
+ return -ENOMEM;
+ }
+ }
+ }
+
+ return i40e_sync_vsi_filters(vsi);
+}
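+
+/* Editorial sketch (illustration only): the inverse walk through
+ * i40e_vsi_kill_vlan(), continuing the example above:
+ *
+ *   kill vid 200: { (mac, 100), (mac, 0) } a tagged filter remains
+ *   kill vid 100: { (mac, -1) }            only vid 0 was left, so it is
+ *                                          promoted back to I40E_VLAN_ANY
+ */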
+
+/**
+ * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload
+ * @netdev: network interface to be adjusted
+ * @vid: vlan id to be added
+ **/
+static int i40e_vlan_rx_add_vid(struct net_device *netdev,
+ __always_unused __be16 proto, u16 vid)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ int ret;
+
+ if (vid > 4095)
+ return 0;
+
+ netdev_info(vsi->netdev, "adding %pM vid=%d\n",
+ netdev->dev_addr, vid);
+ /* If the network stack called us with vid = 0, we should
+ * indicate to i40e_vsi_add_vlan() that we want to receive
+ * any traffic (i.e. with any vlan tag, or untagged)
+ */
+ ret = i40e_vsi_add_vlan(vsi, vid ? vid : I40E_VLAN_ANY);
+
+ if (!ret) {
+ if (vid < VLAN_N_VID)
+ set_bit(vid, vsi->active_vlans);
+ }
+
+ return 0;
+}
+
+/**
+ * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload
+ * @netdev: network interface to be adjusted
+ * @vid: vlan id to be removed
+ **/
+static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
+ __always_unused __be16 proto, u16 vid)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+
+ netdev_info(vsi->netdev, "removing %pM vid=%d\n",
+ netdev->dev_addr, vid);
+ /* The return code is ignored as there is nothing a user
+ * can do about a failure to remove, and a log message was
+ * already printed from another function
+ */
+ i40e_vsi_kill_vlan(vsi, vid);
+
+ clear_bit(vid, vsi->active_vlans);
+ return 0;
+}
+
+/**
+ * i40e_restore_vlan - Reinstate vlans when vsi/netdev comes back up
+ * @vsi: the vsi being brought back up
+ **/
+static void i40e_restore_vlan(struct i40e_vsi *vsi)
+{
+ u16 vid;
+
+ if (!vsi->netdev)
+ return;
+
+ i40e_vlan_rx_register(vsi->netdev, vsi->netdev->features);
+
+ for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID)
+ i40e_vlan_rx_add_vid(vsi->netdev, htons(ETH_P_8021Q),
+ vid);
+}
+
+/**
+ * i40e_vsi_add_pvid - Add pvid for the VSI
+ * @vsi: the vsi being adjusted
+ * @vid: the vlan id to set as a PVID
+ **/
+i40e_status i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
+{
+ struct i40e_vsi_context ctxt;
+ i40e_status ret;
+
+ vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
+ vsi->info.pvid = cpu_to_le16(vid);
+ vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_INSERT_PVID;
+ vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
+
+ ctxt.seid = vsi->seid;
+ memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
+ ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+ if (ret) {
+ dev_info(&vsi->back->pdev->dev,
+ "%s: update vsi failed, aq_err=%d\n",
+ __func__, vsi->back->hw.aq.asq_last_status);
+ }
+
+ return ret;
+}
+
+/**
+ * i40e_vsi_remove_pvid - Remove the pvid from the VSI
+ * @vsi: the vsi being adjusted
+ *
+ * Just use the vlan_rx_register() service to put it back to normal
+ **/
+void i40e_vsi_remove_pvid(struct i40e_vsi *vsi)
+{
+ vsi->info.pvid = 0;
+ i40e_vlan_rx_register(vsi->netdev, vsi->netdev->features);
+}
+
+/**
+ * i40e_vsi_setup_tx_resources - Allocate VSI Tx queue resources
+ * @vsi: ptr to the VSI
+ *
+ * If this function returns with an error, then it's possible one or
+ * more of the rings is populated (while the rest are not). It is the
+ * caller's duty to clean those orphaned rings.
+ *
+ * Return 0 on success, negative on failure
+ **/
+static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi)
+{
+ int i, err = 0;
+
+ for (i = 0; i < vsi->num_queue_pairs && !err; i++)
+ err = i40e_setup_tx_descriptors(&vsi->tx_rings[i]);
+
+ return err;
+}
+
+/**
+ * i40e_vsi_free_tx_resources - Free Tx resources for VSI queues
+ * @vsi: ptr to the VSI
+ *
+ * Free VSI's transmit software resources
+ **/
+static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi)
+{
+ int i;
+
+ for (i = 0; i < vsi->num_queue_pairs; i++)
+ if (vsi->tx_rings[i].desc)
+ i40e_free_tx_resources(&vsi->tx_rings[i]);
+}
+
+/**
+ * i40e_vsi_setup_rx_resources - Allocate VSI queues Rx resources
+ * @vsi: ptr to the VSI
+ *
+ * If this function returns with an error, then it's possible one or
+ * more of the rings is populated (while the rest are not). It is the
+ * caller's duty to clean those orphaned rings.
+ *
+ * Return 0 on success, negative on failure
+ **/
+static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi)
+{
+ int i, err = 0;
+
+ for (i = 0; i < vsi->num_queue_pairs && !err; i++)
+ err = i40e_setup_rx_descriptors(&vsi->rx_rings[i]);
+ return err;
+}
+
+/**
+ * i40e_vsi_free_rx_resources - Free Rx Resources for VSI queues
+ * @vsi: ptr to the VSI
+ *
+ * Free all receive software resources
+ **/
+static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
+{
+ int i;
+
+ for (i = 0; i < vsi->num_queue_pairs; i++)
+ if (vsi->rx_rings[i].desc)
+ i40e_free_rx_resources(&vsi->rx_rings[i]);
+}
+
+/**
+ * i40e_configure_tx_ring - Configure a transmit ring context
+ * @ring: The Tx ring to configure
+ *
+ * Configure the Tx descriptor ring in the HMC context.
+ **/
+static int i40e_configure_tx_ring(struct i40e_ring *ring)
+{
+ struct i40e_vsi *vsi = ring->vsi;
+ u16 pf_q = vsi->base_queue + ring->queue_index;
+ struct i40e_hw *hw = &vsi->back->hw;
+ struct i40e_hmc_obj_txq tx_ctx;
+ i40e_status err = 0;
+ u32 qtx_ctl = 0;
+
+ /* some ATR related tx ring init */
+ if (vsi->back->flags & I40E_FLAG_FDIR_ATR_ENABLED) {
+ ring->atr_sample_rate = vsi->back->atr_sample_rate;
+ ring->atr_count = 0;
+ } else {
+ ring->atr_sample_rate = 0;
+ }
+
+ /* initialize XPS */
+ if (ring->q_vector && ring->netdev &&
+ !test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state))
+ netif_set_xps_queue(ring->netdev,
+ &ring->q_vector->affinity_mask,
+ ring->queue_index);
+
+ /* clear the context structure first */
+ memset(&tx_ctx, 0, sizeof(tx_ctx));
+
+ tx_ctx.new_context = 1;
+ tx_ctx.base = (ring->dma / 128);
+ tx_ctx.qlen = ring->count;
+ tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FDIR_ENABLED |
+ I40E_FLAG_FDIR_ATR_ENABLED));
+
+ /* As part of VSI creation/update, FW allocates certain
+ * Tx arbitration queue sets for each TC enabled for
+ * the VSI. The FW returns the handles to these queue
+ * sets as part of the response buffer to Add VSI,
+ * Update VSI, etc. AQ commands. It is expected that
+ * these queue set handles be associated with the Tx
+ * queues by the driver as part of the TX queue context
+ * initialization. This has to be done regardless of
+ * DCB as by default everything is mapped to TC0.
+ */
+ tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[ring->dcb_tc]);
+ tx_ctx.rdylist_act = 0;
+
+ /* clear the context in the HMC */
+ err = i40e_clear_lan_tx_queue_context(hw, pf_q);
+ if (err) {
+ dev_info(&vsi->back->pdev->dev,
+ "Failed to clear LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
+ ring->queue_index, pf_q, err);
+ return -ENOMEM;
+ }
+
+ /* set the context in the HMC */
+ err = i40e_set_lan_tx_queue_context(hw, pf_q, &tx_ctx);
+ if (err) {
+ dev_info(&vsi->back->pdev->dev,
+ "Failed to set LAN Tx queue context on Tx ring %d (pf_q %d, error: %d\n",
+ ring->queue_index, pf_q, err);
+ return -ENOMEM;
+ }
+
+ /* Now associate this queue with this PCI function */
+ qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
+ qtx_ctl |= ((hw->hmc.hmc_fn_id << I40E_QTX_CTL_PF_INDX_SHIFT)
+ & I40E_QTX_CTL_PF_INDX_MASK);
+ wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
+ i40e_flush(hw);
+
+ clear_bit(__I40E_HANG_CHECK_ARMED, &ring->state);
+
+ /* cache the tail offset for easier writes later */
+ ring->tail = hw->hw_addr + I40E_QTX_TAIL(pf_q);
+
+ return 0;
+}
+
+/**
+ * i40e_configure_rx_ring - Configure a receive ring context
+ * @ring: The Rx ring to configure
+ *
+ * Configure the Rx descriptor ring in the HMC context.
+ **/
+static int i40e_configure_rx_ring(struct i40e_ring *ring)
+{
+ struct i40e_vsi *vsi = ring->vsi;
+ u32 chain_len = vsi->back->hw.func_caps.rx_buf_chain_len;
+ u16 pf_q = vsi->base_queue + ring->queue_index;
+ struct i40e_hw *hw = &vsi->back->hw;
+ struct i40e_hmc_obj_rxq rx_ctx;
+ i40e_status err = 0;
+
+ ring->state = 0;
+
+ /* clear the context structure first */
+ memset(&rx_ctx, 0, sizeof(rx_ctx));
+
+ ring->rx_buf_len = vsi->rx_buf_len;
+ ring->rx_hdr_len = vsi->rx_hdr_len;
+
+ rx_ctx.dbuff = ring->rx_buf_len >> I40E_RXQ_CTX_DBUFF_SHIFT;
+ rx_ctx.hbuff = ring->rx_hdr_len >> I40E_RXQ_CTX_HBUFF_SHIFT;
+
+ rx_ctx.base = (ring->dma / 128);
+ rx_ctx.qlen = ring->count;
+
+ if (vsi->back->flags & I40E_FLAG_16BYTE_RX_DESC_ENABLED) {
+ set_ring_16byte_desc_enabled(ring);
+ rx_ctx.dsize = 0;
+ } else {
+ rx_ctx.dsize = 1;
+ }
+
+ rx_ctx.dtype = vsi->dtype;
+ if (vsi->dtype) {
+ set_ring_ps_enabled(ring);
+ rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2 |
+ I40E_RX_SPLIT_IP |
+ I40E_RX_SPLIT_TCP_UDP |
+ I40E_RX_SPLIT_SCTP;
+ } else {
+ rx_ctx.hsplit_0 = 0;
+ }
+
+ rx_ctx.rxmax = min_t(u16, vsi->max_frame,
+ (chain_len * ring->rx_buf_len));
+ rx_ctx.tphrdesc_ena = 1;
+ rx_ctx.tphwdesc_ena = 1;
+ rx_ctx.tphdata_ena = 1;
+ rx_ctx.tphhead_ena = 1;
+ rx_ctx.lrxqthresh = 2;
+ rx_ctx.crcstrip = 1;
+ rx_ctx.l2tsel = 1;
+ rx_ctx.showiv = 1;
+
+ /* clear the context in the HMC */
+ err = i40e_clear_lan_rx_queue_context(hw, pf_q);
+ if (err) {
+ dev_info(&vsi->back->pdev->dev,
+ "Failed to clear LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
+ ring->queue_index, pf_q, err);
+ return -ENOMEM;
+ }
+
+ /* set the context in the HMC */
+ err = i40e_set_lan_rx_queue_context(hw, pf_q, &rx_ctx);
+ if (err) {
+ dev_info(&vsi->back->pdev->dev,
+ "Failed to set LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
+ ring->queue_index, pf_q, err);
+ return -ENOMEM;
+ }
+
+ /* cache tail for quicker writes, and clear the reg before use */
+ ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
+ writel(0, ring->tail);
+
+ i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
+
+ return 0;
+}
+
+/**
+ * i40e_vsi_configure_tx - Configure the VSI for Tx
+ * @vsi: VSI structure describing this set of rings and resources
+ *
+ * Configure the Tx VSI for operation.
+ **/
+static int i40e_vsi_configure_tx(struct i40e_vsi *vsi)
+{
+ int err = 0;
+ u16 i;
+
+ for (i = 0; (i < vsi->num_queue_pairs) && (!err); i++)
+ err = i40e_configure_tx_ring(&vsi->tx_rings[i]);
+
+ return err;
+}
+
+/**
+ * i40e_vsi_configure_rx - Configure the VSI for Rx
+ * @vsi: the VSI being configured
+ *
+ * Configure the Rx VSI for operation.
+ **/
+static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
+{
+ int err = 0;
+ u16 i;
+
+ if (vsi->netdev && (vsi->netdev->mtu > ETH_DATA_LEN))
+ vsi->max_frame = vsi->netdev->mtu + ETH_HLEN
+ + ETH_FCS_LEN + VLAN_HLEN;
+ else
+ vsi->max_frame = I40E_RXBUFFER_2048;
+
+ /* figure out correct receive buffer length */
+ switch (vsi->back->flags & (I40E_FLAG_RX_1BUF_ENABLED |
+ I40E_FLAG_RX_PS_ENABLED)) {
+ case I40E_FLAG_RX_1BUF_ENABLED:
+ vsi->rx_hdr_len = 0;
+ vsi->rx_buf_len = vsi->max_frame;
+ vsi->dtype = I40E_RX_DTYPE_NO_SPLIT;
+ break;
+ case I40E_FLAG_RX_PS_ENABLED:
+ vsi->rx_hdr_len = I40E_RX_HDR_SIZE;
+ vsi->rx_buf_len = I40E_RXBUFFER_2048;
+ vsi->dtype = I40E_RX_DTYPE_HEADER_SPLIT;
+ break;
+ default:
+ vsi->rx_hdr_len = I40E_RX_HDR_SIZE;
+ vsi->rx_buf_len = I40E_RXBUFFER_2048;
+ vsi->dtype = I40E_RX_DTYPE_SPLIT_ALWAYS;
+ break;
+ }
+
+ /* round up for the chip's needs */
+ vsi->rx_hdr_len = ALIGN(vsi->rx_hdr_len,
+ (1 << I40E_RXQ_CTX_HBUFF_SHIFT));
+ vsi->rx_buf_len = ALIGN(vsi->rx_buf_len,
+ (1 << I40E_RXQ_CTX_DBUFF_SHIFT));
+
+ /* set up individual rings */
+ for (i = 0; i < vsi->num_queue_pairs && !err; i++)
+ err = i40e_configure_rx_ring(&vsi->rx_rings[i]);
+
+ return err;
+}
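+
+/* Editorial sketch: an illustrative sizing pass through the code above,
+ * assuming a 9000-byte MTU, 1BUF mode, and the 128-byte data-buffer
+ * granularity implied by I40E_RXQ_CTX_DBUFF_SHIFT:
+ *
+ *   max_frame  = 9000 + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN = 9022
+ *   rx_buf_len = ALIGN(9022, 128) = 9088, rx_hdr_len = 0
+ */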
+
+/**
+ * i40e_vsi_config_dcb_rings - Update rings to reflect DCB TC
+ * @vsi: ptr to the VSI
+ **/
+static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
+{
+ u16 qoffset, qcount;
+ int i, n;
+
+ if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED))
+ return;
+
+ for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) {
+ if (!(vsi->tc_config.enabled_tc & (1 << n)))
+ continue;
+
+ qoffset = vsi->tc_config.tc_info[n].qoffset;
+ qcount = vsi->tc_config.tc_info[n].qcount;
+ for (i = qoffset; i < (qoffset + qcount); i++) {
+ struct i40e_ring *rx_ring = &vsi->rx_rings[i];
+ struct i40e_ring *tx_ring = &vsi->tx_rings[i];
+ rx_ring->dcb_tc = n;
+ tx_ring->dcb_tc = n;
+ }
+ }
+}
+
+/**
+ * i40e_set_vsi_rx_mode - Call set_rx_mode on a VSI
+ * @vsi: ptr to the VSI
+ **/
+static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi)
+{
+ if (vsi->netdev)
+ i40e_set_rx_mode(vsi->netdev);
+}
+
+/**
+ * i40e_vsi_configure - Set up the VSI for action
+ * @vsi: the VSI being configured
+ **/
+static int i40e_vsi_configure(struct i40e_vsi *vsi)
+{
+ int err;
+
+ i40e_set_vsi_rx_mode(vsi);
+ i40e_restore_vlan(vsi);
+ i40e_vsi_config_dcb_rings(vsi);
+ err = i40e_vsi_configure_tx(vsi);
+ if (!err)
+ err = i40e_vsi_configure_rx(vsi);
+
+ return err;
+}
+
+/**
+ * i40e_vsi_configure_msix - MSIX mode Interrupt Config in the HW
+ * @vsi: the VSI being configured
+ **/
+static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
+{
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_q_vector *q_vector;
+ struct i40e_hw *hw = &pf->hw;
+ u16 vector;
+ int i, q;
+ u32 val;
+ u32 qp;
+
+ /* The interrupt indexing is offset by 1 in the PFINT_ITRn
+ * and PFINT_LNKLSTn registers, e.g.:
+ * PFINT_ITRn[0..n-1] gets msix-1..msix-n (qpair interrupts)
+ */
+ qp = vsi->base_queue;
+ vector = vsi->base_vector;
+ q_vector = vsi->q_vectors;
+ for (i = 0; i < vsi->num_q_vectors; i++, q_vector++, vector++) {
+ q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
+ q_vector->rx.latency_range = I40E_LOW_LATENCY;
+ wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
+ q_vector->rx.itr);
+ q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
+ q_vector->tx.latency_range = I40E_LOW_LATENCY;
+ wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1),
+ q_vector->tx.itr);
+
+ /* Linked list for the queuepairs assigned to this vector */
+ wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp);
+ for (q = 0; q < q_vector->num_ringpairs; q++) {
+ val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
+ (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
+ (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
+ (qp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)|
+ (I40E_QUEUE_TYPE_TX
+ << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
+
+ wr32(hw, I40E_QINT_RQCTL(qp), val);
+
+ val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
+ (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
+ (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
+ ((qp+1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT)|
+ (I40E_QUEUE_TYPE_RX
+ << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
+
+ /* Terminate the linked list */
+ if (q == (q_vector->num_ringpairs - 1))
+ val |= (I40E_QUEUE_END_OF_LIST
+ << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
+
+ wr32(hw, I40E_QINT_TQCTL(qp), val);
+ qp++;
+ }
+ }
+
+ i40e_flush(hw);
+}
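+
+/* Editorial sketch (values illustrative): with base_queue 0, base_vector 1
+ * and two q_vectors of two ringpairs each, the loop above programs:
+ *
+ *   vector 1: PFINT_LNKLSTN(0) = 0, chain rx0 -> tx0 -> rx1 -> tx1 -> EOL
+ *   vector 2: PFINT_LNKLSTN(1) = 2, chain rx2 -> tx2 -> rx3 -> tx3 -> EOL
+ */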
+
+/**
+ * i40e_enable_misc_int_causes - enable the non-queue interrupts
+ * @hw: ptr to the hardware info
+ **/
+static void i40e_enable_misc_int_causes(struct i40e_hw *hw)
+{
+ u32 val;
+
+ /* clear things first */
+ wr32(hw, I40E_PFINT_ICR0_ENA, 0); /* disable all */
+ rd32(hw, I40E_PFINT_ICR0); /* read to clear */
+
+ val = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
+ I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
+ I40E_PFINT_ICR0_ENA_GRST_MASK |
+ I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
+ I40E_PFINT_ICR0_ENA_GPIO_MASK |
+ I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK |
+ I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
+ I40E_PFINT_ICR0_ENA_VFLR_MASK |
+ I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
+
+ wr32(hw, I40E_PFINT_ICR0_ENA, val);
+
+ /* SW_ITR_IDX = 0, but don't change INTENA */
+ wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK |
+ I40E_PFINT_DYN_CTLN_INTENA_MSK_MASK);
+
+ /* OTHER_ITR_IDX = 0 */
+ wr32(hw, I40E_PFINT_STAT_CTL0, 0);
+}
+
+/**
+ * i40e_configure_msi_and_legacy - Legacy mode interrupt config in the HW
+ * @vsi: the VSI being configured
+ **/
+static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
+{
+ struct i40e_q_vector *q_vector = vsi->q_vectors;
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ u32 val;
+
+ /* set the ITR configuration */
+ q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
+ q_vector->rx.latency_range = I40E_LOW_LATENCY;
+ wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.itr);
+ q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
+ q_vector->tx.latency_range = I40E_LOW_LATENCY;
+ wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.itr);
+
+ i40e_enable_misc_int_causes(hw);
+
+ /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
+ wr32(hw, I40E_PFINT_LNKLST0, 0);
+
+ /* Associate the queue pair to the vector and enable the q int */
+ val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
+ (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
+ (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
+
+ wr32(hw, I40E_QINT_RQCTL(0), val);
+
+ val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
+ (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
+ (I40E_QUEUE_END_OF_LIST << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
+
+ wr32(hw, I40E_QINT_TQCTL(0), val);
+ i40e_flush(hw);
+}
+
+/**
+ * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0
+ * @pf: board private structure
+ **/
+static void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ u32 val;
+
+ val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
+ I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
+ (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
+
+ wr32(hw, I40E_PFINT_DYN_CTL0, val);
+ i40e_flush(hw);
+}
+
+/**
+ * i40e_irq_dynamic_enable - Enable default interrupt generation settings
+ * @vsi: pointer to a vsi
+ * @vector: the hardware interrupt vector to enable
+ **/
+void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector)
+{
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ u32 val;
+
+ val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
+ I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
+ (I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
+ wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val);
+ i40e_flush(hw);
+}
+
+/**
+ * i40e_msix_clean_rings - MSIX mode Interrupt Handler
+ * @irq: interrupt number
+ * @data: pointer to a q_vector
+ **/
+static irqreturn_t i40e_msix_clean_rings(int irq, void *data)
+{
+ struct i40e_q_vector *q_vector = data;
+
+ if (!q_vector->tx.ring[0] && !q_vector->rx.ring[0])
+ return IRQ_HANDLED;
+
+ napi_schedule(&q_vector->napi);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * i40e_fdir_clean_rings - Interrupt Handler for FDIR rings
+ * @irq: interrupt number
+ * @data: pointer to a q_vector
+ **/
+static irqreturn_t i40e_fdir_clean_rings(int irq, void *data)
+{
+ struct i40e_q_vector *q_vector = data;
+
+ if (!q_vector->tx.ring[0] && !q_vector->rx.ring[0])
+ return IRQ_HANDLED;
+
+ pr_info("fdir ring cleaning needed\n");
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * i40e_vsi_request_irq_msix - Initialize MSI-X interrupts
+ * @vsi: the VSI being configured
+ * @basename: name for the vector
+ *
+ * Requests the kernel IRQs for the VSI's previously allocated MSI-X vectors.
+ **/
+static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
+{
+ int q_vectors = vsi->num_q_vectors;
+ struct i40e_pf *pf = vsi->back;
+ int base = vsi->base_vector;
+ int rx_int_idx = 0;
+ int tx_int_idx = 0;
+ int vector, err;
+
+ for (vector = 0; vector < q_vectors; vector++) {
+ struct i40e_q_vector *q_vector = &(vsi->q_vectors[vector]);
+
+ if (q_vector->tx.ring[0] && q_vector->rx.ring[0]) {
+ snprintf(q_vector->name, sizeof(q_vector->name) - 1,
+ "%s-%s-%d", basename, "TxRx", rx_int_idx++);
+ tx_int_idx++;
+ } else if (q_vector->rx.ring[0]) {
+ snprintf(q_vector->name, sizeof(q_vector->name) - 1,
+ "%s-%s-%d", basename, "rx", rx_int_idx++);
+ } else if (q_vector->tx.ring[0]) {
+ snprintf(q_vector->name, sizeof(q_vector->name) - 1,
+ "%s-%s-%d", basename, "tx", tx_int_idx++);
+ } else {
+ /* skip this unused q_vector */
+ continue;
+ }
+ err = request_irq(pf->msix_entries[base + vector].vector,
+ vsi->irq_handler,
+ 0,
+ q_vector->name,
+ q_vector);
+ if (err) {
+ dev_info(&pf->pdev->dev,
+ "%s: request_irq failed, error: %d\n",
+ __func__, err);
+ goto free_queue_irqs;
+ }
+ /* assign the mask for this irq */
+ irq_set_affinity_hint(pf->msix_entries[base + vector].vector,
+ &q_vector->affinity_mask);
+ }
+
+ return 0;
+
+free_queue_irqs:
+ while (vector) {
+ vector--;
+ irq_set_affinity_hint(pf->msix_entries[base + vector].vector,
+ NULL);
+ free_irq(pf->msix_entries[base + vector].vector,
+ &(vsi->q_vectors[vector]));
+ }
+ return err;
+}
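+
+/* Editorial note: given the snprintf() formats above, a basename such as
+ * "i40e-eth0" (illustrative) yields IRQ names like "i40e-eth0-TxRx-0",
+ * "i40e-eth0-rx-1" or "i40e-eth0-tx-1", depending on which ring types
+ * each vector carries.
+ */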
+
+/**
+ * i40e_vsi_disable_irq - Mask off queue interrupt generation on the VSI
+ * @vsi: the VSI being un-configured
+ **/
+static void i40e_vsi_disable_irq(struct i40e_vsi *vsi)
+{
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ int base = vsi->base_vector;
+ int i;
+
+ for (i = 0; i < vsi->num_queue_pairs; i++) {
+ wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i].reg_idx), 0);
+ wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i].reg_idx), 0);
+ }
+
+ if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
+ for (i = vsi->base_vector;
+ i < (vsi->num_q_vectors + vsi->base_vector); i++)
+ wr32(hw, I40E_PFINT_DYN_CTLN(i - 1), 0);
+
+ i40e_flush(hw);
+ for (i = 0; i < vsi->num_q_vectors; i++)
+ synchronize_irq(pf->msix_entries[i + base].vector);
+ } else {
+ /* Legacy and MSI mode - this stops all interrupt handling */
+ wr32(hw, I40E_PFINT_ICR0_ENA, 0);
+ wr32(hw, I40E_PFINT_DYN_CTL0, 0);
+ i40e_flush(hw);
+ synchronize_irq(pf->pdev->irq);
+ }
+}
+
+/**
+ * i40e_vsi_enable_irq - Enable IRQ for the given VSI
+ * @vsi: the VSI being configured
+ **/
+static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
+{
+ struct i40e_pf *pf = vsi->back;
+ int i;
+
+ if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
+ for (i = vsi->base_vector;
+ i < (vsi->num_q_vectors + vsi->base_vector); i++)
+ i40e_irq_dynamic_enable(vsi, i);
+ } else {
+ i40e_irq_dynamic_enable_icr0(pf);
+ }
+
+ return 0;
+}
+
+/**
+ * i40e_stop_misc_vector - Stop the vector that handles non-queue events
+ * @pf: board private structure
+ **/
+static void i40e_stop_misc_vector(struct i40e_pf *pf)
+{
+ /* Disable ICR 0 */
+ wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0);
+ i40e_flush(&pf->hw);
+}
+
+/**
+ * i40e_intr - MSI/Legacy and non-queue interrupt handler
+ * @irq: interrupt number
+ * @data: pointer to a q_vector
+ *
+ * This is the handler used for all MSI/Legacy interrupts, and deals
+ * with both queue and non-queue interrupts. This is also used in
+ * MSIX mode to handle the non-queue interrupts.
+ **/
+static irqreturn_t i40e_intr(int irq, void *data)
+{
+ struct i40e_pf *pf = (struct i40e_pf *)data;
+ struct i40e_hw *hw = &pf->hw;
+ u32 icr0, icr0_remaining;
+ u32 val, ena_mask;
+
+ icr0 = rd32(hw, I40E_PFINT_ICR0);
+
+ /* if sharing a legacy IRQ, we might get called w/o an intr pending */
+ if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0)
+ return IRQ_NONE;
+
+ val = rd32(hw, I40E_PFINT_DYN_CTL0);
+ val = val | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
+ wr32(hw, I40E_PFINT_DYN_CTL0, val);
+
+ ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA);
+
+ /* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
+ if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {
+
+ /* temporarily disable queue cause for NAPI processing */
+ u32 qval = rd32(hw, I40E_QINT_RQCTL(0));
+ qval &= ~I40E_QINT_RQCTL_CAUSE_ENA_MASK;
+ wr32(hw, I40E_QINT_RQCTL(0), qval);
+
+ qval = rd32(hw, I40E_QINT_TQCTL(0));
+ qval &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK;
+ wr32(hw, I40E_QINT_TQCTL(0), qval);
+ i40e_flush(hw);
+
+ if (!test_bit(__I40E_DOWN, &pf->state))
+ napi_schedule(&pf->vsi[pf->lan_vsi]->q_vectors[0].napi);
+ }
+
+ if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
+ ena_mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
+ set_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state);
+ }
+
+ if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
+ ena_mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
+ set_bit(__I40E_MDD_EVENT_PENDING, &pf->state);
+ }
+
+ if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
+ ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
+ set_bit(__I40E_VFLR_EVENT_PENDING, &pf->state);
+ }
+
+ if (icr0 & I40E_PFINT_ICR0_GRST_MASK) {
+ if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
+ set_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
+ ena_mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK;
+ val = rd32(hw, I40E_GLGEN_RSTAT);
+ val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
+ >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
+ if (val & I40E_RESET_CORER)
+ pf->corer_count++;
+ else if (val & I40E_RESET_GLOBR)
+ pf->globr_count++;
+ else if (val & I40E_RESET_EMPR)
+ pf->empr_count++;
+ }
+
+ /* If a critical error is pending we have no choice but to reset the
+ * device.
+ * Report and mask out any remaining unexpected interrupts.
+ */
+ icr0_remaining = icr0 & ena_mask;
+ if (icr0_remaining) {
+ dev_info(&pf->pdev->dev, "unhandled interrupt icr0=0x%08x\n",
+ icr0_remaining);
+ if ((icr0_remaining & I40E_PFINT_ICR0_HMC_ERR_MASK) ||
+ (icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) ||
+ (icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) ||
+ (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK) ||
+ (icr0_remaining & I40E_PFINT_ICR0_MAL_DETECT_MASK)) {
+ if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) {
+ dev_info(&pf->pdev->dev, "HMC error interrupt\n");
+ } else {
+ dev_info(&pf->pdev->dev, "device will be reset\n");
+ set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
+ i40e_service_event_schedule(pf);
+ }
+ }
+ ena_mask &= ~icr0_remaining;
+ }
+
+ /* re-enable interrupt causes */
+ wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
+ i40e_flush(hw);
+ if (!test_bit(__I40E_DOWN, &pf->state)) {
+ i40e_service_event_schedule(pf);
+ i40e_irq_dynamic_enable_icr0(pf);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * map_vector_to_rxq - Assigns the Rx queue to the vector
+ * @vsi: the VSI being configured
+ * @v_idx: vector index
+ * @r_idx: rx queue index
+ **/
+static void map_vector_to_rxq(struct i40e_vsi *vsi, int v_idx, int r_idx)
+{
+ struct i40e_q_vector *q_vector = &(vsi->q_vectors[v_idx]);
+ struct i40e_ring *rx_ring = &(vsi->rx_rings[r_idx]);
+
+ rx_ring->q_vector = q_vector;
+ q_vector->rx.ring[q_vector->rx.count] = rx_ring;
+ q_vector->rx.count++;
+ q_vector->rx.latency_range = I40E_LOW_LATENCY;
+ q_vector->vsi = vsi;
+}
+
+/**
+ * map_vector_to_txq - Assigns the Tx queue to the vector
+ * @vsi: the VSI being configured
+ * @v_idx: vector index
+ * @t_idx: tx queue index
+ **/
+static void map_vector_to_txq(struct i40e_vsi *vsi, int v_idx, int t_idx)
+{
+ struct i40e_q_vector *q_vector = &(vsi->q_vectors[v_idx]);
+ struct i40e_ring *tx_ring = &(vsi->tx_rings[t_idx]);
+
+ tx_ring->q_vector = q_vector;
+ q_vector->tx.ring[q_vector->tx.count] = tx_ring;
+ q_vector->tx.count++;
+ q_vector->tx.latency_range = I40E_LOW_LATENCY;
+ q_vector->num_ringpairs++;
+ q_vector->vsi = vsi;
+}
+
+/**
+ * i40e_vsi_map_rings_to_vectors - Maps descriptor rings to vectors
+ * @vsi: the VSI being configured
+ *
+ * This function maps descriptor rings to the queue-specific vectors
+ * we were allotted through the MSI-X enabling code. Ideally, we'd have
+ * one vector per queue pair, but on a constrained vector budget, we
+ * group the queue pairs as "efficiently" as possible.
+ **/
+static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
+{
+ int qp_remaining = vsi->num_queue_pairs;
+ int q_vectors = vsi->num_q_vectors;
+ int qp_per_vector;
+ int v_start = 0;
+ int qp_idx = 0;
+
+ /* If we don't have enough vectors for a 1-to-1 mapping, we'll have to
+ * group them so there are multiple queues per vector.
+ */
+ for (; v_start < q_vectors && qp_remaining; v_start++) {
+ qp_per_vector = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);
+ for (; qp_per_vector;
+ qp_per_vector--, qp_idx++, qp_remaining--) {
+ map_vector_to_rxq(vsi, v_start, qp_idx);
+ map_vector_to_txq(vsi, v_start, qp_idx);
+ }
+ }
+}
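+
+/* Editorial sketch: the DIV_ROUND_UP() above front-loads any leftover
+ * queue pairs, e.g. 8 queue pairs on 3 vectors map as 3 + 3 + 2:
+ *
+ *   v0: qp 0-2,  v1: qp 3-5,  v2: qp 6-7
+ */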
+
+/**
+ * i40e_vsi_request_irq - Request IRQ from the OS
+ * @vsi: the VSI being configured
+ * @basename: name for the vector
+ **/
+static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename)
+{
+ struct i40e_pf *pf = vsi->back;
+ int err;
+
+ if (pf->flags & I40E_FLAG_MSIX_ENABLED)
+ err = i40e_vsi_request_irq_msix(vsi, basename);
+ else if (pf->flags & I40E_FLAG_MSI_ENABLED)
+ err = request_irq(pf->pdev->irq, i40e_intr, 0,
+ pf->misc_int_name, pf);
+ else
+ err = request_irq(pf->pdev->irq, i40e_intr, IRQF_SHARED,
+ pf->misc_int_name, pf);
+
+ if (err)
+ dev_info(&pf->pdev->dev, "request_irq failed, Error %d\n", err);
+
+ return err;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/**
+ * i40e_netpoll - A polling 'interrupt' handler
+ * @netdev: network interface device structure
+ *
+ * This is used by netconsole to send skbs without having to re-enable
+ * interrupts. It's not called while the normal interrupt routine is executing.
+ **/
+static void i40e_netpoll(struct net_device *netdev)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ int i;
+
+ /* if interface is down do nothing */
+ if (test_bit(__I40E_DOWN, &vsi->state))
+ return;
+
+ pf->flags |= I40E_FLAG_IN_NETPOLL;
+ if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
+ for (i = 0; i < vsi->num_q_vectors; i++)
+ i40e_msix_clean_rings(0, &vsi->q_vectors[i]);
+ } else {
+ i40e_intr(pf->pdev->irq, netdev);
+ }
+ pf->flags &= ~I40E_FLAG_IN_NETPOLL;
+}
+#endif
+
+/**
+ * i40e_vsi_control_tx - Start or stop a VSI's rings
+ * @vsi: the VSI being configured
+ * @enable: start or stop the rings
+ **/
+static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
+{
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ int i, j, pf_q;
+ u32 tx_reg;
+
+ pf_q = vsi->base_queue;
+ for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
+ j = 1000;
+ do {
+ usleep_range(1000, 2000);
+ tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
+ } while (j-- && ((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT)
+ ^ (tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT)) & 1);
+
+ if (enable) {
+ /* is STAT set ? */
+ if ((tx_reg & I40E_QTX_ENA_QENA_STAT_MASK)) {
+ dev_info(&pf->pdev->dev,
+ "Tx %d already enabled\n", i);
+ continue;
+ }
+ } else {
+ /* is !STAT set ? */
+ if (!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK)) {
+ dev_info(&pf->pdev->dev,
+ "Tx %d already disabled\n", i);
+ continue;
+ }
+ }
+
+ /* turn on/off the queue */
+ if (enable)
+ tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK |
+ I40E_QTX_ENA_QENA_STAT_MASK;
+ else
+ tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
+
+ wr32(hw, I40E_QTX_ENA(pf_q), tx_reg);
+
+ /* wait for the change to finish */
+ for (j = 0; j < 10; j++) {
+ tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
+ if (enable) {
+ if ((tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
+ break;
+ } else {
+ if (!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
+ break;
+ }
+
+ udelay(10);
+ }
+ if (j >= 10) {
+ dev_info(&pf->pdev->dev, "Tx ring %d %sable timeout\n",
+ pf_q, (enable ? "en" : "dis"));
+ return -ETIMEDOUT;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * i40e_vsi_control_rx - Start or stop a VSI's rings
+ * @vsi: the VSI being configured
+ * @enable: start or stop the rings
+ **/
+static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
+{
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ int i, j, pf_q;
+ u32 rx_reg;
+
+ pf_q = vsi->base_queue;
+ for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
+ j = 1000;
+ do {
+ usleep_range(1000, 2000);
+ rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
+ } while (j-- && ((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT)
+ ^ (rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT)) & 1);
+
+ if (enable) {
+ /* is STAT set ? */
+ if ((rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
+ continue;
+ } else {
+ /* is !STAT set ? */
+ if (!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
+ continue;
+ }
+
+ /* turn on/off the queue */
+ if (enable)
+ rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK |
+ I40E_QRX_ENA_QENA_STAT_MASK;
+ else
+ rx_reg &= ~(I40E_QRX_ENA_QENA_REQ_MASK |
+ I40E_QRX_ENA_QENA_STAT_MASK);
+ wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);
+
+ /* wait for the change to finish */
+ for (j = 0; j < 10; j++) {
+ rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
+
+ if (enable) {
+ if ((rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
+ break;
+ } else {
+ if (!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
+ break;
+ }
+
+ udelay(10);
+ }
+ if (j >= 10) {
+ dev_info(&pf->pdev->dev, "Rx ring %d %sable timeout\n",
+ pf_q, (enable ? "en" : "dis"));
+ return -ETIMEDOUT;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * i40e_vsi_control_rings - Start or stop a VSI's rings
+ * @vsi: the VSI being configured
+ * @request: true to start the rings, false to stop them
+ **/
+static int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool request)
+{
+ int ret;
+
+ /* do rx first for enable and last for disable */
+ if (request) {
+ ret = i40e_vsi_control_rx(vsi, request);
+ if (ret)
+ return ret;
+ ret = i40e_vsi_control_tx(vsi, request);
+ } else {
+ ret = i40e_vsi_control_tx(vsi, request);
+ if (ret)
+ return ret;
+ ret = i40e_vsi_control_rx(vsi, request);
+ }
+
+ return ret;
+}
+
+/**
+ * i40e_vsi_free_irq - Free the irq association with the OS
+ * @vsi: the VSI being configured
+ **/
+static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
+{
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ int base = vsi->base_vector;
+ u32 val, qp;
+ int i;
+
+ if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
+ if (!vsi->q_vectors)
+ return;
+
+ for (i = 0; i < vsi->num_q_vectors; i++) {
+ u16 vector = i + base;
+
+ /* free only the irqs that were actually requested */
+ if (vsi->q_vectors[i].num_ringpairs == 0)
+ continue;
+
+ /* clear the affinity_mask in the IRQ descriptor */
+ irq_set_affinity_hint(pf->msix_entries[vector].vector,
+ NULL);
+ free_irq(pf->msix_entries[vector].vector,
+ &vsi->q_vectors[i]);
+
+ /* Tear down the interrupt queue link list
+ *
+ * We know that they come in pairs and always
+ * the Rx first, then the Tx. To clear the
+ * link list, stick the EOL value into the
+ * next_q field of the registers.
+ */
+ val = rd32(hw, I40E_PFINT_LNKLSTN(vector - 1));
+ qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
+ >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
+ val |= I40E_QUEUE_END_OF_LIST
+ << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
+ wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), val);
+
+ while (qp != I40E_QUEUE_END_OF_LIST) {
+ u32 next;
+
+ val = rd32(hw, I40E_QINT_RQCTL(qp));
+
+ val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK |
+ I40E_QINT_RQCTL_MSIX0_INDX_MASK |
+ I40E_QINT_RQCTL_CAUSE_ENA_MASK |
+ I40E_QINT_RQCTL_INTEVENT_MASK);
+
+ val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
+ I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
+
+ wr32(hw, I40E_QINT_RQCTL(qp), val);
+
+ val = rd32(hw, I40E_QINT_TQCTL(qp));
+
+ next = (val & I40E_QINT_TQCTL_NEXTQ_INDX_MASK)
+ >> I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT;
+
+ val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK |
+ I40E_QINT_TQCTL_MSIX0_INDX_MASK |
+ I40E_QINT_TQCTL_CAUSE_ENA_MASK |
+ I40E_QINT_TQCTL_INTEVENT_MASK);
+
+ val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
+ I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
+
+ wr32(hw, I40E_QINT_TQCTL(qp), val);
+ qp = next;
+ }
+ }
+ } else {
+ free_irq(pf->pdev->irq, pf);
+
+ val = rd32(hw, I40E_PFINT_LNKLST0);
+ qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
+ >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
+ val |= I40E_QUEUE_END_OF_LIST
+ << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
+ wr32(hw, I40E_PFINT_LNKLST0, val);
+
+ val = rd32(hw, I40E_QINT_RQCTL(qp));
+ val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK |
+ I40E_QINT_RQCTL_MSIX0_INDX_MASK |
+ I40E_QINT_RQCTL_CAUSE_ENA_MASK |
+ I40E_QINT_RQCTL_INTEVENT_MASK);
+
+ val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
+ I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
+
+ wr32(hw, I40E_QINT_RQCTL(qp), val);
+
+ val = rd32(hw, I40E_QINT_TQCTL(qp));
+
+ val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK |
+ I40E_QINT_TQCTL_MSIX0_INDX_MASK |
+ I40E_QINT_TQCTL_CAUSE_ENA_MASK |
+ I40E_QINT_TQCTL_INTEVENT_MASK);
+
+ val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
+ I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
+
+ wr32(hw, I40E_QINT_TQCTL(qp), val);
+ }
+}
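+
+/* Editorial sketch (illustration only): the MSI-X branch above walks the
+ * same Rx-then-Tx chain that i40e_vsi_configure_msix() built, e.g.
+ *
+ *   PFINT_LNKLSTN(v) -> rx4 -> tx4 -> rx5 -> tx5 -> EOL
+ *
+ * stamping the end-of-list pattern into each next_q field as it goes.
+ */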
+
+/**
+ * i40e_vsi_free_q_vectors - Free memory allocated for interrupt vectors
+ * @vsi: the VSI being un-configured
+ *
+ * This frees the memory allocated to the q_vectors and
+ * deletes references to the NAPI struct.
+ **/
+static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi)
+{
+ int v_idx;
+
+ for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++) {
+ struct i40e_q_vector *q_vector = &vsi->q_vectors[v_idx];
+ int r_idx;
+
+ if (!q_vector)
+ continue;
+
+ /* disassociate q_vector from rings */
+ for (r_idx = 0; r_idx < q_vector->tx.count; r_idx++)
+ q_vector->tx.ring[r_idx]->q_vector = NULL;
+ for (r_idx = 0; r_idx < q_vector->rx.count; r_idx++)
+ q_vector->rx.ring[r_idx]->q_vector = NULL;
+
+ /* only VSI w/ an associated netdev is set up w/ NAPI */
+ if (vsi->netdev)
+ netif_napi_del(&q_vector->napi);
+ }
+ kfree(vsi->q_vectors);
+}
+
+/**
+ * i40e_reset_interrupt_capability - Disable interrupt setup in OS
+ * @pf: board private structure
+ **/
+static void i40e_reset_interrupt_capability(struct i40e_pf *pf)
+{
+ /* If we're in Legacy mode, the interrupt was cleaned in vsi_close */
+ if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
+ pci_disable_msix(pf->pdev);
+ kfree(pf->msix_entries);
+ pf->msix_entries = NULL;
+ } else if (pf->flags & I40E_FLAG_MSI_ENABLED) {
+ pci_disable_msi(pf->pdev);
+ }
+ pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);
+}
+
+/**
+ * i40e_clear_interrupt_scheme - Clear the current interrupt scheme settings
+ * @pf: board private structure
+ *
+ * We go through and clear interrupt specific resources and reset the structure
+ * to pre-load conditions
+ **/
+static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
+{
+ int i;
+
+ i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1);
+ for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
+ if (pf->vsi[i])
+ i40e_vsi_free_q_vectors(pf->vsi[i]);
+ i40e_reset_interrupt_capability(pf);
+}
+
+/**
+ * i40e_napi_enable_all - Enable NAPI for all q_vectors in the VSI
+ * @vsi: the VSI being configured
+ **/
+static void i40e_napi_enable_all(struct i40e_vsi *vsi)
+{
+ int q_idx;
+
+ if (!vsi->netdev)
+ return;
+
+ for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
+ napi_enable(&vsi->q_vectors[q_idx].napi);
+}
+
+/**
+ * i40e_napi_disable_all - Disable NAPI for all q_vectors in the VSI
+ * @vsi: the VSI being configured
+ **/
+static void i40e_napi_disable_all(struct i40e_vsi *vsi)
+{
+ int q_idx;
+
+ if (!vsi->netdev)
+ return;
+
+ for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
+ napi_disable(&vsi->q_vectors[q_idx].napi);
+}
+
+/**
+ * i40e_quiesce_vsi - Pause a given VSI
+ * @vsi: the VSI being paused
+ **/
+static void i40e_quiesce_vsi(struct i40e_vsi *vsi)
+{
+ if (test_bit(__I40E_DOWN, &vsi->state))
+ return;
+
+ set_bit(__I40E_NEEDS_RESTART, &vsi->state);
+ if (vsi->netdev && netif_running(vsi->netdev)) {
+ vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
+ } else {
+ set_bit(__I40E_DOWN, &vsi->state);
+ i40e_down(vsi);
+ }
+}
+
+/**
+ * i40e_unquiesce_vsi - Resume a given VSI
+ * @vsi: the VSI being resumed
+ **/
+static void i40e_unquiesce_vsi(struct i40e_vsi *vsi)
+{
+ if (!test_bit(__I40E_NEEDS_RESTART, &vsi->state))
+ return;
+
+ clear_bit(__I40E_NEEDS_RESTART, &vsi->state);
+ if (vsi->netdev && netif_running(vsi->netdev))
+ vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
+ else
+ i40e_up(vsi); /* this clears the DOWN bit */
+}
+
+/**
+ * i40e_pf_quiesce_all_vsi - Pause all VSIs on a PF
+ * @pf: the PF
+ **/
+static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf)
+{
+ int v;
+
+ for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+ if (pf->vsi[v])
+ i40e_quiesce_vsi(pf->vsi[v]);
+ }
+}
+
+/**
+ * i40e_pf_unquiesce_all_vsi - Resume all VSIs on a PF
+ * @pf: the PF
+ **/
+static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
+{
+ int v;
+
+ for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+ if (pf->vsi[v])
+ i40e_unquiesce_vsi(pf->vsi[v]);
+ }
+}
+
+/**
+ * i40e_dcb_get_num_tc - Get the number of TCs from DCBx config
+ * @dcbcfg: the corresponding DCBx configuration structure
+ *
+ * Return the number of TCs from given DCBx configuration
+ **/
+static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg)
+{
+ int num_tc = 0, i;
+
+ /* Scan the ETS Config Priority Table for the traffic class
+ * assigned to each priority; the highest traffic class index
+ * seen determines the number of traffic classes enabled
+ */
+ for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
+ if (dcbcfg->etscfg.prioritytable[i] > num_tc)
+ num_tc = dcbcfg->etscfg.prioritytable[i];
+ }
+
+ /* Traffic class index starts from zero so
+ * increment to return the actual count
+ */
+ num_tc++;
+
+ return num_tc;
+}
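+
+/* Editorial sketch: with a priority table of {0, 0, 1, 1, 2, 0, 0, 0}
+ * the highest referenced TC index is 2, so the function returns 3.
+ */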
+
+/**
+ * i40e_dcb_get_enabled_tc - Get enabled traffic classes
+ * @dcbcfg: the corresponding DCBx configuration structure
+ *
+ * Query the current DCB configuration and return a bitmap of the
+ * traffic classes enabled by the given DCBX config
+ **/
+static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg)
+{
+ u8 num_tc = i40e_dcb_get_num_tc(dcbcfg);
+ u8 enabled_tc = 1;
+ u8 i;
+
+ for (i = 0; i < num_tc; i++)
+ enabled_tc |= 1 << i;
+
+ return enabled_tc;
+}
+
+/**
+ * i40e_pf_get_num_tc - Get enabled traffic classes for PF
+ * @pf: PF being queried
+ *
+ * Return number of traffic classes enabled for the given PF
+ **/
+static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ u8 i, enabled_tc;
+ u8 num_tc = 0;
+ struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
+
+ /* If DCB is not enabled then always in single TC */
+ if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
+ return 1;
+
+ /* MFP mode return count of enabled TCs for this PF */
+ if (pf->flags & I40E_FLAG_MFP_ENABLED) {
+ enabled_tc = pf->hw.func_caps.enabled_tcmap;
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (enabled_tc & (1 << i))
+ num_tc++;
+ }
+ return num_tc;
+ }
+
+ /* SFP mode will be enabled for all TCs on port */
+ return i40e_dcb_get_num_tc(dcbcfg);
+}
+
+/**
+ * i40e_pf_get_default_tc - Get bitmap for first enabled TC
+ * @pf: PF being queried
+ *
+ * Return a bitmap for first enabled traffic class for this PF.
+ **/
+static u8 i40e_pf_get_default_tc(struct i40e_pf *pf)
+{
+ u8 enabled_tc = pf->hw.func_caps.enabled_tcmap;
+ u8 i = 0;
+
+ if (!enabled_tc)
+ return 0x1; /* TC0 */
+
+ /* Find the first enabled TC */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (enabled_tc & (1 << i))
+ break;
+ }
+
+ return 1 << i;
+}
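+
+/* Editorial sketch: an enabled_tcmap of 0x6 (TC1 and TC2) breaks the
+ * loop above at i = 1, so the function returns 0x2 (TC1 only).
+ */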
+
+/**
+ * i40e_pf_get_tc_map - Get bitmap for enabled traffic classes
+ * @pf: PF being queried
+ *
+ * Return a bitmap for enabled traffic classes for this PF.
+ **/
+static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
+{
+ /* If DCB is not enabled for this PF then just return default TC */
+ if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
+ return i40e_pf_get_default_tc(pf);
+
+ /* MFP mode will have enabled TCs set by FW */
+ if (pf->flags & I40E_FLAG_MFP_ENABLED)
+ return pf->hw.func_caps.enabled_tcmap;
+
+ /* SFP mode we want PF to be enabled for all TCs */
+ return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config);
+}
+
+/**
+ * i40e_vsi_get_bw_info - Query VSI BW Information
+ * @vsi: the VSI being queried
+ *
+ * Returns 0 on success, negative value on failure
+ **/
+static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
+{
+ struct i40e_aqc_query_vsi_ets_sla_config_resp bw_ets_config = {0};
+ struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ u32 tc_bw_max;
+ int ret;
+ int i;
+
+ /* Get the VSI level BW configuration */
+ ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "couldn't get pf vsi bw config, err %d, aq_err %d\n",
+ ret, pf->hw.aq.asq_last_status);
+ return ret;
+ }
+
+ /* Get the VSI level BW configuration per TC */
+ ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid,
+ &bw_ets_config,
+ NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "couldn't get pf vsi ets bw config, err %d, aq_err %d\n",
+ ret, pf->hw.aq.asq_last_status);
+ return ret;
+ }
+
+ if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) {
+ dev_info(&pf->pdev->dev,
+ "Enabled TCs mismatch from querying VSI BW info 0x%08x 0x%08x\n",
+ bw_config.tc_valid_bits,
+ bw_ets_config.tc_valid_bits);
+ /* Still continuing */
+ }
+
+ vsi->bw_limit = le16_to_cpu(bw_config.port_bw_limit);
+ vsi->bw_max_quanta = bw_config.max_bw;
+ tc_bw_max = le16_to_cpu(bw_ets_config.tc_bw_max[0]) |
+ (le16_to_cpu(bw_ets_config.tc_bw_max[1]) << 16);
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ vsi->bw_ets_share_credits[i] = bw_ets_config.share_credits[i];
+ vsi->bw_ets_limit_credits[i] =
+ le16_to_cpu(bw_ets_config.credits[i]);
+ /* 3 bits out of 4 for each TC */
+ vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i*4)) & 0x7);
+ }
+ return ret;
+}
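+
+/* Editorial sketch (value illustrative): tc_bw_max packs one 4-bit field
+ * per TC, of which the low 3 bits are kept, e.g. tc_bw_max = 0x00325140
+ * yields bw_ets_max_quanta = {0, 4, 1, 5, 2, 3, 0, 0}.
+ */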
+
+/**
+ * i40e_vsi_configure_bw_alloc - Configure VSI BW allocation per TC
+ * @vsi: the VSI being configured
+ * @enabled_tc: TC bitmap
+ * @bw_share: BW shared credits per TC
+ *
+ * Returns 0 on success, negative value on failure
+ **/
+static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi,
+ u8 enabled_tc,
+ u8 *bw_share)
+{
+ struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
+ int i, ret = 0;
+
+ bw_data.tc_valid_bits = enabled_tc;
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ bw_data.tc_bw_credits[i] = bw_share[i];
+
+ ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid,
+ &bw_data, NULL);
+ if (ret) {
+ dev_info(&vsi->back->pdev->dev,
+ "%s: AQ command Config VSI BW allocation per TC failed = %d\n",
+ __func__, vsi->back->hw.aq.asq_last_status);
+ return ret;
+ }
+
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ vsi->info.qs_handle[i] = bw_data.qs_handles[i];
+
+ return ret;
+}
+
+/**
+ * i40e_vsi_config_netdev_tc - Setup the netdev TC configuration
+ * @vsi: the VSI being configured
+ * @enabled_tc: TC map to be enabled
+ *
+ **/
+static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc)
+{
+ struct net_device *netdev = vsi->netdev;
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ u8 netdev_tc = 0;
+ int i;
+ struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
+
+ if (!netdev)
+ return;
+
+ if (!enabled_tc) {
+ netdev_reset_tc(netdev);
+ return;
+ }
+
+ /* Set up actual enabled TCs on the VSI */
+ if (netdev_set_num_tc(netdev, vsi->tc_config.numtc))
+ return;
+
+ /* set per TC queues for the VSI */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ /* Only set TC queues for enabled TCs
+ *
+ * e.g. For a VSI that has TC0 and TC3 enabled the
+ * enabled_tc bitmap would be 0x9 (binary 1001); the
+ * driver will set numtc for the netdev to 2, which
+ * the netdev layer will reference as TC 0 and 1.
+ */
+ if (vsi->tc_config.enabled_tc & (1 << i))
+ netdev_set_tc_queue(netdev,
+ vsi->tc_config.tc_info[i].netdev_tc,
+ vsi->tc_config.tc_info[i].qcount,
+ vsi->tc_config.tc_info[i].qoffset);
+ }
+
+ /* Assign UP2TC map for the VSI */
+ for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
+ /* Get the actual TC# for the UP */
+ u8 ets_tc = dcbcfg->etscfg.prioritytable[i];
+ /* Get the mapped netdev TC# for the UP */
+ netdev_tc = vsi->tc_config.tc_info[ets_tc].netdev_tc;
+ netdev_set_prio_tc_map(netdev, i, netdev_tc);
+ }
+}
+
+/**
+ * i40e_vsi_update_queue_map - Update our copy of VSI info with new queue map
+ * @vsi: the VSI being configured
+ * @ctxt: the ctxt buffer returned from AQ VSI update param command
+ **/
+static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi,
+ struct i40e_vsi_context *ctxt)
+{
+ /* copy just the sections touched not the entire info
+ * since not all sections are valid as returned by
+ * update vsi params
+ */
+ vsi->info.mapping_flags = ctxt->info.mapping_flags;
+ memcpy(&vsi->info.queue_mapping,
+ &ctxt->info.queue_mapping, sizeof(vsi->info.queue_mapping));
+ memcpy(&vsi->info.tc_mapping, ctxt->info.tc_mapping,
+ sizeof(vsi->info.tc_mapping));
+}
+
+/**
+ * i40e_vsi_config_tc - Configure VSI Tx Scheduler for given TC map
+ * @vsi: VSI to be configured
+ * @enabled_tc: TC bitmap
+ *
+ * This configures a particular VSI for TCs that are mapped to the
+ * given TC bitmap. It uses default bandwidth share for TCs across
+ * VSIs to configure TC for a particular VSI.
+ *
+ * NOTE:
+ * It is expected that the VSI queues have been quiesced before calling
+ * this function.
+ **/
+static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
+{
+ u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
+ struct i40e_vsi_context ctxt;
+ int ret = 0;
+ int i;
+
+ /* Check if enabled_tc is same as existing or new TCs */
+ if (vsi->tc_config.enabled_tc == enabled_tc)
+ return ret;
+
+ /* Enable ETS TCs with equal BW Share for now across all VSIs */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (enabled_tc & (1 << i))
+ bw_share[i] = 1;
+ }
+
+ ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
+ if (ret) {
+ dev_info(&vsi->back->pdev->dev,
+ "Failed configuring TC map %d for VSI %d\n",
+ enabled_tc, vsi->seid);
+ goto out;
+ }
+
+ /* Update Queue Pairs Mapping for currently enabled UPs */
+ ctxt.seid = vsi->seid;
+ ctxt.pf_num = vsi->back->hw.pf_id;
+ ctxt.vf_num = 0;
+ ctxt.uplink_seid = vsi->uplink_seid;
+ memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
+ i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
+
+ /* Update the VSI after updating the VSI queue-mapping information */
+ ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+ if (ret) {
+ dev_info(&vsi->back->pdev->dev,
+ "update vsi failed, aq_err=%d\n",
+ vsi->back->hw.aq.asq_last_status);
+ goto out;
+ }
+ /* update the local VSI info with updated queue map */
+ i40e_vsi_update_queue_map(vsi, &ctxt);
+ vsi->info.valid_sections = 0;
+
+ /* Update current VSI BW information */
+ ret = i40e_vsi_get_bw_info(vsi);
+ if (ret) {
+ dev_info(&vsi->back->pdev->dev,
+ "Failed updating vsi bw info, aq_err=%d\n",
+ vsi->back->hw.aq.asq_last_status);
+ goto out;
+ }
+
+ /* Update the netdev TC setup */
+ i40e_vsi_config_netdev_tc(vsi, enabled_tc);
+out:
+ return ret;
+}
+
+/**
+ * i40e_up_complete - Finish the last steps of bringing up a connection
+ * @vsi: the VSI being configured
+ **/
+static int i40e_up_complete(struct i40e_vsi *vsi)
+{
+ struct i40e_pf *pf = vsi->back;
+ int err;
+
+ if (pf->flags & I40E_FLAG_MSIX_ENABLED)
+ i40e_vsi_configure_msix(vsi);
+ else
+ i40e_configure_msi_and_legacy(vsi);
+
+ /* start rings */
+ err = i40e_vsi_control_rings(vsi, true);
+ if (err)
+ return err;
+
+ clear_bit(__I40E_DOWN, &vsi->state);
+ i40e_napi_enable_all(vsi);
+ i40e_vsi_enable_irq(vsi);
+
+ if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) &&
+ (vsi->netdev)) {
+ netif_tx_start_all_queues(vsi->netdev);
+ netif_carrier_on(vsi->netdev);
+ }
+ i40e_service_event_schedule(pf);
+
+ return 0;
+}
+
+/**
+ * i40e_vsi_reinit_locked - Reset the VSI
+ * @vsi: the VSI being configured
+ *
+ * Rebuild the ring structs after some configuration
+ * has changed, e.g. MTU size.
+ **/
+static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi)
+{
+ struct i40e_pf *pf = vsi->back;
+
+ WARN_ON(in_interrupt());
+ while (test_and_set_bit(__I40E_CONFIG_BUSY, &pf->state))
+ usleep_range(1000, 2000);
+ i40e_down(vsi);
+
+ /* Give a VF some time to respond to the reset. The
+ * two second wait is based upon the watchdog cycle in
+ * the VF driver.
+ */
+ if (vsi->type == I40E_VSI_SRIOV)
+ msleep(2000);
+ i40e_up(vsi);
+ clear_bit(__I40E_CONFIG_BUSY, &pf->state);
+}
+
+/**
+ * i40e_up - Bring the connection back up after being down
+ * @vsi: the VSI being configured
+ **/
+int i40e_up(struct i40e_vsi *vsi)
+{
+ int err;
+
+ err = i40e_vsi_configure(vsi);
+ if (!err)
+ err = i40e_up_complete(vsi);
+
+ return err;
+}
+
+/**
+ * i40e_down - Shutdown the connection processing
+ * @vsi: the VSI being stopped
+ **/
+void i40e_down(struct i40e_vsi *vsi)
+{
+ int i;
+
+ /* It is assumed that the caller of this function
+ * sets the vsi->state __I40E_DOWN bit.
+ */
+ if (vsi->netdev) {
+ netif_carrier_off(vsi->netdev);
+ netif_tx_disable(vsi->netdev);
+ }
+ i40e_vsi_disable_irq(vsi);
+ i40e_vsi_control_rings(vsi, false);
+ i40e_napi_disable_all(vsi);
+
+ for (i = 0; i < vsi->num_queue_pairs; i++) {
+ i40e_clean_tx_ring(&vsi->tx_rings[i]);
+ i40e_clean_rx_ring(&vsi->rx_rings[i]);
+ }
+}
+
+/**
+ * i40e_setup_tc - configure multiple traffic classes
+ * @netdev: net device to configure
+ * @tc: number of traffic classes to enable
+ **/
+static int i40e_setup_tc(struct net_device *netdev, u8 tc)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ u8 enabled_tc = 0;
+ int ret = -EINVAL;
+ int i;
+
+ /* Check if DCB enabled to continue */
+ if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) {
+ netdev_info(netdev, "DCB is not enabled for adapter\n");
+ goto exit;
+ }
+
+ /* Check if MFP enabled */
+ if (pf->flags & I40E_FLAG_MFP_ENABLED) {
+ netdev_info(netdev, "Configuring TC not supported in MFP mode\n");
+ goto exit;
+ }
+
+ /* Check whether tc count is within enabled limit */
+ if (tc > i40e_pf_get_num_tc(pf)) {
+ netdev_info(netdev, "TC count greater than enabled on link for adapter\n");
+ goto exit;
+ }
+
+ /* Generate TC map for number of tc requested */
+ for (i = 0; i < tc; i++)
+ enabled_tc |= (1 << i);
+
+ /* Requesting same TC configuration as already enabled */
+ if (enabled_tc == vsi->tc_config.enabled_tc)
+ return 0;
+
+ /* Quiesce VSI queues */
+ i40e_quiesce_vsi(vsi);
+
+ /* Configure VSI for enabled TCs */
+ ret = i40e_vsi_config_tc(vsi, enabled_tc);
+ if (ret) {
+ netdev_info(netdev, "Failed configuring TC for VSI seid=%d\n",
+ vsi->seid);
+ goto exit;
+ }
+
+ /* Unquiesce VSI */
+ i40e_unquiesce_vsi(vsi);
+
+exit:
+ return ret;
+}
+
+/**
+ * i40e_open - Called when a network interface is made active
+ * @netdev: network interface device structure
+ *
+ * The open entry point is called when a network interface is made
+ * active by the system (IFF_UP). At this point all resources needed
+ * for transmit and receive operations are allocated, the interrupt
+ * handler is registered with the OS, the netdev watchdog subtask is
+ * enabled, and the stack is notified that the interface is ready.
+ *
+ * Returns 0 on success, negative value on failure
+ **/
+static int i40e_open(struct net_device *netdev)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ char int_name[IFNAMSIZ];
+ int err;
+
+ /* disallow open during test */
+ if (test_bit(__I40E_TESTING, &pf->state))
+ return -EBUSY;
+
+ netif_carrier_off(netdev);
+
+ /* allocate descriptors */
+ err = i40e_vsi_setup_tx_resources(vsi);
+ if (err)
+ goto err_setup_tx;
+ err = i40e_vsi_setup_rx_resources(vsi);
+ if (err)
+ goto err_setup_rx;
+
+ err = i40e_vsi_configure(vsi);
+ if (err)
+ goto err_setup_rx;
+
+ snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
+ dev_driver_string(&pf->pdev->dev), netdev->name);
+ err = i40e_vsi_request_irq(vsi, int_name);
+ if (err)
+ goto err_setup_rx;
+
+ err = i40e_up_complete(vsi);
+ if (err)
+ goto err_up_complete;
+
+ if ((vsi->type == I40E_VSI_MAIN) || (vsi->type == I40E_VSI_VMDQ2)) {
+ err = i40e_aq_set_vsi_broadcast(&pf->hw, vsi->seid, true, NULL);
+ if (err)
+ netdev_info(netdev,
+ "couldn't set broadcast err %d aq_err %d\n",
+ err, pf->hw.aq.asq_last_status);
+ }
+
+ return 0;
+
+err_up_complete:
+ i40e_down(vsi);
+ i40e_vsi_free_irq(vsi);
+err_setup_rx:
+ i40e_vsi_free_rx_resources(vsi);
+err_setup_tx:
+ i40e_vsi_free_tx_resources(vsi);
+ if (vsi == pf->vsi[pf->lan_vsi])
+ i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
+
+ return err;
+}
+
+/**
+ * i40e_close - Disables a network interface
+ * @netdev: network interface device structure
+ *
+ * The close entry point is called when an interface is de-activated
+ * by the OS. The hardware is still under the driver's control, but
+ * this netdev interface is disabled.
+ *
+ * Returns 0, this is not allowed to fail
+ **/
+static int i40e_close(struct net_device *netdev)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+
+ if (test_and_set_bit(__I40E_DOWN, &vsi->state))
+ return 0;
+
+ i40e_down(vsi);
+ i40e_vsi_free_irq(vsi);
+
+ i40e_vsi_free_tx_resources(vsi);
+ i40e_vsi_free_rx_resources(vsi);
+
+ return 0;
+}
+
+/**
+ * i40e_do_reset - Start a PF or Core Reset sequence
+ * @pf: board private structure
+ * @reset_flags: which reset is requested
+ *
+ * The essential difference in resets is that the PF Reset
+ * doesn't clear the packet buffers, doesn't reset the PE
+ * firmware, and doesn't bother the other PFs on the chip.
+ **/
+void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
+{
+ u32 val;
+
+ WARN_ON(in_interrupt());
+
+ /* do the biggest reset indicated */
+ if (reset_flags & (1 << __I40E_GLOBAL_RESET_REQUESTED)) {
+
+ /* Request a Global Reset
+ *
+ * This will start the chip's countdown to the actual full
+ * chip reset event, and a warning interrupt to be sent
+ * to all PFs, including the requestor. Our handler
+ * for the warning interrupt will deal with the shutdown
+ * and recovery of the switch setup.
+ */
+ dev_info(&pf->pdev->dev, "GlobalR requested\n");
+ val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
+ val |= I40E_GLGEN_RTRIG_GLOBR_MASK;
+ wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
+
+ } else if (reset_flags & (1 << __I40E_CORE_RESET_REQUESTED)) {
+
+ /* Request a Core Reset
+ *
+ * Same as Global Reset, except does *not* include the MAC/PHY
+ */
+ dev_info(&pf->pdev->dev, "CoreR requested\n");
+ val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
+ val |= I40E_GLGEN_RTRIG_CORER_MASK;
+ wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
+ i40e_flush(&pf->hw);
+
+ } else if (reset_flags & (1 << __I40E_PF_RESET_REQUESTED)) {
+
+ /* Request a PF Reset
+ *
+ * Resets only the PF-specific registers
+ *
+ * This goes directly to the tear-down and rebuild of
+ * the switch, since we need to do all the recovery as
+ * for the Core Reset.
+ */
+ dev_info(&pf->pdev->dev, "PFR requested\n");
+ i40e_handle_reset_warning(pf);
+
+ } else if (reset_flags & (1 << __I40E_REINIT_REQUESTED)) {
+ int v;
+
+ /* Find the VSI(s) that requested a re-init */
+ dev_info(&pf->pdev->dev,
+ "VSI reinit requested\n");
+ for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+ struct i40e_vsi *vsi = pf->vsi[v];
+ if (vsi != NULL &&
+ test_bit(__I40E_REINIT_REQUESTED, &vsi->state)) {
+ i40e_vsi_reinit_locked(pf->vsi[v]);
+ clear_bit(__I40E_REINIT_REQUESTED, &vsi->state);
+ }
+ }
+
+ /* no further action needed, so return now */
+ return;
+ } else {
+ dev_info(&pf->pdev->dev,
+ "bad reset request 0x%08x\n", reset_flags);
+ return;
+ }
+}
+
+/**
+ * i40e_handle_lan_overflow_event - Handler for LAN queue overflow event
+ * @pf: board private structure
+ * @e: event info posted on ARQ
+ *
+ * Handler for LAN Queue Overflow Event generated by the firmware for PF
+ * and VF queues
+ **/
+static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
+ struct i40e_arq_event_info *e)
+{
+ struct i40e_aqc_lan_overflow *data =
+ (struct i40e_aqc_lan_overflow *)&e->desc.params.raw;
+ u32 queue = le32_to_cpu(data->prtdcb_rupto);
+ u32 qtx_ctl = le32_to_cpu(data->otx_ctl);
+ struct i40e_hw *hw = &pf->hw;
+ struct i40e_vf *vf;
+ u16 vf_id;
+
+ dev_info(&pf->pdev->dev, "%s: Rx Queue Number = %d QTX_CTL=0x%08x\n",
+ __func__, queue, qtx_ctl);
+
+ /* Queue belongs to VF, find the VF and issue VF reset */
+ if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK)
+ >> I40E_QTX_CTL_PFVF_Q_SHIFT) == I40E_QTX_CTL_VF_QUEUE) {
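+ /* QTX_CTL holds the absolute VF function number; convert
+ * it to this PF's VF index before looking up the VF
+ */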
+ vf_id = (u16)((qtx_ctl & I40E_QTX_CTL_VFVM_INDX_MASK)
+ >> I40E_QTX_CTL_VFVM_INDX_SHIFT);
+ vf_id -= hw->func_caps.vf_base_id;
+ vf = &pf->vf[vf_id];
+ i40e_vc_notify_vf_reset(vf);
+ /* Allow VF to process pending reset notification */
+ msleep(20);
+ i40e_reset_vf(vf, false);
+ }
+}
+
+/**
+ * i40e_service_event_complete - Finish up the service event
+ * @pf: board private structure
+ **/
+static void i40e_service_event_complete(struct i40e_pf *pf)
+{
+ BUG_ON(!test_bit(__I40E_SERVICE_SCHED, &pf->state));
+
+ /* flush memory to make sure state is correct before next watchdog */
+ smp_mb__before_clear_bit();
+ clear_bit(__I40E_SERVICE_SCHED, &pf->state);
+}
+
+/**
+ * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table
+ * @pf: board private structure
+ **/
+static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
+{
+ if (!(pf->flags & I40E_FLAG_FDIR_REQUIRES_REINIT))
+ return;
+
+ pf->flags &= ~I40E_FLAG_FDIR_REQUIRES_REINIT;
+
+ /* if interface is down do nothing */
+ if (test_bit(__I40E_DOWN, &pf->state))
+ return;
+}
+
+/**
+ * i40e_vsi_link_event - notify VSI of a link event
+ * @vsi: vsi to be notified
+ * @link_up: link up or down
+ **/
+static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up)
+{
+ if (!vsi)
+ return;
+
+ switch (vsi->type) {
+ case I40E_VSI_MAIN:
+ if (!vsi->netdev || !vsi->netdev_registered)
+ break;
+
+ if (link_up) {
+ netif_carrier_on(vsi->netdev);
+ netif_tx_wake_all_queues(vsi->netdev);
+ } else {
+ netif_carrier_off(vsi->netdev);
+ netif_tx_stop_all_queues(vsi->netdev);
+ }
+ break;
+
+ case I40E_VSI_SRIOV:
+ break;
+
+ case I40E_VSI_VMDQ2:
+ case I40E_VSI_CTRL:
+ case I40E_VSI_MIRROR:
+ default:
+ /* there is no notification for other VSIs */
+ break;
+ }
+}
+
+/**
+ * i40e_veb_link_event - notify elements on the veb of a link event
+ * @veb: veb to be notified
+ * @link_up: link up or down
+ **/
+static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
+{
+ struct i40e_pf *pf;
+ int i;
+
+ if (!veb || !veb->pf)
+ return;
+ pf = veb->pf;
+
+ /* depth first... */
+ for (i = 0; i < I40E_MAX_VEB; i++)
+ if (pf->veb[i] && (pf->veb[i]->uplink_seid == veb->seid))
+ i40e_veb_link_event(pf->veb[i], link_up);
+
+ /* ... now the local VSIs */
+ for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
+ if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid))
+ i40e_vsi_link_event(pf->vsi[i], link_up);
+}
+
+/**
+ * i40e_link_event - Update netif_carrier status
+ * @pf: board private structure
+ **/
+static void i40e_link_event(struct i40e_pf *pf)
+{
+ bool new_link, old_link;
+
+ new_link = (pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP);
+ old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP);
+
+ if (new_link == old_link)
+ return;
+
+ netdev_info(pf->vsi[pf->lan_vsi]->netdev,
+ "NIC Link is %s\n", (new_link ? "Up" : "Down"));
+
+ /* Notify the base of the switch tree connected to
+ * the link. Floating VEBs are not notified.
+ */
+ if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
+ i40e_veb_link_event(pf->veb[pf->lan_veb], new_link);
+ else
+ i40e_vsi_link_event(pf->vsi[pf->lan_vsi], new_link);
+
+ if (pf->vf)
+ i40e_vc_notify_link_state(pf);
+}
+
+/**
+ * i40e_check_hang_subtask - Check for hung queues and dropped interrupts
+ * @pf: board private structure
+ *
+ * Set the per-queue flags to request a check for stuck queues in the irq
+ * clean functions, then force interrupts to be sure the irq clean is called.
+ **/
+static void i40e_check_hang_subtask(struct i40e_pf *pf)
+{
+ int i, v;
+
+ /* If a reconfiguration is already in progress, just bail */
+ if (test_bit(__I40E_CONFIG_BUSY, &pf->state))
+ return;
+
+ /* for each VSI/netdev
+ * for each Tx queue
+ * set the check flag
+ * for each q_vector
+ * force an interrupt
+ */
+ for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+ struct i40e_vsi *vsi = pf->vsi[v];
+ int armed = 0;
+
+ if (!pf->vsi[v] ||
+ test_bit(__I40E_DOWN, &vsi->state) ||
+ (vsi->netdev && !netif_carrier_ok(vsi->netdev)))
+ continue;
+
+ for (i = 0; i < vsi->num_queue_pairs; i++) {
+ set_check_for_tx_hang(&vsi->tx_rings[i]);
+ if (test_bit(__I40E_HANG_CHECK_ARMED,
+ &vsi->tx_rings[i].state))
+ armed++;
+ }
+
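+ /* trigger a software interrupt on the armed vectors so the
+ * irq clean routines run and can detect the stuck queues
+ */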
+ if (armed) {
+ if (!(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
+ wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0,
+ (I40E_PFINT_DYN_CTL0_INTENA_MASK |
+ I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK));
+ } else {
+ u16 vec = vsi->base_vector - 1;
+ u32 val = (I40E_PFINT_DYN_CTLN_INTENA_MASK |
+ I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK);
+ for (i = 0; i < vsi->num_q_vectors; i++, vec++)
+ wr32(&vsi->back->hw,
+ I40E_PFINT_DYN_CTLN(vec), val);
+ }
+ i40e_flush(&vsi->back->hw);
+ }
+ }
+}
+
+/**
+ * i40e_watchdog_subtask - Check and bring link up
+ * @pf: board private structure
+ **/
+static void i40e_watchdog_subtask(struct i40e_pf *pf)
+{
+ int i;
+
+ /* if interface is down do nothing */
+ if (test_bit(__I40E_DOWN, &pf->state) ||
+ test_bit(__I40E_CONFIG_BUSY, &pf->state))
+ return;
+
+ /* Update the stats for active netdevs so the network stack
+ * can look at updated numbers whenever it cares to
+ */
+ for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
+ if (pf->vsi[i] && pf->vsi[i]->netdev)
+ i40e_update_stats(pf->vsi[i]);
+
+ /* Update the stats for the active switching components */
+ for (i = 0; i < I40E_MAX_VEB; i++)
+ if (pf->veb[i])
+ i40e_update_veb_stats(pf->veb[i]);
+}
+
+/**
+ * i40e_reset_subtask - Set up for resetting the device and driver
+ * @pf: board private structure
+ **/
+static void i40e_reset_subtask(struct i40e_pf *pf)
+{
+ u32 reset_flags = 0;
+
+ if (test_bit(__I40E_REINIT_REQUESTED, &pf->state)) {
+ reset_flags |= (1 << __I40E_REINIT_REQUESTED);
+ clear_bit(__I40E_REINIT_REQUESTED, &pf->state);
+ }
+ if (test_bit(__I40E_PF_RESET_REQUESTED, &pf->state)) {
+ reset_flags |= (1 << __I40E_PF_RESET_REQUESTED);
+ clear_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
+ }
+ if (test_bit(__I40E_CORE_RESET_REQUESTED, &pf->state)) {
+ reset_flags |= (1 << __I40E_CORE_RESET_REQUESTED);
+ clear_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
+ }
+ if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state)) {
+ reset_flags |= (1 << __I40E_GLOBAL_RESET_REQUESTED);
+ clear_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
+ }
+
+ /* If there's a recovery already waiting, it takes
+ * precedence over starting a new reset sequence.
+ */
+ if (test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state)) {
+ i40e_handle_reset_warning(pf);
+ return;
+ }
+
+ /* kick off the requested reset only if we're not already down or resetting */
+ if (reset_flags &&
+ !test_bit(__I40E_DOWN, &pf->state) &&
+ !test_bit(__I40E_CONFIG_BUSY, &pf->state))
+ i40e_do_reset(pf, reset_flags);
+}
+
+/**
+ * i40e_handle_link_event - Handle link event
+ * @pf: board private structure
+ * @e: event info posted on ARQ
+ **/
+static void i40e_handle_link_event(struct i40e_pf *pf,
+ struct i40e_arq_event_info *e)
+{
+ struct i40e_hw *hw = &pf->hw;
+ struct i40e_aqc_get_link_status *status =
+ (struct i40e_aqc_get_link_status *)&e->desc.params.raw;
+ struct i40e_link_status *hw_link_info = &hw->phy.link_info;
+
+ /* save off old link status information */
+ memcpy(&pf->hw.phy.link_info_old, hw_link_info,
+ sizeof(pf->hw.phy.link_info_old));
+
+ /* update link status */
+ hw_link_info->phy_type = (enum i40e_aq_phy_type)status->phy_type;
+ hw_link_info->link_speed = (enum i40e_aq_link_speed)status->link_speed;
+ hw_link_info->link_info = status->link_info;
+ hw_link_info->an_info = status->an_info;
+ hw_link_info->ext_info = status->ext_info;
+ hw_link_info->lse_enable =
+ le16_to_cpu(status->command_flags) &
+ I40E_AQ_LSE_ENABLE;
+
+ /* process the event */
+ i40e_link_event(pf);
+
+ /* Do a new status request to re-enable LSE reporting
+ * and load new status information into the hw struct,
+ * then see if the status changed while processing the
+ * initial event.
+ */
+ i40e_aq_get_link_info(&pf->hw, true, NULL, NULL);
+ i40e_link_event(pf);
+}
+
+/**
+ * i40e_clean_adminq_subtask - Clean the AdminQ rings
+ * @pf: board private structure
+ **/
+static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
+{
+ struct i40e_arq_event_info event;
+ struct i40e_hw *hw = &pf->hw;
+ u16 pending, i = 0;
+ i40e_status ret;
+ u16 opcode;
+ u32 val;
+
+ if (!test_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state))
+ return;
+
+ event.msg_size = I40E_MAX_AQ_BUF_SIZE;
+ event.msg_buf = kzalloc(event.msg_size, GFP_KERNEL);
+ if (!event.msg_buf)
+ return;
+
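+ /* drain the ARQ, handling at most adminq_work_limit events per pass */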
+ do {
+ ret = i40e_clean_arq_element(hw, &event, &pending);
+ if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK) {
+ dev_info(&pf->pdev->dev, "No ARQ event found\n");
+ break;
+ } else if (ret) {
+ dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret);
+ break;
+ }
+
+ opcode = le16_to_cpu(event.desc.opcode);
+ switch (opcode) {
+
+ case i40e_aqc_opc_get_link_status:
+ i40e_handle_link_event(pf, &event);
+ break;
+ case i40e_aqc_opc_send_msg_to_pf:
+ ret = i40e_vc_process_vf_msg(pf,
+ le16_to_cpu(event.desc.retval),
+ le32_to_cpu(event.desc.cookie_high),
+ le32_to_cpu(event.desc.cookie_low),
+ event.msg_buf,
+ event.msg_size);
+ break;
+ case i40e_aqc_opc_lldp_update_mib:
+ dev_info(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n");
+ break;
+ case i40e_aqc_opc_event_lan_overflow:
+ dev_info(&pf->pdev->dev, "ARQ LAN queue overflow event received\n");
+ i40e_handle_lan_overflow_event(pf, &event);
+ break;
+ default:
+ dev_info(&pf->pdev->dev,
+ "ARQ Error: Unknown event %d received\n",
+ opcode);
+ break;
+ }
+ } while (pending && (i++ < pf->adminq_work_limit));
+
+ clear_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state);
+ /* re-enable Admin queue interrupt cause */
+ val = rd32(hw, I40E_PFINT_ICR0_ENA);
+ val |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
+ wr32(hw, I40E_PFINT_ICR0_ENA, val);
+ i40e_flush(hw);
+
+ kfree(event.msg_buf);
+}
+
+/**
+ * i40e_reconstitute_veb - rebuild the VEB and anything connected to it
+ * @veb: pointer to the VEB instance
+ *
+ * This is a recursive function that first builds the attached VSIs then
+ * recurses to build the next layer of VEBs. We track the connections
+ * through our own index numbers because the SEIDs from the HW could
+ * change across the reset.
+ **/
+static int i40e_reconstitute_veb(struct i40e_veb *veb)
+{
+ struct i40e_vsi *ctl_vsi = NULL;
+ struct i40e_pf *pf = veb->pf;
+ int v, veb_idx;
+ int ret;
+
+ /* build VSI that owns this VEB, temporarily attached to base VEB */
+ for (v = 0; v < pf->hw.func_caps.num_vsis && !ctl_vsi; v++) {
+ if (pf->vsi[v] &&
+ pf->vsi[v]->veb_idx == veb->idx &&
+ pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) {
+ ctl_vsi = pf->vsi[v];
+ break;
+ }
+ }
+ if (!ctl_vsi) {
+ dev_info(&pf->pdev->dev,
+ "missing owner VSI for veb_idx %d\n", veb->idx);
+ ret = -ENOENT;
+ goto end_reconstitute;
+ }
+ if (ctl_vsi != pf->vsi[pf->lan_vsi])
+ ctl_vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
+ ret = i40e_add_vsi(ctl_vsi);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "rebuild of owner VSI failed: %d\n", ret);
+ goto end_reconstitute;
+ }
+ i40e_vsi_reset_stats(ctl_vsi);
+
+ /* create the VEB in the switch and move the VSI onto the VEB */
+ ret = i40e_add_veb(veb, ctl_vsi);
+ if (ret)
+ goto end_reconstitute;
+
+ /* create the remaining VSIs attached to this VEB */
+ for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+ if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi)
+ continue;
+
+ if (pf->vsi[v]->veb_idx == veb->idx) {
+ struct i40e_vsi *vsi = pf->vsi[v];
+ vsi->uplink_seid = veb->seid;
+ ret = i40e_add_vsi(vsi);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "rebuild of vsi_idx %d failed: %d\n",
+ v, ret);
+ goto end_reconstitute;
+ }
+ i40e_vsi_reset_stats(vsi);
+ }
+ }
+
+ /* create any VEBs attached to this VEB - RECURSION */
+ for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
+ if (pf->veb[veb_idx] && pf->veb[veb_idx]->veb_idx == veb->idx) {
+ pf->veb[veb_idx]->uplink_seid = veb->seid;
+ ret = i40e_reconstitute_veb(pf->veb[veb_idx]);
+ if (ret)
+ break;
+ }
+ }
+
+end_reconstitute:
+ return ret;
+}
+
+/**
+ * i40e_get_capabilities - get info about the HW
+ * @pf: the PF struct
+ **/
+static int i40e_get_capabilities(struct i40e_pf *pf)
+{
+ struct i40e_aqc_list_capabilities_element_resp *cap_buf;
+ u16 data_size;
+ int buf_len;
+ int err;
+
+ buf_len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
+ do {
+ cap_buf = kzalloc(buf_len, GFP_KERNEL);
+ if (!cap_buf)
+ return -ENOMEM;
+
+ /* this loads the data into the hw struct for us */
+ err = i40e_aq_discover_capabilities(&pf->hw, cap_buf, buf_len,
+ &data_size,
+ i40e_aqc_opc_list_func_capabilities,
+ NULL);
+ /* data loaded, buffer no longer needed */
+ kfree(cap_buf);
+
+ if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) {
+ /* retry with a larger buffer */
+ buf_len = data_size;
+ } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
+ dev_info(&pf->pdev->dev,
+ "capability discovery failed: aq=%d\n",
+ pf->hw.aq.asq_last_status);
+ return -ENODEV;
+ }
+ } while (err);
+
+ if (pf->hw.debug_mask & I40E_DEBUG_USER)
+ dev_info(&pf->pdev->dev,
+ "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n",
+ pf->hw.pf_id, pf->hw.func_caps.num_vfs,
+ pf->hw.func_caps.num_msix_vectors,
+ pf->hw.func_caps.num_msix_vectors_vf,
+ pf->hw.func_caps.fd_filters_guaranteed,
+ pf->hw.func_caps.fd_filters_best_effort,
+ pf->hw.func_caps.num_tx_qp,
+ pf->hw.func_caps.num_vsis);
+
+ return 0;
+}
+
+/**
+ * i40e_fdir_setup - initialize the Flow Director resources
+ * @pf: board private structure
+ **/
+static void i40e_fdir_setup(struct i40e_pf *pf)
+{
+ struct i40e_vsi *vsi;
+ bool new_vsi = false;
+ int err, i;
+
+ if (!(pf->flags & (I40E_FLAG_FDIR_ENABLED | I40E_FLAG_FDIR_ATR_ENABLED)))
+ return;
+
+ pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
+
+ /* find existing or make new FDIR VSI */
+ vsi = NULL;
+ for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
+ if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR)
+ vsi = pf->vsi[i];
+ if (!vsi) {
+ vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR, pf->mac_seid, 0);
+ if (!vsi) {
+ dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n");
+ pf->flags &= ~I40E_FLAG_FDIR_ENABLED;
+ return;
+ }
+ new_vsi = true;
+ }
+ WARN_ON(vsi->base_queue != I40E_FDIR_RING);
+ i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_rings);
+
+ err = i40e_vsi_setup_tx_resources(vsi);
+ if (!err)
+ err = i40e_vsi_setup_rx_resources(vsi);
+ if (!err)
+ err = i40e_vsi_configure(vsi);
+ if (!err && new_vsi) {
+ char int_name[IFNAMSIZ + 9];
+ snprintf(int_name, sizeof(int_name) - 1, "%s-fdir",
+ dev_driver_string(&pf->pdev->dev));
+ err = i40e_vsi_request_irq(vsi, int_name);
+ }
+ if (!err)
+ err = i40e_up_complete(vsi);
+
+ clear_bit(__I40E_NEEDS_RESTART, &vsi->state);
+}
+
+/**
+ * i40e_fdir_teardown - release the Flow Director resources
+ * @pf: board private structure
+ **/
+static void i40e_fdir_teardown(struct i40e_pf *pf)
+{
+ int i;
+
+ for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
+ if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
+ i40e_vsi_release(pf->vsi[i]);
+ break;
+ }
+ }
+}
+
+/**
+ * i40e_handle_reset_warning - prep for the core to reset
+ * @pf: board private structure
+ *
+ * Close up the VFs and other things in prep for a Core Reset,
+ * then get ready to rebuild the world.
+ **/
+static void i40e_handle_reset_warning(struct i40e_pf *pf)
+{
+ struct i40e_driver_version dv;
+ struct i40e_hw *hw = &pf->hw;
+ i40e_status ret;
+ u32 v;
+
+ clear_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
+ if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
+ return;
+
+ dev_info(&pf->pdev->dev, "Tearing down internal switch for reset\n");
+
+ i40e_vc_notify_reset(pf);
+
+ /* quiesce the VSIs and their queues that are not already DOWN */
+ i40e_pf_quiesce_all_vsi(pf);
+
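+ /* the reset will hand out new SEIDs, so invalidate our stale copies */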
+ for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+ if (pf->vsi[v])
+ pf->vsi[v]->seid = 0;
+ }
+
+ i40e_shutdown_adminq(&pf->hw);
+
+ /* Now we wait for GRST to settle out.
+ * We don't have to delete the VEBs or VSIs from the hw switch
+ * because the reset will make them disappear.
+ */
+ ret = i40e_pf_reset(hw);
+ if (ret)
+ dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret);
+ pf->pfr_count++;
+
+ if (test_bit(__I40E_DOWN, &pf->state))
+ goto end_core_reset;
+ dev_info(&pf->pdev->dev, "Rebuilding internal switch\n");
+
+ /* rebuild the basics for the AdminQ, HMC, and initial HW switch */
+ ret = i40e_init_adminq(&pf->hw);
+ if (ret) {
+ dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, %d\n", ret);
+ goto end_core_reset;
+ }
+
+ ret = i40e_get_capabilities(pf);
+ if (ret) {
+ dev_info(&pf->pdev->dev, "i40e_get_capabilities failed, %d\n",
+ ret);
+ goto end_core_reset;
+ }
+
+ /* call shutdown HMC */
+ ret = i40e_shutdown_lan_hmc(hw);
+ if (ret) {
+ dev_info(&pf->pdev->dev, "shutdown_lan_hmc failed: %d\n", ret);
+ goto end_core_reset;
+ }
+
+ ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
+ hw->func_caps.num_rx_qp,
+ pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num);
+ if (ret) {
+ dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret);
+ goto end_core_reset;
+ }
+ ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
+ if (ret) {
+ dev_info(&pf->pdev->dev, "configure_lan_hmc failed: %d\n", ret);
+ goto end_core_reset;
+ }
+
+ /* do basic switch setup */
+ ret = i40e_setup_pf_switch(pf);
+ if (ret)
+ goto end_core_reset;
+
+ /* Rebuild the VSIs and VEBs that existed before reset.
+ * They are still in our local switch element arrays, so we only
+ * need to rebuild the switch model in the HW.
+ *
+ * If there were VEBs but the reconstitution failed, we'll try
+ * to recover minimal use by getting the basic PF VSI working.
+ */
+ if (pf->vsi[pf->lan_vsi]->uplink_seid != pf->mac_seid) {
+ dev_info(&pf->pdev->dev, "attempting to rebuild switch\n");
+ /* find the one VEB connected to the MAC, and find orphans */
+ for (v = 0; v < I40E_MAX_VEB; v++) {
+ if (!pf->veb[v])
+ continue;
+
+ if (pf->veb[v]->uplink_seid == pf->mac_seid ||
+ pf->veb[v]->uplink_seid == 0) {
+ ret = i40e_reconstitute_veb(pf->veb[v]);
+
+ if (!ret)
+ continue;
+
+ /* If Main VEB failed, we're in deep doodoo,
+ * so give up rebuilding the switch and set up
+ * for minimal rebuild of PF VSI.
+ * If orphan failed, we'll report the error
+ * but try to keep going.
+ */
+ if (pf->veb[v]->uplink_seid == pf->mac_seid) {
+ dev_info(&pf->pdev->dev,
+ "rebuild of switch failed: %d, will try to set up simple PF connection\n",
+ ret);
+ pf->vsi[pf->lan_vsi]->uplink_seid
+ = pf->mac_seid;
+ break;
+ } else if (pf->veb[v]->uplink_seid == 0) {
+ dev_info(&pf->pdev->dev,
+ "rebuild of orphan VEB failed: %d\n",
+ ret);
+ }
+ }
+ }
+ }
+
+ if (pf->vsi[pf->lan_vsi]->uplink_seid == pf->mac_seid) {
+ dev_info(&pf->pdev->dev, "attempting to rebuild PF VSI\n");
+ /* no VEB, so rebuild only the Main VSI */
+ ret = i40e_add_vsi(pf->vsi[pf->lan_vsi]);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "rebuild of Main VSI failed: %d\n", ret);
+ goto end_core_reset;
+ }
+ }
+
+ /* reinit the misc interrupt */
+ if (pf->flags & I40E_FLAG_MSIX_ENABLED)
+ ret = i40e_setup_misc_vector(pf);
+
+ /* restart the VSIs that were rebuilt and running before the reset */
+ i40e_pf_unquiesce_all_vsi(pf);
+
+ /* tell the firmware that we're starting */
+ dv.major_version = DRV_VERSION_MAJOR;
+ dv.minor_version = DRV_VERSION_MINOR;
+ dv.build_version = DRV_VERSION_BUILD;
+ dv.subbuild_version = 0;
+ i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
+
+ dev_info(&pf->pdev->dev, "PF reset done\n");
+
+end_core_reset:
+ clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state);
+}
+
+/**
+ * i40e_handle_mdd_event
+ * @pf: pointer to the pf structure
+ *
+ * Called from the MDD irq handler to identify possibly malicious VFs
+ **/
+static void i40e_handle_mdd_event(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ bool mdd_detected = false;
+ struct i40e_vf *vf;
+ u32 reg;
+ int i;
+
+ if (!test_bit(__I40E_MDD_EVENT_PENDING, &pf->state))
+ return;
+
+ /* find what triggered the MDD event */
+ reg = rd32(hw, I40E_GL_MDET_TX);
+ if (reg & I40E_GL_MDET_TX_VALID_MASK) {
+ u8 func = (reg & I40E_GL_MDET_TX_FUNCTION_MASK)
+ >> I40E_GL_MDET_TX_FUNCTION_SHIFT;
+ u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK)
+ >> I40E_GL_MDET_TX_EVENT_SHIFT;
+ u8 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK)
+ >> I40E_GL_MDET_TX_QUEUE_SHIFT;
+ dev_info(&pf->pdev->dev,
+ "Malicious Driver Detection TX event 0x%02x on q %d of function 0x%02x\n",
+ event, queue, func);
+ wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
+ mdd_detected = true;
+ }
+ reg = rd32(hw, I40E_GL_MDET_RX);
+ if (reg & I40E_GL_MDET_RX_VALID_MASK) {
+ u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK)
+ >> I40E_GL_MDET_RX_FUNCTION_SHIFT;
+ u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK)
+ >> I40E_GL_MDET_RX_EVENT_SHIFT;
+ u8 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK)
+ >> I40E_GL_MDET_RX_QUEUE_SHIFT;
+ dev_info(&pf->pdev->dev,
+ "Malicious Driver Detection RX event 0x%02x on q %d of function 0x%02x\n",
+ event, queue, func);
+ wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
+ mdd_detected = true;
+ }
+
+ /* see if one of the VFs needs its hand slapped */
+ for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) {
+ vf = &(pf->vf[i]);
+ reg = rd32(hw, I40E_VP_MDET_TX(i));
+ if (reg & I40E_VP_MDET_TX_VALID_MASK) {
+ wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
+ vf->num_mdd_events++;
+ dev_info(&pf->pdev->dev, "MDD TX event on VF %d\n", i);
+ }
+
+ reg = rd32(hw, I40E_VP_MDET_RX(i));
+ if (reg & I40E_VP_MDET_RX_VALID_MASK) {
+ wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
+ vf->num_mdd_events++;
+ dev_info(&pf->pdev->dev, "MDD RX event on VF %d\n", i);
+ }
+
+ if (vf->num_mdd_events > I40E_DEFAULT_NUM_MDD_EVENTS_ALLOWED) {
+ dev_info(&pf->pdev->dev,
+ "Too many MDD events on VF %d, disabled\n", i);
+ dev_info(&pf->pdev->dev,
+ "Use PF Control I/F to re-enable the VF\n");
+ set_bit(I40E_VF_STAT_DISABLED, &vf->vf_states);
+ }
+ }
+
+ /* re-enable mdd interrupt cause */
+ clear_bit(__I40E_MDD_EVENT_PENDING, &pf->state);
+ reg = rd32(hw, I40E_PFINT_ICR0_ENA);
+ reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
+ wr32(hw, I40E_PFINT_ICR0_ENA, reg);
+ i40e_flush(hw);
+}
+
+/**
+ * i40e_service_task - Run the driver's async subtasks
+ * @work: pointer to work_struct containing our data
+ **/
+static void i40e_service_task(struct work_struct *work)
+{
+ struct i40e_pf *pf = container_of(work,
+ struct i40e_pf,
+ service_task);
+ unsigned long start_time = jiffies;
+
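+ /* run any pending reset work first so the remaining
+ * subtasks see a stable device
+ */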
+ i40e_reset_subtask(pf);
+ i40e_handle_mdd_event(pf);
+ i40e_vc_process_vflr_event(pf);
+ i40e_watchdog_subtask(pf);
+ i40e_fdir_reinit_subtask(pf);
+ i40e_check_hang_subtask(pf);
+ i40e_sync_filters_subtask(pf);
+ i40e_clean_adminq_subtask(pf);
+
+ i40e_service_event_complete(pf);
+
+ /* If the tasks have taken longer than one timer cycle or there
+ * is more work to be done, reschedule the service task now
+ * rather than wait for the timer to tick again.
+ */
+ if (time_after(jiffies, (start_time + pf->service_timer_period)) ||
+ test_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state) ||
+ test_bit(__I40E_MDD_EVENT_PENDING, &pf->state) ||
+ test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state))
+ i40e_service_event_schedule(pf);
+}
+
+/**
+ * i40e_service_timer - timer callback
+ * @data: pointer to PF struct
+ **/
+static void i40e_service_timer(unsigned long data)
+{
+ struct i40e_pf *pf = (struct i40e_pf *)data;
+
+ mod_timer(&pf->service_timer,
+ round_jiffies(jiffies + pf->service_timer_period));
+ i40e_service_event_schedule(pf);
+}
+
+/**
+ * i40e_set_num_rings_in_vsi - Determine number of rings in the VSI
+ * @vsi: the VSI being configured
+ **/
+static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
+{
+ struct i40e_pf *pf = vsi->back;
+
+ switch (vsi->type) {
+ case I40E_VSI_MAIN:
+ vsi->alloc_queue_pairs = pf->num_lan_qps;
+ vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
+ I40E_REQ_DESCRIPTOR_MULTIPLE);
+ if (pf->flags & I40E_FLAG_MSIX_ENABLED)
+ vsi->num_q_vectors = pf->num_lan_msix;
+ else
+ vsi->num_q_vectors = 1;
+
+ break;
+
+ case I40E_VSI_FDIR:
+ vsi->alloc_queue_pairs = 1;
+ vsi->num_desc = ALIGN(I40E_FDIR_RING_COUNT,
+ I40E_REQ_DESCRIPTOR_MULTIPLE);
+ vsi->num_q_vectors = 1;
+ break;
+
+ case I40E_VSI_VMDQ2:
+ vsi->alloc_queue_pairs = pf->num_vmdq_qps;
+ vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
+ I40E_REQ_DESCRIPTOR_MULTIPLE);
+ vsi->num_q_vectors = pf->num_vmdq_msix;
+ break;
+
+ case I40E_VSI_SRIOV:
+ vsi->alloc_queue_pairs = pf->num_vf_qps;
+ vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
+ I40E_REQ_DESCRIPTOR_MULTIPLE);
+ break;
+
+ default:
+ WARN_ON(1);
+ return -ENODATA;
+ }
+
+ return 0;
+}
+
+/**
+ * i40e_vsi_mem_alloc - Allocates the next available struct vsi in the PF
+ * @pf: board private structure
+ * @type: type of VSI
+ *
+ * On error: returns error code (negative)
+ * On success: returns vsi index in PF (non-negative)
+ **/
+static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
+{
+ int ret = -ENODEV;
+ struct i40e_vsi *vsi;
+ int vsi_idx;
+ int i;
+
+ /* Need to protect the allocation of the VSIs at the PF level */
+ mutex_lock(&pf->switch_mutex);
+
+ /* VSI list may be fragmented if VSI creation/destruction has
+ * been happening. We can afford to do a quick scan to look
+ * for any free VSIs in the list.
+ *
+ * find next empty vsi slot, looping back around if necessary
+ */
+ i = pf->next_vsi;
+ while (i < pf->hw.func_caps.num_vsis && pf->vsi[i])
+ i++;
+ if (i >= pf->hw.func_caps.num_vsis) {
+ i = 0;
+ while (i < pf->next_vsi && pf->vsi[i])
+ i++;
+ }
+
+ if (i < pf->hw.func_caps.num_vsis && !pf->vsi[i]) {
+ vsi_idx = i; /* Found one! */
+ } else {
+ ret = -ENODEV;
+ goto err_alloc_vsi; /* out of VSI slots! */
+ }
+ pf->next_vsi = ++i;
+
+ vsi = kzalloc(sizeof(*vsi), GFP_KERNEL);
+ if (!vsi) {
+ ret = -ENOMEM;
+ goto err_alloc_vsi;
+ }
+ vsi->type = type;
+ vsi->back = pf;
+ set_bit(__I40E_DOWN, &vsi->state);
+ vsi->flags = 0;
+ vsi->idx = vsi_idx;
+ vsi->rx_itr_setting = pf->rx_itr_default;
+ vsi->tx_itr_setting = pf->tx_itr_default;
+ vsi->netdev_registered = false;
+ vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
+ INIT_LIST_HEAD(&vsi->mac_filter_list);
+
+ i40e_set_num_rings_in_vsi(vsi);
+
+ /* Setup default MSIX irq handler for VSI */
+ i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);
+
+ pf->vsi[vsi_idx] = vsi;
+ ret = vsi_idx;
+err_alloc_vsi:
+ mutex_unlock(&pf->switch_mutex);
+ return ret;
+}
+
+/**
+ * i40e_vsi_clear - Deallocate the VSI provided
+ * @vsi: the VSI being un-configured
+ **/
+static int i40e_vsi_clear(struct i40e_vsi *vsi)
+{
+ struct i40e_pf *pf;
+
+ if (!vsi)
+ return 0;
+
+ if (!vsi->back)
+ goto free_vsi;
+ pf = vsi->back;
+
+ mutex_lock(&pf->switch_mutex);
+ if (!pf->vsi[vsi->idx]) {
+ dev_err(&pf->pdev->dev, "pf->vsi[%d] is NULL, just free vsi[%d](%p,type %d)\n",
+ vsi->idx, vsi->idx, vsi, vsi->type);
+ goto unlock_vsi;
+ }
+
+ if (pf->vsi[vsi->idx] != vsi) {
+ dev_err(&pf->pdev->dev,
+ "pf->vsi[%d](%p, type %d) != vsi[%d](%p,type %d): no free!\n",
+ pf->vsi[vsi->idx]->idx,
+ pf->vsi[vsi->idx],
+ pf->vsi[vsi->idx]->type,
+ vsi->idx, vsi, vsi->type);
+ goto unlock_vsi;
+ }
+
+ /* updates the pf for this cleared vsi */
+ i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
+ i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);
+
+ pf->vsi[vsi->idx] = NULL;
+ if (vsi->idx < pf->next_vsi)
+ pf->next_vsi = vsi->idx;
+
+unlock_vsi:
+ mutex_unlock(&pf->switch_mutex);
+free_vsi:
+ kfree(vsi);
+
+ return 0;
+}
+
+/**
+ * i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI
+ * @vsi: the VSI being configured
+ **/
+static int i40e_alloc_rings(struct i40e_vsi *vsi)
+{
+ struct i40e_pf *pf = vsi->back;
+ int ret = 0;
+ int i;
+
+ vsi->rx_rings = kcalloc(vsi->alloc_queue_pairs,
+ sizeof(struct i40e_ring), GFP_KERNEL);
+ if (!vsi->rx_rings) {
+ ret = -ENOMEM;
+ goto err_alloc_rings;
+ }
+
+ vsi->tx_rings = kcalloc(vsi->alloc_queue_pairs,
+ sizeof(struct i40e_ring), GFP_KERNEL);
+ if (!vsi->tx_rings) {
+ ret = -ENOMEM;
+ kfree(vsi->rx_rings);
+ goto err_alloc_rings;
+ }
+
+ /* Set basic values in the rings to be used later during open() */
+ for (i = 0; i < vsi->alloc_queue_pairs; i++) {
+ struct i40e_ring *rx_ring = &vsi->rx_rings[i];
+ struct i40e_ring *tx_ring = &vsi->tx_rings[i];
+
+ tx_ring->queue_index = i;
+ tx_ring->reg_idx = vsi->base_queue + i;
+ tx_ring->ring_active = false;
+ tx_ring->vsi = vsi;
+ tx_ring->netdev = vsi->netdev;
+ tx_ring->dev = &pf->pdev->dev;
+ tx_ring->count = vsi->num_desc;
+ tx_ring->size = 0;
+ tx_ring->dcb_tc = 0;
+
+ rx_ring->queue_index = i;
+ rx_ring->reg_idx = vsi->base_queue + i;
+ rx_ring->ring_active = false;
+ rx_ring->vsi = vsi;
+ rx_ring->netdev = vsi->netdev;
+ rx_ring->dev = &pf->pdev->dev;
+ rx_ring->count = vsi->num_desc;
+ rx_ring->size = 0;
+ rx_ring->dcb_tc = 0;
+ if (pf->flags & I40E_FLAG_16BYTE_RX_DESC_ENABLED)
+ set_ring_16byte_desc_enabled(rx_ring);
+ else
+ clear_ring_16byte_desc_enabled(rx_ring);
+ }
+
+err_alloc_rings:
+ return ret;
+}
+
+/**
+ * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI
+ * @vsi: the VSI being cleaned
+ **/
+static int i40e_vsi_clear_rings(struct i40e_vsi *vsi)
+{
+ if (vsi) {
+ kfree(vsi->rx_rings);
+ kfree(vsi->tx_rings);
+ }
+
+ return 0;
+}
+
+/**
+ * i40e_reserve_msix_vectors - Reserve MSI-X vectors in the kernel
+ * @pf: board private structure
+ * @vectors: the number of MSI-X vectors to request
+ *
+ * Returns the number of vectors reserved, or error
+ **/
+static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors)
+{
+ int err = 0;
+
+ pf->num_msix_entries = 0;
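+ /* pci_enable_msix() returns 0 on success, a negative errno on
+ * failure, or the number of vectors available when the request
+ * can't be satisfied as-is
+ */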
+ while (vectors >= I40E_MIN_MSIX) {
+ err = pci_enable_msix(pf->pdev, pf->msix_entries, vectors);
+ if (err == 0) {
+ /* good to go */
+ pf->num_msix_entries = vectors;
+ break;
+ } else if (err < 0) {
+ /* total failure */
+ dev_info(&pf->pdev->dev,
+ "MSI-X vector reservation failed: %d\n", err);
+ vectors = 0;
+ break;
+ } else {
+ /* err > 0 is the hint for retry */
+ dev_info(&pf->pdev->dev,
+ "MSI-X vectors wanted %d, retrying with %d\n",
+ vectors, err);
+ vectors = err;
+ }
+ }
+
+ if (vectors > 0 && vectors < I40E_MIN_MSIX) {
+ dev_info(&pf->pdev->dev,
+ "Couldn't get enough vectors, only %d available\n",
+ vectors);
+ vectors = 0;
+ }
+
+ return vectors;
+}
+
+/**
+ * i40e_init_msix - Setup the MSIX capability
+ * @pf: board private structure
+ *
+ * Work with the OS to set up the MSIX vectors needed.
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int i40e_init_msix(struct i40e_pf *pf)
+{
+ i40e_status err = 0;
+ struct i40e_hw *hw = &pf->hw;
+ int v_budget, i;
+ int vec;
+
+ if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
+ return -ENODEV;
+
+ /* The number of vectors we'll request will consist of:
+ * - Add 1 for "other" cause for Admin Queue events, etc.
+ * - The number of LAN queue pairs
+ * already adjusted for the NUMA node
+ * assumes symmetric Tx/Rx pairing
+ * - The number of VMDq pairs
+ * Once we count this up, try the request.
+ *
+ * If we can't get what we want, we'll simplify to nearly nothing
+ * and try again. If that still fails, we punt.
+ */
+ pf->num_lan_msix = pf->num_lan_qps;
+ pf->num_vmdq_msix = pf->num_vmdq_qps;
+ v_budget = 1 + pf->num_lan_msix;
+ v_budget += (pf->num_vmdq_vsis * pf->num_vmdq_msix);
+ if (pf->flags & I40E_FLAG_FDIR_ENABLED)
+ v_budget++;
+
+ /* Scale down if necessary, and the rings will share vectors */
+ v_budget = min_t(int, v_budget, hw->func_caps.num_msix_vectors);
+
+ pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
+ GFP_KERNEL);
+ if (!pf->msix_entries)
+ return -ENOMEM;
+
+ for (i = 0; i < v_budget; i++)
+ pf->msix_entries[i].entry = i;
+ vec = i40e_reserve_msix_vectors(pf, v_budget);
+ if (vec < I40E_MIN_MSIX) {
+ pf->flags &= ~I40E_FLAG_MSIX_ENABLED;
+ kfree(pf->msix_entries);
+ pf->msix_entries = NULL;
+ return -ENODEV;
+
+ } else if (vec == I40E_MIN_MSIX) {
+ /* Adjust for minimal MSIX use */
+ dev_info(&pf->pdev->dev, "Features disabled, not enough MSIX vectors\n");
+ pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
+ pf->num_vmdq_vsis = 0;
+ pf->num_vmdq_qps = 0;
+ pf->num_vmdq_msix = 0;
+ pf->num_lan_qps = 1;
+ pf->num_lan_msix = 1;
+
+ } else if (vec != v_budget) {
+ /* Scale vector usage down */
+ pf->num_vmdq_msix = 1; /* force VMDqs to only one vector */
+ vec--; /* reserve the misc vector */
+
+ /* partition out the remaining vectors */
+ switch (vec) {
+ case 2:
+ pf->num_vmdq_vsis = 1;
+ pf->num_lan_msix = 1;
+ break;
+ case 3:
+ pf->num_vmdq_vsis = 1;
+ pf->num_lan_msix = 2;
+ break;
+ default:
+ pf->num_lan_msix = min_t(int, (vec / 2),
+ pf->num_lan_qps);
+ pf->num_vmdq_vsis = min_t(int, (vec - pf->num_lan_msix),
+ I40E_DEFAULT_NUM_VMDQ_VSI);
+ break;
+ }
+ }
+
+ return err;
+}
+
+/**
+ * i40e_alloc_q_vectors - Allocate memory for interrupt vectors
+ * @vsi: the VSI being configured
+ *
+ * We allocate one q_vector per queue interrupt. If allocation fails we
+ * return -ENOMEM.
+ **/
+static int i40e_alloc_q_vectors(struct i40e_vsi *vsi)
+{
+ struct i40e_pf *pf = vsi->back;
+ int v_idx, num_q_vectors;
+
+ /* if not MSIX, give the one vector only to the LAN VSI */
+ if (pf->flags & I40E_FLAG_MSIX_ENABLED)
+ num_q_vectors = vsi->num_q_vectors;
+ else if (vsi == pf->vsi[pf->lan_vsi])
+ num_q_vectors = 1;
+ else
+ return -EINVAL;
+
+ vsi->q_vectors = kcalloc(num_q_vectors,
+ sizeof(struct i40e_q_vector),
+ GFP_KERNEL);
+ if (!vsi->q_vectors)
+ return -ENOMEM;
+
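+ /* tie each q_vector to its VSI and register the NAPI poll handler */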
+ for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
+ vsi->q_vectors[v_idx].vsi = vsi;
+ vsi->q_vectors[v_idx].v_idx = v_idx;
+ cpumask_set_cpu(v_idx, &vsi->q_vectors[v_idx].affinity_mask);
+ if (vsi->netdev)
+ netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx].napi,
+ i40e_napi_poll, vsi->work_limit);
+ }
+
+ return 0;
+}
+
+/**
+ * i40e_init_interrupt_scheme - Determine proper interrupt scheme
+ * @pf: board private structure to initialize
+ **/
+static void i40e_init_interrupt_scheme(struct i40e_pf *pf)
+{
+ int err = 0;
+
+ if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
+ err = i40e_init_msix(pf);
+ if (err) {
+ pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
+ I40E_FLAG_MQ_ENABLED |
+ I40E_FLAG_DCB_ENABLED |
+ I40E_FLAG_SRIOV_ENABLED |
+ I40E_FLAG_FDIR_ENABLED |
+ I40E_FLAG_FDIR_ATR_ENABLED |
+ I40E_FLAG_VMDQ_ENABLED);
+
+ /* rework the queue expectations without MSIX */
+ i40e_determine_queue_usage(pf);
+ }
+ }
+
+ if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
+ (pf->flags & I40E_FLAG_MSI_ENABLED)) {
+ err = pci_enable_msi(pf->pdev);
+ if (err) {
+ dev_info(&pf->pdev->dev,
+ "MSI init failed (%d), trying legacy.\n", err);
+ pf->flags &= ~I40E_FLAG_MSI_ENABLED;
+ }
+ }
+
+ /* track first vector for misc interrupts */
+ err = i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT-1);
+}
+
+/**
+ * i40e_setup_misc_vector - Setup the misc vector to handle non queue events
+ * @pf: board private structure
+ *
+ * This sets up the handler for MSIX 0, which is used to manage the
+ * non-queue interrupts, e.g. AdminQ and errors. This is not used
+ * when in MSI or Legacy interrupt mode.
+ **/
+static int i40e_setup_misc_vector(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ int err = 0;
+
+ /* Only request the irq if this is the first time through, and
+ * not when we're rebuilding after a Reset
+ */
+ if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) {
+ err = request_irq(pf->msix_entries[0].vector,
+ i40e_intr, 0, pf->misc_int_name, pf);
+ if (err) {
+ dev_info(&pf->pdev->dev,
+ "request_irq for msix_misc failed: %d\n", err);
+ return -EFAULT;
+ }
+ }
+
+ i40e_enable_misc_int_causes(hw);
+
+ /* associate no queues to the misc vector */
+ wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST);
+ wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K);
+
+ i40e_flush(hw);
+
+ i40e_irq_dynamic_enable_icr0(pf);
+
+ return err;
+}
+
+/**
+ * i40e_config_rss - Prepare for RSS if used
+ * @pf: board private structure
+ **/
+static int i40e_config_rss(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ u32 lut = 0;
+ int i, j;
+ u64 hena;
+ /* Set of random keys generated using kernel random number generator */
+ static const u32 seed[I40E_PFQF_HKEY_MAX_INDEX + 1] = {0x41b01687,
+ 0x183cfd8c, 0xce880440, 0x580cbc3c, 0x35897377,
+ 0x328b25e1, 0x4fa98922, 0xb7d90c14, 0xd5bad70d,
+ 0xcd15a2c1, 0xe8580225, 0x4a1e9d11, 0xfe5731be};
+
+ /* Fill out hash function seed */
+ for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
+ wr32(hw, I40E_PFQF_HKEY(i), seed[i]);
+
+ /* By default we enable TCP/UDP with IPv4/IPv6 ptypes */
+ hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) |
+ ((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32);
+ hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) |
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) |
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) |
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) |
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) |
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) |
+ ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) |
+ ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
+ wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
+ wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
+
+ /* Populate the LUT with the max number of queues in round-robin fashion */
+ for (i = 0, j = 0; i < pf->hw.func_caps.rss_table_size; i++, j++) {
+
+ /* The assumption is that lan qp count will be the highest
+ * qp count for any PF VSI that needs RSS.
+ * If multiple VSIs need RSS support, all the qp counts
+ * for those VSIs should be a power of 2 for RSS to work.
+ * If LAN VSI is the only consumer for RSS then this requirement
+ * is not necessary.
+ */
+ if (j == pf->rss_size)
+ j = 0;
+ /* lut = 4-byte sliding window of 4 lut entries */
+ lut = (lut << 8) | (j &
+ ((0x1 << pf->hw.func_caps.rss_table_entry_width) - 1));
+ /* On i = 3, we have 4 entries in lut; write to the register */
+ if ((i & 3) == 3)
+ wr32(hw, I40E_PFQF_HLUT(i >> 2), lut);
+ }
+ i40e_flush(hw);
+
+ return 0;
+}
+
+/**
+ * i40e_sw_init - Initialize general software structures (struct i40e_pf)
+ * @pf: board private structure to initialize
+ *
+ * i40e_sw_init initializes the Adapter private data structure.
+ * Fields are initialized based on PCI device information and
+ * OS network device settings (MTU size).
+ **/
+static int i40e_sw_init(struct i40e_pf *pf)
+{
+ int err = 0;
+ int size;
+
+ pf->msg_enable = netif_msg_init(I40E_DEFAULT_MSG_ENABLE,
+ (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK));
+ if (debug != -1 && debug != I40E_DEFAULT_MSG_ENABLE) {
+ if (I40E_DEBUG_USER & debug)
+ pf->hw.debug_mask = debug;
+ pf->msg_enable = netif_msg_init((debug & ~I40E_DEBUG_USER),
+ I40E_DEFAULT_MSG_ENABLE);
+ }
+
+ /* Set default capability flags */
+ pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
+ I40E_FLAG_MSI_ENABLED |
+ I40E_FLAG_MSIX_ENABLED |
+ I40E_FLAG_RX_PS_ENABLED |
+ I40E_FLAG_MQ_ENABLED |
+ I40E_FLAG_RX_1BUF_ENABLED;
+
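+ /* each RSS LUT entry is rss_table_entry_width bits wide,
+ * so the table can address at most 2^width queues
+ */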
+ pf->rss_size_max = 0x1 << pf->hw.func_caps.rss_table_entry_width;
+ if (pf->hw.func_caps.rss) {
+ pf->flags |= I40E_FLAG_RSS_ENABLED;
+ pf->rss_size = min_t(int, pf->rss_size_max,
+ nr_cpus_node(numa_node_id()));
+ } else {
+ pf->rss_size = 1;
+ }
+
+ if (pf->hw.func_caps.dcb)
+ pf->num_tc_qps = I40E_DEFAULT_QUEUES_PER_TC;
+ else
+ pf->num_tc_qps = 0;
+
+ if (pf->hw.func_caps.fd) {
+ /* FW/NVM is not yet fixed in this regard */
+ if ((pf->hw.func_caps.fd_filters_guaranteed > 0) ||
+ (pf->hw.func_caps.fd_filters_best_effort > 0)) {
+ pf->flags |= I40E_FLAG_FDIR_ATR_ENABLED;
+ dev_info(&pf->pdev->dev,
+ "Flow Director ATR mode Enabled\n");
+ pf->flags |= I40E_FLAG_FDIR_ENABLED;
+ dev_info(&pf->pdev->dev,
+ "Flow Director Side Band mode Enabled\n");
+ pf->fdir_pf_filter_count =
+ pf->hw.func_caps.fd_filters_guaranteed;
+ }
+ } else {
+ pf->fdir_pf_filter_count = 0;
+ }
+
+ if (pf->hw.func_caps.vmdq) {
+ pf->flags |= I40E_FLAG_VMDQ_ENABLED;
+ pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
+ pf->num_vmdq_qps = I40E_DEFAULT_QUEUES_PER_VMDQ;
+ }
+
+ /* MFP mode enabled */
+ if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.mfp_mode_1) {
+ pf->flags |= I40E_FLAG_MFP_ENABLED;
+ dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
+ }
+
+#ifdef CONFIG_PCI_IOV
+ if (pf->hw.func_caps.num_vfs) {
+ pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
+ pf->flags |= I40E_FLAG_SRIOV_ENABLED;
+ pf->num_req_vfs = min_t(int,
+ pf->hw.func_caps.num_vfs,
+ I40E_MAX_VF_COUNT);
+ }
+#endif /* CONFIG_PCI_IOV */
+ pf->eeprom_version = 0xDEAD;
+ pf->lan_veb = I40E_NO_VEB;
+ pf->lan_vsi = I40E_NO_VSI;
+
+ /* set up queue assignment tracking */
+ size = sizeof(struct i40e_lump_tracking)
+ + (sizeof(u16) * pf->hw.func_caps.num_tx_qp);
+ pf->qp_pile = kzalloc(size, GFP_KERNEL);
+ if (!pf->qp_pile) {
+ err = -ENOMEM;
+ goto sw_init_done;
+ }
+ pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp;
+ pf->qp_pile->search_hint = 0;
+
+ /* set up vector assignment tracking */
+ size = sizeof(struct i40e_lump_tracking)
+ + (sizeof(u16) * pf->hw.func_caps.num_msix_vectors);
+ pf->irq_pile = kzalloc(size, GFP_KERNEL);
+ if (!pf->irq_pile) {
+ kfree(pf->qp_pile);
+ err = -ENOMEM;
+ goto sw_init_done;
+ }
+ pf->irq_pile->num_entries = pf->hw.func_caps.num_msix_vectors;
+ pf->irq_pile->search_hint = 0;
+
+ mutex_init(&pf->switch_mutex);
+
+sw_init_done:
+ return err;
+}
+
+/**
+ * i40e_set_features - set the netdev feature flags
+ * @netdev: ptr to the netdev being adjusted
+ * @features: the feature set that the stack is suggesting
+ **/
+static int i40e_set_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+
+ if (features & NETIF_F_HW_VLAN_CTAG_RX)
+ i40e_vlan_stripping_enable(vsi);
+ else
+ i40e_vlan_stripping_disable(vsi);
+
+ return 0;
+}
+
+static const struct net_device_ops i40e_netdev_ops = {
+ .ndo_open = i40e_open,
+ .ndo_stop = i40e_close,
+ .ndo_start_xmit = i40e_lan_xmit_frame,
+ .ndo_get_stats64 = i40e_get_netdev_stats_struct,
+ .ndo_set_rx_mode = i40e_set_rx_mode,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = i40e_set_mac,
+ .ndo_change_mtu = i40e_change_mtu,
+ .ndo_tx_timeout = i40e_tx_timeout,
+ .ndo_vlan_rx_add_vid = i40e_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = i40e_vlan_rx_kill_vid,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = i40e_netpoll,
+#endif
+ .ndo_setup_tc = i40e_setup_tc,
+ .ndo_set_features = i40e_set_features,
+ .ndo_set_vf_mac = i40e_ndo_set_vf_mac,
+ .ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan,
+ .ndo_set_vf_tx_rate = i40e_ndo_set_vf_bw,
+ .ndo_get_vf_config = i40e_ndo_get_vf_config,
+};
+
+/**
+ * i40e_config_netdev - Setup the netdev flags
+ * @vsi: the VSI being configured
+ *
+ * Returns 0 on success, negative value on failure
+ **/
+static int i40e_config_netdev(struct i40e_vsi *vsi)
+{
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ struct i40e_netdev_priv *np;
+ struct net_device *netdev;
+ u8 mac_addr[ETH_ALEN];
+ int etherdev_size;
+
+ etherdev_size = sizeof(struct i40e_netdev_priv);
+ netdev = alloc_etherdev_mq(etherdev_size, vsi->alloc_queue_pairs);
+ if (!netdev)
+ return -ENOMEM;
+
+ vsi->netdev = netdev;
+ np = netdev_priv(netdev);
+ np->vsi = vsi;
+
+ netdev->hw_enc_features = NETIF_F_IP_CSUM |
+ NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_TSO |
+ NETIF_F_SG;
+
+ netdev->features = NETIF_F_SG |
+ NETIF_F_IP_CSUM |
+ NETIF_F_SCTP_CSUM |
+ NETIF_F_HIGHDMA |
+ NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_FILTER |
+ NETIF_F_IPV6_CSUM |
+ NETIF_F_TSO |
+ NETIF_F_TSO6 |
+ NETIF_F_RXCSUM |
+ NETIF_F_RXHASH |
+ 0;
+
+ /* copy netdev features into list of user selectable features */
+ netdev->hw_features |= netdev->features;
+
+ if (vsi->type == I40E_VSI_MAIN) {
+ SET_NETDEV_DEV(netdev, &pf->pdev->dev);
+ memcpy(mac_addr, hw->mac.perm_addr, ETH_ALEN);
+ } else {
+ /* relate the VSI_VMDQ name to the VSI_MAIN name */
+ snprintf(netdev->name, IFNAMSIZ, "%sv%%d",
+ pf->vsi[pf->lan_vsi]->netdev->name);
+ random_ether_addr(mac_addr);
+ i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY, false, false);
+ }
+
+ memcpy(netdev->dev_addr, mac_addr, ETH_ALEN);
+ memcpy(netdev->perm_addr, mac_addr, ETH_ALEN);
+ /* vlan gets same features (except vlan offload)
+ * after any tweaks for specific VSI types
+ */
+ netdev->vlan_features = netdev->features & ~(NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_FILTER);
+ netdev->priv_flags |= IFF_UNICAST_FLT;
+ netdev->priv_flags |= IFF_SUPP_NOFCS;
+ /* Setup netdev TC information */
+ i40e_vsi_config_netdev_tc(vsi, vsi->tc_config.enabled_tc);
+
+ netdev->netdev_ops = &i40e_netdev_ops;
+ netdev->watchdog_timeo = 5 * HZ;
+ i40e_set_ethtool_ops(netdev);
+
+ return 0;
+}
+
+/**
+ * i40e_vsi_delete - Delete a VSI from the switch
+ * @vsi: the VSI being removed
+ **/
+static void i40e_vsi_delete(struct i40e_vsi *vsi)
+{
+ /* remove default VSI is not allowed */
+ if (vsi == vsi->back->vsi[vsi->back->lan_vsi])
+ return;
+
+ /* there is no HW VSI for FDIR */
+ if (vsi->type == I40E_VSI_FDIR)
+ return;
+
+ i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL);
+}
+
+/**
+ * i40e_add_vsi - Add a VSI to the switch
+ * @vsi: the VSI being configured
+ *
+ * This initializes a VSI context depending on the VSI type to be added and
+ * passes it down to the add_vsi aq command.
+ **/
+static int i40e_add_vsi(struct i40e_vsi *vsi)
+{
+ int ret = -ENODEV;
+ struct i40e_mac_filter *f, *ftmp;
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ struct i40e_vsi_context ctxt;
+ u8 enabled_tc = 0x1; /* TC0 enabled */
+ int f_count = 0;
+
+ memset(&ctxt, 0, sizeof(ctxt));
+ switch (vsi->type) {
+ case I40E_VSI_MAIN:
+ /* The PF's main VSI is already setup as part of the
+ * device initialization, so we'll not bother with
+ * the add_vsi call, but we will retrieve the current
+ * VSI context.
+ */
+ ctxt.seid = pf->main_vsi_seid;
+ ctxt.pf_num = pf->hw.pf_id;
+ ctxt.vf_num = 0;
+ ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
+ ctxt.flags = I40E_AQ_VSI_TYPE_PF;
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "couldn't get pf vsi config, err %d, aq_err %d\n",
+ ret, pf->hw.aq.asq_last_status);
+ return -ENOENT;
+ }
+ memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
+ vsi->info.valid_sections = 0;
+
+ vsi->seid = ctxt.seid;
+ vsi->id = ctxt.vsi_number;
+
+ enabled_tc = i40e_pf_get_tc_map(pf);
+
+ /* MFP mode setup queue map and update VSI */
+ if (pf->flags & I40E_FLAG_MFP_ENABLED) {
+ memset(&ctxt, 0, sizeof(ctxt));
+ ctxt.seid = pf->main_vsi_seid;
+ ctxt.pf_num = pf->hw.pf_id;
+ ctxt.vf_num = 0;
+ i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
+ ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "update vsi failed, aq_err=%d\n",
+ pf->hw.aq.asq_last_status);
+ ret = -ENOENT;
+ goto err;
+ }
+ /* update the local VSI info queue map */
+ i40e_vsi_update_queue_map(vsi, &ctxt);
+ vsi->info.valid_sections = 0;
+ } else {
+ /* Default/Main VSI is only enabled for TC0
+ * reconfigure it to enable all TCs that are
+ * available on the port in SFP mode.
+ */
+ ret = i40e_vsi_config_tc(vsi, enabled_tc);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "failed to configure TCs for main VSI tc_map 0x%08x, err %d, aq_err %d\n",
+ enabled_tc, ret,
+ pf->hw.aq.asq_last_status);
+ ret = -ENOENT;
+ }
+ }
+ break;
+
+ case I40E_VSI_FDIR:
+ /* no queue mapping or actual HW VSI needed */
+ vsi->info.valid_sections = 0;
+ vsi->seid = 0;
+ vsi->id = 0;
+ i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
+ return 0;
+
+ case I40E_VSI_VMDQ2:
+ ctxt.pf_num = hw->pf_id;
+ ctxt.vf_num = 0;
+ ctxt.uplink_seid = vsi->uplink_seid;
+ ctxt.connection_type = 0x1; /* regular data port */
+ ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
+
+ ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
+
+ /* This VSI is connected to VEB so the switch_id
+ * should be set to zero by default.
+ */
+ ctxt.info.switch_id = 0;
+ ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB);
+ ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
+
+ /* Setup the VSI tx/rx queue map for TC0 only for now */
+ i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
+ break;
+
+ case I40E_VSI_SRIOV:
+ ctxt.pf_num = hw->pf_id;
+ ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
+ ctxt.uplink_seid = vsi->uplink_seid;
+ ctxt.connection_type = 0x1; /* regular data port */
+ ctxt.flags = I40E_AQ_VSI_TYPE_VF;
+
+ ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
+
+ /* This VSI is connected to VEB so the switch_id
+ * should be set to zero by default.
+ */
+ ctxt.info.switch_id = cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
+
+ ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
+ ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
+ /* Setup the VSI tx/rx queue map for TC0 only for now */
+ i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
+ break;
+
+ default:
+ return -ENODEV;
+ }
+
+ if (vsi->type != I40E_VSI_MAIN) {
+ ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
+ if (ret) {
+ dev_info(&vsi->back->pdev->dev,
+ "add vsi failed, aq_err=%d\n",
+ vsi->back->hw.aq.asq_last_status);
+ ret = -ENOENT;
+ goto err;
+ }
+ memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
+ vsi->info.valid_sections = 0;
+ vsi->seid = ctxt.seid;
+ vsi->id = ctxt.vsi_number;
+ }
+
+ /* If macvlan filters already exist, force them to get loaded */
+ list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
+ f->changed = true;
+ f_count++;
+ }
+ if (f_count) {
+ vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
+ pf->flags |= I40E_FLAG_FILTER_SYNC;
+ }
+
+ /* Update VSI BW information */
+ ret = i40e_vsi_get_bw_info(vsi);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "couldn't get vsi bw info, err %d, aq_err %d\n",
+ ret, pf->hw.aq.asq_last_status);
+ /* VSI is already added so not tearing that up */
+ ret = 0;
+ }
+
+err:
+ return ret;
+}
+
+/**
+ * i40e_vsi_release - Delete a VSI and free its resources
+ * @vsi: the VSI being removed
+ *
+ * Returns 0 on success or < 0 on error
+ **/
+int i40e_vsi_release(struct i40e_vsi *vsi)
+{
+ struct i40e_mac_filter *f, *ftmp;
+ struct i40e_veb *veb = NULL;
+ struct i40e_pf *pf;
+ u16 uplink_seid;
+ int i, n;
+
+ pf = vsi->back;
+
+ /* release of a VEB-owner or last VSI is not allowed */
+ if (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) {
+ dev_info(&pf->pdev->dev, "VSI %d has existing VEB %d\n",
+ vsi->seid, vsi->uplink_seid);
+ return -ENODEV;
+ }
+ if (vsi == pf->vsi[pf->lan_vsi] &&
+ !test_bit(__I40E_DOWN, &pf->state)) {
+ dev_info(&pf->pdev->dev, "Can't remove PF VSI\n");
+ return -ENODEV;
+ }
+
+ uplink_seid = vsi->uplink_seid;
+ if (vsi->type != I40E_VSI_SRIOV) {
+ if (vsi->netdev_registered) {
+ vsi->netdev_registered = false;
+ if (vsi->netdev) {
+ /* results in a call to i40e_close() */
+ unregister_netdev(vsi->netdev);
+ free_netdev(vsi->netdev);
+ vsi->netdev = NULL;
+ }
+ } else {
+ if (!test_and_set_bit(__I40E_DOWN, &vsi->state))
+ i40e_down(vsi);
+ i40e_vsi_free_irq(vsi);
+ i40e_vsi_free_tx_resources(vsi);
+ i40e_vsi_free_rx_resources(vsi);
+ }
+ i40e_vsi_disable_irq(vsi);
+ }
+
+ list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list)
+ i40e_del_filter(vsi, f->macaddr, f->vlan,
+ f->is_vf, f->is_netdev);
+ i40e_sync_vsi_filters(vsi);
+
+ i40e_vsi_delete(vsi);
+ i40e_vsi_free_q_vectors(vsi);
+ i40e_vsi_clear_rings(vsi);
+ i40e_vsi_clear(vsi);
+
+ /* If this was the last thing on the VEB, except for the
+ * controlling VSI, remove the VEB, which puts the controlling
+ * VSI onto the next level down in the switch.
+ *
+ * Well, okay, there's one more exception here: don't remove
+ * the orphan VEBs yet. We'll wait for an explicit remove request
+ * from up the network stack.
+ */
+ for (n = 0, i = 0; i < pf->hw.func_caps.num_vsis; i++) {
+ if (pf->vsi[i] &&
+ pf->vsi[i]->uplink_seid == uplink_seid &&
+ (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
+ n++; /* count the VSIs */
+ }
+ }
+ for (i = 0; i < I40E_MAX_VEB; i++) {
+ if (!pf->veb[i])
+ continue;
+ if (pf->veb[i]->uplink_seid == uplink_seid)
+ n++; /* count the VEBs */
+ if (pf->veb[i]->seid == uplink_seid)
+ veb = pf->veb[i];
+ }
+ if (n == 0 && veb && veb->uplink_seid != 0)
+ i40e_veb_release(veb);
+
+ return 0;
+}
+
+/**
+ * i40e_vsi_setup_vectors - Set up the q_vectors for the given VSI
+ * @vsi: ptr to the VSI
+ *
+ * This should only be called after i40e_vsi_mem_alloc() which allocates the
+ * corresponding SW VSI structure and initializes num_queue_pairs for the
+ * newly allocated VSI.
+ *
+ * Returns 0 on success or negative on failure
+ **/
+static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
+{
+ int ret = -ENOENT;
+ struct i40e_pf *pf = vsi->back;
+
+ if (vsi->q_vectors) {
+ dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
+ vsi->seid);
+ return -EEXIST;
+ }
+
+ if (vsi->base_vector) {
+ dev_info(&pf->pdev->dev,
+ "VSI %d has non-zero base vector %d\n",
+ vsi->seid, vsi->base_vector);
+ return -EEXIST;
+ }
+
+ ret = i40e_alloc_q_vectors(vsi);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "failed to allocate %d q_vector for VSI %d, ret=%d\n",
+ vsi->num_q_vectors, vsi->seid, ret);
+ vsi->num_q_vectors = 0;
+ goto vector_setup_out;
+ }
+
+ vsi->base_vector = i40e_get_lump(pf, pf->irq_pile,
+ vsi->num_q_vectors, vsi->idx);
+ if (vsi->base_vector < 0) {
+ dev_info(&pf->pdev->dev,
+ "failed to get q tracking for VSI %d, err=%d\n",
+ vsi->seid, vsi->base_vector);
+ i40e_vsi_free_q_vectors(vsi);
+ ret = -ENOENT;
+ goto vector_setup_out;
+ }
+
+vector_setup_out:
+ return ret;
+}
+
+/**
+ * i40e_vsi_setup - Set up a VSI by a given type
+ * @pf: board private structure
+ * @type: VSI type
+ * @uplink_seid: the switch element to link to
+ * @param1: usage depends upon VSI type. For VF types, indicates VF id
+ *
+ * This allocates the sw VSI structure and its queue resources, then adds the
+ * VSI to the identified VEB.
+ *
+ * Returns pointer to the successfully allocated and configured VSI sw struct
+ * on success, otherwise returns NULL on failure.
+ **/
+struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
+ u16 uplink_seid, u32 param1)
+{
+ struct i40e_vsi *vsi = NULL;
+ struct i40e_veb *veb = NULL;
+ int ret, i;
+ int v_idx;
+
+ /* The requested uplink_seid must be either
+ * - the PF's port seid
+ * no VEB is needed because this is the PF
+ * or this is a Flow Director special case VSI
+ * - seid of an existing VEB
+ * - seid of a VSI that owns an existing VEB
+ * - seid of a VSI that doesn't own a VEB
+ * a new VEB is created and the VSI becomes the owner
+ * - seid of the PF VSI, which is what creates the first VEB
+ * this is a special case of the previous
+ *
+ * Find which uplink_seid we were given and create a new VEB if needed
+ */
+ for (i = 0; i < I40E_MAX_VEB; i++) {
+ if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) {
+ veb = pf->veb[i];
+ break;
+ }
+ }
+
+ if (!veb && uplink_seid != pf->mac_seid) {
+
+ for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
+ if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) {
+ vsi = pf->vsi[i];
+ break;
+ }
+ }
+ if (!vsi) {
+ dev_info(&pf->pdev->dev, "no such uplink_seid %d\n",
+ uplink_seid);
+ return NULL;
+ }
+
+ if (vsi->uplink_seid == pf->mac_seid)
+ veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid,
+ vsi->tc_config.enabled_tc);
+ else if ((vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0)
+ veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
+ vsi->tc_config.enabled_tc);
+
+ for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
+ if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
+ veb = pf->veb[i];
+ }
+ if (!veb) {
+ dev_info(&pf->pdev->dev, "couldn't add VEB\n");
+ return NULL;
+ }
+
+ vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
+ uplink_seid = veb->seid;
+ }
+
+ /* get vsi sw struct */
+ v_idx = i40e_vsi_mem_alloc(pf, type);
+ if (v_idx < 0)
+ goto err_alloc;
+ vsi = pf->vsi[v_idx];
+ vsi->type = type;
+ vsi->veb_idx = (veb ? veb->idx : I40E_NO_VEB);
+
+ if (type == I40E_VSI_MAIN)
+ pf->lan_vsi = v_idx;
+ else if (type == I40E_VSI_SRIOV)
+ vsi->vf_id = param1;
+ /* assign it some queues */
+ ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, vsi->idx);
+ if (ret < 0) {
+ dev_info(&pf->pdev->dev, "VSI %d get_lump failed %d\n",
+ vsi->seid, ret);
+ goto err_vsi;
+ }
+ vsi->base_queue = ret;
+
+ /* get a VSI from the hardware */
+ vsi->uplink_seid = uplink_seid;
+ ret = i40e_add_vsi(vsi);
+ if (ret)
+ goto err_vsi;
+
+ switch (vsi->type) {
+ /* setup the netdev if needed */
+ case I40E_VSI_MAIN:
+ case I40E_VSI_VMDQ2:
+ ret = i40e_config_netdev(vsi);
+ if (ret)
+ goto err_netdev;
+ ret = register_netdev(vsi->netdev);
+ if (ret)
+ goto err_netdev;
+ vsi->netdev_registered = true;
+ netif_carrier_off(vsi->netdev);
+ /* fall through */
+
+ case I40E_VSI_FDIR:
+ /* set up vectors and rings if needed */
+ ret = i40e_vsi_setup_vectors(vsi);
+ if (ret)
+ goto err_msix;
+
+ ret = i40e_alloc_rings(vsi);
+ if (ret)
+ goto err_rings;
+
+ /* map all of the rings to the q_vectors */
+ i40e_vsi_map_rings_to_vectors(vsi);
+
+ i40e_vsi_reset_stats(vsi);
+ break;
+
+ default:
+ /* no netdev or rings for the other VSI types */
+ break;
+ }
+
+ return vsi;
+
+err_rings:
+ i40e_vsi_free_q_vectors(vsi);
+err_msix:
+ if (vsi->netdev_registered) {
+ vsi->netdev_registered = false;
+ unregister_netdev(vsi->netdev);
+ free_netdev(vsi->netdev);
+ vsi->netdev = NULL;
+ }
+err_netdev:
+ i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
+err_vsi:
+ i40e_vsi_clear(vsi);
+err_alloc:
+ return NULL;
+}
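
For a concrete caller, the uplink_seid selection described in the comment at
the top of this function is what i40e_setup_pf_switch() later in this patch
does for the MAIN VSI; condensed:

	u16 uplink_seid;

	if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
		uplink_seid = pf->veb[pf->lan_veb]->seid;
	else
		uplink_seid = pf->mac_seid;

	vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0);
	if (!vsi)
		dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n");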
+
+/**
+ * i40e_veb_get_bw_info - Query VEB BW information
+ * @veb: the veb to query
+ *
+ * Query the Tx scheduler BW configuration data for the given VEB
+ **/
+static int i40e_veb_get_bw_info(struct i40e_veb *veb)
+{
+ struct i40e_aqc_query_switching_comp_ets_config_resp ets_data;
+ struct i40e_aqc_query_switching_comp_bw_config_resp bw_data;
+ struct i40e_pf *pf = veb->pf;
+ struct i40e_hw *hw = &pf->hw;
+ u32 tc_bw_max;
+ int ret = 0;
+ int i;
+
+ ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
+ &bw_data, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "query veb bw config failed, aq_err=%d\n",
+ hw->aq.asq_last_status);
+ goto out;
+ }
+
+ ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
+ &ets_data, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "query veb bw ets config failed, aq_err=%d\n",
+ hw->aq.asq_last_status);
+ goto out;
+ }
+
+ veb->bw_limit = le16_to_cpu(ets_data.port_bw_limit);
+ veb->bw_max_quanta = ets_data.tc_bw_max;
+ veb->is_abs_credits = bw_data.absolute_credits_enable;
+ tc_bw_max = le16_to_cpu(bw_data.tc_bw_max[0]) |
+ (le16_to_cpu(bw_data.tc_bw_max[1]) << 16);
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ veb->bw_tc_share_credits[i] = bw_data.tc_bw_share_credits[i];
+ veb->bw_tc_limit_credits[i] =
+ le16_to_cpu(bw_data.tc_bw_limits[i]);
+ veb->bw_tc_max_quanta[i] = ((tc_bw_max >> (i*4)) & 0x7);
+ }
+
+out:
+ return ret;
+}
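
The two 16-bit tc_bw_max words above are concatenated into one 32-bit value
carrying a 3-bit quanta field in each traffic-class nibble. A worked example
with a hypothetical register value:

	u32 tc_bw_max = 0x07654321;	/* tc_bw_max[1]=0x0765, [0]=0x4321 */
	int i;

	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
		pr_info("TC%d max quanta = %u\n", i,
			(tc_bw_max >> (i * 4)) & 0x7);
	/* prints 1, 2, 3, 4, 5, 6, 7, 0 for TC0..TC7 */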
+
+/**
+ * i40e_veb_mem_alloc - Allocates the next available struct veb in the PF
+ * @pf: board private structure
+ *
+ * On error: returns error code (negative)
+ * On success: returns veb index in PF (positive)
+ **/
+static int i40e_veb_mem_alloc(struct i40e_pf *pf)
+{
+ int ret = -ENOENT;
+ struct i40e_veb *veb;
+ int i;
+
+ /* Need to protect the allocation of switch elements at the PF level */
+ mutex_lock(&pf->switch_mutex);
+
+ /* VEB list may be fragmented if VEB creation/destruction has
+ * been happening. We can afford to do a quick scan to look
+ * for any free slots in the list.
+ *
+ * find the next empty veb slot
+ */
+ i = 0;
+ while ((i < I40E_MAX_VEB) && (pf->veb[i] != NULL))
+ i++;
+ if (i >= I40E_MAX_VEB) {
+ ret = -ENOMEM;
+ goto err_alloc_veb; /* out of VEB slots! */
+ }
+
+ veb = kzalloc(sizeof(*veb), GFP_KERNEL);
+ if (!veb) {
+ ret = -ENOMEM;
+ goto err_alloc_veb;
+ }
+ veb->pf = pf;
+ veb->idx = i;
+ veb->enabled_tc = 1;
+
+ pf->veb[i] = veb;
+ ret = i;
+err_alloc_veb:
+ mutex_unlock(&pf->switch_mutex);
+ return ret;
+}
+
+/**
+ * i40e_switch_branch_release - Delete a branch of the switch tree
+ * @branch: where to start deleting
+ *
+ * This uses recursion to find the tips of the branch to be
+ * removed, deleting until we get back to and can delete this VEB.
+ **/
+static void i40e_switch_branch_release(struct i40e_veb *branch)
+{
+ struct i40e_pf *pf = branch->pf;
+ u16 branch_seid = branch->seid;
+ u16 veb_idx = branch->idx;
+ int i;
+
+ /* release any VEBs on this VEB - RECURSION */
+ for (i = 0; i < I40E_MAX_VEB; i++) {
+ if (!pf->veb[i])
+ continue;
+ if (pf->veb[i]->uplink_seid == branch->seid)
+ i40e_switch_branch_release(pf->veb[i]);
+ }
+
+ /* Release the VSIs on this VEB, but not the owner VSI.
+ *
+ * NOTE: Removing the last VSI on a VEB has the SIDE EFFECT of removing
+ * the VEB itself, so don't use (*branch) after this loop.
+ */
+ for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
+ if (!pf->vsi[i])
+ continue;
+ if (pf->vsi[i]->uplink_seid == branch_seid &&
+ (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
+ i40e_vsi_release(pf->vsi[i]);
+ }
+ }
+
+ /* There's one corner case where the VEB might not have been
+ * removed, so double check it here and remove it if needed.
+ * This case happens if the veb was created from the debugfs
+ * commands and no VSIs were added to it.
+ */
+ if (pf->veb[veb_idx])
+ i40e_veb_release(pf->veb[veb_idx]);
+}
+
+/**
+ * i40e_veb_clear - remove veb struct
+ * @veb: the veb to remove
+ **/
+static void i40e_veb_clear(struct i40e_veb *veb)
+{
+ if (!veb)
+ return;
+
+ if (veb->pf) {
+ struct i40e_pf *pf = veb->pf;
+
+ mutex_lock(&pf->switch_mutex);
+ if (pf->veb[veb->idx] == veb)
+ pf->veb[veb->idx] = NULL;
+ mutex_unlock(&pf->switch_mutex);
+ }
+
+ kfree(veb);
+}
+
+/**
+ * i40e_veb_release - Delete a VEB and free its resources
+ * @veb: the VEB being removed
+ **/
+void i40e_veb_release(struct i40e_veb *veb)
+{
+ struct i40e_vsi *vsi = NULL;
+ struct i40e_pf *pf;
+ int i, n = 0;
+
+ pf = veb->pf;
+
+ /* find the remaining VSI and check for extras */
+ for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
+ if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) {
+ n++;
+ vsi = pf->vsi[i];
+ }
+ }
+ if (n != 1) {
+ dev_info(&pf->pdev->dev,
+ "can't remove VEB %d with %d VSIs left\n",
+ veb->seid, n);
+ return;
+ }
+
+ /* move the remaining VSI to uplink veb */
+ vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER;
+ if (veb->uplink_seid) {
+ vsi->uplink_seid = veb->uplink_seid;
+ if (veb->uplink_seid == pf->mac_seid)
+ vsi->veb_idx = I40E_NO_VEB;
+ else
+ vsi->veb_idx = veb->veb_idx;
+ } else {
+ /* floating VEB */
+ vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
+ vsi->veb_idx = pf->vsi[pf->lan_vsi]->veb_idx;
+ }
+
+ i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
+ i40e_veb_clear(veb);
+}
+
+/**
+ * i40e_add_veb - create the VEB in the switch
+ * @veb: the VEB to be instantiated
+ * @vsi: the controlling VSI
+ **/
+static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
+{
+ bool is_default = (vsi->idx == vsi->back->lan_vsi);
+ int ret;
+
+ /* get a VEB from the hardware */
+ ret = i40e_aq_add_veb(&veb->pf->hw, veb->uplink_seid, vsi->seid,
+ veb->enabled_tc, is_default, &veb->seid, NULL);
+ if (ret) {
+ dev_info(&veb->pf->pdev->dev,
+ "couldn't add VEB, err %d, aq_err %d\n",
+ ret, veb->pf->hw.aq.asq_last_status);
+ return -EPERM;
+ }
+
+ /* get statistics counter */
+ ret = i40e_aq_get_veb_parameters(&veb->pf->hw, veb->seid, NULL, NULL,
+ &veb->stats_idx, NULL, NULL, NULL);
+ if (ret) {
+ dev_info(&veb->pf->pdev->dev,
+ "couldn't get VEB statistics idx, err %d, aq_err %d\n",
+ ret, veb->pf->hw.aq.asq_last_status);
+ return -EPERM;
+ }
+ ret = i40e_veb_get_bw_info(veb);
+ if (ret) {
+ dev_info(&veb->pf->pdev->dev,
+ "couldn't get VEB bw info, err %d, aq_err %d\n",
+ ret, veb->pf->hw.aq.asq_last_status);
+ i40e_aq_delete_element(&veb->pf->hw, veb->seid, NULL);
+ return -ENOENT;
+ }
+
+ vsi->uplink_seid = veb->seid;
+ vsi->veb_idx = veb->idx;
+ vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
+
+ return 0;
+}
+
+/**
+ * i40e_veb_setup - Set up a VEB
+ * @pf: board private structure
+ * @flags: VEB setup flags
+ * @uplink_seid: the switch element to link to
+ * @vsi_seid: the initial VSI seid
+ * @enabled_tc: Enabled TC bit-map
+ *
+ * This allocates the sw VEB structure and links it into the switch.
+ * It is possible and legal for this to be a duplicate of an already
+ * existing VEB. It is also possible for both uplink and vsi seids
+ * to be zero, in order to create a floating VEB.
+ *
+ * Returns pointer to the successfully allocated VEB sw struct on
+ * success, otherwise returns NULL on failure.
+ **/
+struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
+ u16 uplink_seid, u16 vsi_seid,
+ u8 enabled_tc)
+{
+ struct i40e_veb *veb, *uplink_veb = NULL;
+ int vsi_idx, veb_idx;
+ int ret;
+
+ /* if one seid is 0, the other must be 0 to create a floating relay */
+ if ((uplink_seid == 0 || vsi_seid == 0) &&
+ (uplink_seid + vsi_seid != 0)) {
+ dev_info(&pf->pdev->dev,
+ "one, not both seid's are 0: uplink=%d vsi=%d\n",
+ uplink_seid, vsi_seid);
+ return NULL;
+ }
+
+ /* make sure there is such a vsi and uplink */
+ for (vsi_idx = 0; vsi_idx < pf->hw.func_caps.num_vsis; vsi_idx++)
+ if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid)
+ break;
+ if (vsi_idx >= pf->hw.func_caps.num_vsis && vsi_seid != 0) {
+ dev_info(&pf->pdev->dev, "vsi seid %d not found\n",
+ vsi_seid);
+ return NULL;
+ }
+
+ if (uplink_seid && uplink_seid != pf->mac_seid) {
+ for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
+ if (pf->veb[veb_idx] &&
+ pf->veb[veb_idx]->seid == uplink_seid) {
+ uplink_veb = pf->veb[veb_idx];
+ break;
+ }
+ }
+ if (!uplink_veb) {
+ dev_info(&pf->pdev->dev,
+ "uplink seid %d not found\n", uplink_seid);
+ return NULL;
+ }
+ }
+
+ /* get veb sw struct */
+ veb_idx = i40e_veb_mem_alloc(pf);
+ if (veb_idx < 0)
+ goto err_alloc;
+ veb = pf->veb[veb_idx];
+ veb->flags = flags;
+ veb->uplink_seid = uplink_seid;
+ veb->veb_idx = (uplink_veb ? uplink_veb->idx : I40E_NO_VEB);
+ veb->enabled_tc = (enabled_tc ? enabled_tc : 0x1);
+
+ /* create the VEB in the switch */
+ ret = i40e_add_veb(veb, pf->vsi[vsi_idx]);
+ if (ret)
+ goto err_veb;
+
+ return veb;
+
+err_veb:
+ i40e_veb_clear(veb);
+err_alloc:
+ return NULL;
+}
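
A usage sketch mirroring how i40e_vsi_setup() above interposes a VEB between
an existing VSI and its current uplink:

	veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
			     vsi->tc_config.enabled_tc);
	if (!veb)
		dev_info(&pf->pdev->dev, "couldn't add VEB\n");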
+
+/**
+ * i40e_setup_pf_switch_element - set pf vars based on switch type
+ * @pf: board private structure
+ * @ele: element we are building info from
+ * @num_reported: total number of elements
+ * @printconfig: should we print the contents
+ *
+ * Helper function for extracting a few useful SEID values.
+ **/
+static void i40e_setup_pf_switch_element(struct i40e_pf *pf,
+ struct i40e_aqc_switch_config_element_resp *ele,
+ u16 num_reported, bool printconfig)
+{
+ u16 downlink_seid = le16_to_cpu(ele->downlink_seid);
+ u16 uplink_seid = le16_to_cpu(ele->uplink_seid);
+ u8 element_type = ele->element_type;
+ u16 seid = le16_to_cpu(ele->seid);
+
+ if (printconfig)
+ dev_info(&pf->pdev->dev,
+ "type=%d seid=%d uplink=%d downlink=%d\n",
+ element_type, seid, uplink_seid, downlink_seid);
+
+ switch (element_type) {
+ case I40E_SWITCH_ELEMENT_TYPE_MAC:
+ pf->mac_seid = seid;
+ break;
+ case I40E_SWITCH_ELEMENT_TYPE_VEB:
+ /* Main VEB? */
+ if (uplink_seid != pf->mac_seid)
+ break;
+ if (pf->lan_veb == I40E_NO_VEB) {
+ int v;
+
+ /* find existing or else empty VEB */
+ for (v = 0; v < I40E_MAX_VEB; v++) {
+ if (pf->veb[v] && (pf->veb[v]->seid == seid)) {
+ pf->lan_veb = v;
+ break;
+ }
+ }
+ if (pf->lan_veb == I40E_NO_VEB) {
+ v = i40e_veb_mem_alloc(pf);
+ if (v < 0)
+ break;
+ pf->lan_veb = v;
+ }
+ }
+
+ pf->veb[pf->lan_veb]->seid = seid;
+ pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid;
+ pf->veb[pf->lan_veb]->pf = pf;
+ pf->veb[pf->lan_veb]->veb_idx = I40E_NO_VEB;
+ break;
+ case I40E_SWITCH_ELEMENT_TYPE_VSI:
+ if (num_reported != 1)
+ break;
+ /* This is immediately after a reset so we can assume this is
+ * the PF's VSI
+ */
+ pf->mac_seid = uplink_seid;
+ pf->pf_seid = downlink_seid;
+ pf->main_vsi_seid = seid;
+ if (printconfig)
+ dev_info(&pf->pdev->dev,
+ "pf_seid=%d main_vsi_seid=%d\n",
+ pf->pf_seid, pf->main_vsi_seid);
+ break;
+ case I40E_SWITCH_ELEMENT_TYPE_PF:
+ case I40E_SWITCH_ELEMENT_TYPE_VF:
+ case I40E_SWITCH_ELEMENT_TYPE_EMP:
+ case I40E_SWITCH_ELEMENT_TYPE_BMC:
+ case I40E_SWITCH_ELEMENT_TYPE_PE:
+ case I40E_SWITCH_ELEMENT_TYPE_PA:
+ /* ignore these for now */
+ break;
+ default:
+ dev_info(&pf->pdev->dev, "unknown element type=%d seid=%d\n",
+ element_type, seid);
+ break;
+ }
+}
+
+/**
+ * i40e_fetch_switch_configuration - Get switch config from firmware
+ * @pf: board private structure
+ * @printconfig: should we print the contents
+ *
+ * Get the current switch configuration from the device and
+ * extract a few useful SEID values.
+ **/
+int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
+{
+ struct i40e_aqc_get_switch_config_resp *sw_config;
+ u16 next_seid = 0;
+ int ret = 0;
+ u8 *aq_buf;
+ int i;
+
+ aq_buf = kzalloc(I40E_AQ_LARGE_BUF, GFP_KERNEL);
+ if (!aq_buf)
+ return -ENOMEM;
+
+ sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
+ do {
+ u16 num_reported, num_total;
+
+ ret = i40e_aq_get_switch_config(&pf->hw, sw_config,
+ I40E_AQ_LARGE_BUF,
+ &next_seid, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "get switch config failed %d aq_err=%x\n",
+ ret, pf->hw.aq.asq_last_status);
+ kfree(aq_buf);
+ return -ENOENT;
+ }
+
+ num_reported = le16_to_cpu(sw_config->header.num_reported);
+ num_total = le16_to_cpu(sw_config->header.num_total);
+
+ if (printconfig)
+ dev_info(&pf->pdev->dev,
+ "header: %d reported %d total\n",
+ num_reported, num_total);
+
+ if (num_reported) {
+ int sz = sizeof(*sw_config) * num_reported;
+
+ kfree(pf->sw_config);
+ pf->sw_config = kzalloc(sz, GFP_KERNEL);
+ if (pf->sw_config)
+ memcpy(pf->sw_config, sw_config, sz);
+ }
+
+ for (i = 0; i < num_reported; i++) {
+ struct i40e_aqc_switch_config_element_resp *ele =
+ &sw_config->element[i];
+
+ i40e_setup_pf_switch_element(pf, ele, num_reported,
+ printconfig);
+ }
+ } while (next_seid != 0);
+
+ kfree(aq_buf);
+ return ret;
+}
+
+/**
+ * i40e_setup_pf_switch - Setup the HW switch on startup or after reset
+ * @pf: board private structure
+ *
+ * Returns 0 on success, negative value on failure
+ **/
+static int i40e_setup_pf_switch(struct i40e_pf *pf)
+{
+ int ret;
+
+ /* find out what's out there already */
+ ret = i40e_fetch_switch_configuration(pf, false);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "couldn't fetch switch config, err %d, aq_err %d\n",
+ ret, pf->hw.aq.asq_last_status);
+ return ret;
+ }
+ i40e_pf_reset_stats(pf);
+
+ /* fdir VSI must happen first to be sure it gets queue 0, but only
+ * if there is enough room for the fdir VSI
+ */
+ if (pf->num_lan_qps > 1)
+ i40e_fdir_setup(pf);
+
+ /* first time setup */
+ if (pf->lan_vsi == I40E_NO_VSI) {
+ struct i40e_vsi *vsi = NULL;
+ u16 uplink_seid;
+
+ /* Set up the PF VSI associated with the PF's main VSI
+ * that is already in the HW switch
+ */
+ if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
+ uplink_seid = pf->veb[pf->lan_veb]->seid;
+ else
+ uplink_seid = pf->mac_seid;
+
+ vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0);
+ if (!vsi) {
+ dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n");
+ i40e_fdir_teardown(pf);
+ return -EAGAIN;
+ }
+ /* accommodate kcompat by copying the main VSI queue count
+ * into the pf, since this newer code pushes the pf queue
+ * info down a level into a VSI
+ */
+ pf->num_rx_queues = vsi->alloc_queue_pairs;
+ pf->num_tx_queues = vsi->alloc_queue_pairs;
+ } else {
+ /* force a reset of TC and queue layout configurations */
+ u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
+ pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
+ pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
+ i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
+ }
+ i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]);
+
+ /* Setup static PF queue filter control settings */
+ ret = i40e_setup_pf_filter_control(pf);
+ if (ret) {
+ dev_info(&pf->pdev->dev, "setup_pf_filter_control failed: %d\n",
+ ret);
+ /* Failure here should not stop continuing other steps */
+ }
+
+ /* enable RSS in the HW, even for only one queue, as the stack can use
+ * the hash
+ */
+ if ((pf->flags & I40E_FLAG_RSS_ENABLED))
+ i40e_config_rss(pf);
+
+ /* fill in link information and enable LSE reporting */
+ i40e_aq_get_link_info(&pf->hw, true, NULL, NULL);
+ i40e_link_event(pf);
+
+ /* Initialize user-specifics link properties */
+ pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info &
+ I40E_AQ_AN_COMPLETED) ? true : false);
+ pf->hw.fc.requested_mode = I40E_FC_DEFAULT;
+ if (pf->hw.phy.link_info.an_info &
+ (I40E_AQ_LINK_PAUSE_TX | I40E_AQ_LINK_PAUSE_RX))
+ pf->hw.fc.current_mode = I40E_FC_FULL;
+ else if (pf->hw.phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX)
+ pf->hw.fc.current_mode = I40E_FC_TX_PAUSE;
+ else if (pf->hw.phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX)
+ pf->hw.fc.current_mode = I40E_FC_RX_PAUSE;
+ else
+ pf->hw.fc.current_mode = I40E_FC_DEFAULT;
+
+ return ret;
+}
+
+/**
+ * i40e_set_rss_size - helper to set rss_size
+ * @pf: board private structure
+ * @queues_left: how many queues are available
+ **/
+static u16 i40e_set_rss_size(struct i40e_pf *pf, int queues_left)
+{
+ int num_tc0;
+
+ num_tc0 = min_t(int, queues_left, pf->rss_size_max);
+ num_tc0 = min_t(int, num_tc0, nr_cpus_node(numa_node_id()));
+ num_tc0 = rounddown_pow_of_two(num_tc0);
+
+ return num_tc0;
+}
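
A worked example of the clamping above, under assumed values:

	/* e.g. queues_left = 60, rss_size_max = 64, 24 CPUs on this node:
	 *   min_t(int, 60, 64)        -> 60
	 *   min_t(int, 60, 24)        -> 24
	 *   rounddown_pow_of_two(24)  -> 16 queue pairs for RSS in TC0
	 */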
+
+/**
+ * i40e_determine_queue_usage - Work out queue distribution
+ * @pf: board private structure
+ **/
+static void i40e_determine_queue_usage(struct i40e_pf *pf)
+{
+ int accum_tc_size;
+ int queues_left;
+
+ pf->num_lan_qps = 0;
+ pf->num_tc_qps = rounddown_pow_of_two(pf->num_tc_qps);
+ accum_tc_size = (I40E_MAX_TRAFFIC_CLASS - 1) * pf->num_tc_qps;
+
+ /* Find the max queues to be put into basic use. We'll always be
+ * using TC0, whether or not DCB is running, and TC0 will get the
+ * big RSS set.
+ */
+ queues_left = pf->hw.func_caps.num_tx_qp;
+
+ if (!((pf->flags & I40E_FLAG_MSIX_ENABLED) &&
+ (pf->flags & I40E_FLAG_MQ_ENABLED)) ||
+ !(pf->flags & (I40E_FLAG_RSS_ENABLED |
+ I40E_FLAG_FDIR_ENABLED | I40E_FLAG_DCB_ENABLED)) ||
+ (queues_left == 1)) {
+
+ /* one qp for PF, no queues for anything else */
+ queues_left = 0;
+ pf->rss_size = pf->num_lan_qps = 1;
+
+ /* make sure all the fancies are disabled */
+ pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
+ I40E_FLAG_MQ_ENABLED |
+ I40E_FLAG_FDIR_ENABLED |
+ I40E_FLAG_FDIR_ATR_ENABLED |
+ I40E_FLAG_DCB_ENABLED |
+ I40E_FLAG_SRIOV_ENABLED |
+ I40E_FLAG_VMDQ_ENABLED);
+
+ } else if (pf->flags & I40E_FLAG_RSS_ENABLED &&
+ !(pf->flags & I40E_FLAG_FDIR_ENABLED) &&
+ !(pf->flags & I40E_FLAG_DCB_ENABLED)) {
+
+ pf->rss_size = i40e_set_rss_size(pf, queues_left);
+
+ queues_left -= pf->rss_size;
+ pf->num_lan_qps = pf->rss_size;
+
+ } else if (pf->flags & I40E_FLAG_RSS_ENABLED &&
+ !(pf->flags & I40E_FLAG_FDIR_ENABLED) &&
+ (pf->flags & I40E_FLAG_DCB_ENABLED)) {
+
+ /* save num_tc_qps queues for TCs 1 thru 7 and the rest
+ * are set up for RSS in TC0
+ */
+ queues_left -= accum_tc_size;
+
+ pf->rss_size = i40e_set_rss_size(pf, queues_left);
+
+ queues_left -= pf->rss_size;
+ if (queues_left < 0) {
+ dev_info(&pf->pdev->dev, "not enough queues for DCB\n");
+ return;
+ }
+
+ pf->num_lan_qps = pf->rss_size + accum_tc_size;
+
+ } else if (pf->flags & I40E_FLAG_RSS_ENABLED &&
+ (pf->flags & I40E_FLAG_FDIR_ENABLED) &&
+ !(pf->flags & I40E_FLAG_DCB_ENABLED)) {
+
+ queues_left -= 1; /* save 1 queue for FD */
+
+ pf->rss_size = i40e_set_rss_size(pf, queues_left);
+
+ queues_left -= pf->rss_size;
+ if (queues_left < 0) {
+ dev_info(&pf->pdev->dev, "not enough queues for Flow Director\n");
+ return;
+ }
+
+ pf->num_lan_qps = pf->rss_size;
+
+ } else if (pf->flags & I40E_FLAG_RSS_ENABLED &&
+ (pf->flags & I40E_FLAG_FDIR_ENABLED) &&
+ (pf->flags & I40E_FLAG_DCB_ENABLED)) {
+
+ /* save 1 queue for TCs 1 thru 7,
+ * 1 queue for flow director,
+ * and the rest are set up for RSS in TC0
+ */
+ queues_left -= 1;
+ queues_left -= accum_tc_size;
+
+ pf->rss_size = i40e_set_rss_size(pf, queues_left);
+ queues_left -= pf->rss_size;
+ if (queues_left < 0) {
+ dev_info(&pf->pdev->dev, "not enough queues for DCB and Flow Director\n");
+ return;
+ }
+
+ pf->num_lan_qps = pf->rss_size + accum_tc_size;
+
+ } else {
+ dev_info(&pf->pdev->dev,
+ "Invalid configuration, flags=0x%08llx\n", pf->flags);
+ return;
+ }
+
+ if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
+ pf->num_vf_qps && pf->num_req_vfs && queues_left) {
+ pf->num_req_vfs = min_t(int, pf->num_req_vfs, (queues_left /
+ pf->num_vf_qps));
+ queues_left -= (pf->num_req_vfs * pf->num_vf_qps);
+ }
+
+ if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
+ pf->num_vmdq_vsis && pf->num_vmdq_qps && queues_left) {
+ pf->num_vmdq_vsis = min_t(int, pf->num_vmdq_vsis,
+ (queues_left / pf->num_vmdq_qps));
+ queues_left -= (pf->num_vmdq_vsis * pf->num_vmdq_qps);
+ }
+}
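
A hypothetical walk-through of the RSS + Flow Director + DCB branch above,
assuming num_tx_qp = 64, num_tc_qps = 8, and an rss_size of 4 after
i40e_set_rss_size():

	/*   accum_tc_size  = (8 - 1) * 8  = 56   queues for TCs 1..7
	 *   queues_left    = 64 - 1 - 56  = 7    after 1 queue for FD
	 *   rss_size       = 4                   pow2-clamped from the 7 left
	 *   num_lan_qps    = 4 + 56       = 60   leaving 3 for VFs/VMDq
	 */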
+
+/**
+ * i40e_setup_pf_filter_control - Setup PF static filter control
+ * @pf: PF to be setup
+ *
+ * i40e_setup_pf_filter_control sets up a pf's initial filter control
+ * settings. If PE/FCoE are enabled then it will also set the per PF
+ * based filter sizes required for them. It also enables Flow Director,
+ * ethertype and macvlan type filter settings for the pf.
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int i40e_setup_pf_filter_control(struct i40e_pf *pf)
+{
+ struct i40e_filter_control_settings *settings = &pf->filter_settings;
+
+ settings->hash_lut_size = I40E_HASH_LUT_SIZE_128;
+
+ /* Flow Director is enabled */
+ if (pf->flags & (I40E_FLAG_FDIR_ENABLED | I40E_FLAG_FDIR_ATR_ENABLED))
+ settings->enable_fdir = true;
+
+ /* Ethtype and MACVLAN filters enabled for PF */
+ settings->enable_ethtype = true;
+ settings->enable_macvlan = true;
+
+ if (i40e_set_filter_control(&pf->hw, settings))
+ return -ENOENT;
+
+ return 0;
+}
+
+/**
+ * i40e_probe - Device initialization routine
+ * @pdev: PCI device information struct
+ * @ent: entry in i40e_pci_tbl
+ *
+ * i40e_probe initializes a pf identified by a pci_dev structure.
+ * The OS initialization, configuring of the pf private structure,
+ * and a hardware reset occur.
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct i40e_driver_version dv;
+ struct i40e_pf *pf;
+ struct i40e_hw *hw;
+ int err = 0;
+ u32 len;
+
+ err = pci_enable_device_mem(pdev);
+ if (err)
+ return err;
+
+ /* set up for high or low dma */
+ if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
+ /* coherent mask for the same size will always succeed if
+ * dma_set_mask does
+ */
+ dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
+ } else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
+ dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+ } else {
+ dev_err(&pdev->dev, "DMA configuration failed: %d\n", err);
+ err = -EIO;
+ goto err_dma;
+ }
+
+ /* set up pci connections */
+ err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
+ IORESOURCE_MEM), i40e_driver_name);
+ if (err) {
+ dev_info(&pdev->dev,
+ "pci_request_selected_regions failed %d\n", err);
+ goto err_pci_reg;
+ }
+
+ pci_enable_pcie_error_reporting(pdev);
+ pci_set_master(pdev);
+
+ /* Now that we have a PCI connection, we need to do the
+ * low level device setup. This is primarily setting up
+ * the Admin Queue structures and then querying for the
+ * device's current profile information.
+ */
+ pf = kzalloc(sizeof(*pf), GFP_KERNEL);
+ if (!pf) {
+ err = -ENOMEM;
+ goto err_pf_alloc;
+ }
+ pf->next_vsi = 0;
+ pf->pdev = pdev;
+ set_bit(__I40E_DOWN, &pf->state);
+
+ hw = &pf->hw;
+ hw->back = pf;
+ hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
+ if (!hw->hw_addr) {
+ err = -EIO;
+ dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n",
+ (unsigned int)pci_resource_start(pdev, 0),
+ (unsigned int)pci_resource_len(pdev, 0), err);
+ goto err_ioremap;
+ }
+ hw->vendor_id = pdev->vendor;
+ hw->device_id = pdev->device;
+ pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
+ hw->subsystem_vendor_id = pdev->subsystem_vendor;
+ hw->subsystem_device_id = pdev->subsystem_device;
+ hw->bus.device = PCI_SLOT(pdev->devfn);
+ hw->bus.func = PCI_FUNC(pdev->devfn);
+
+ /* Reset here to make sure all is clean and to define PF 'n' */
+ err = i40e_pf_reset(hw);
+ if (err) {
+ dev_info(&pdev->dev, "Initial pf_reset failed: %d\n", err);
+ goto err_pf_reset;
+ }
+ pf->pfr_count++;
+
+ hw->aq.num_arq_entries = I40E_AQ_LEN;
+ hw->aq.num_asq_entries = I40E_AQ_LEN;
+ hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE;
+ hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE;
+ pf->adminq_work_limit = I40E_AQ_WORK_LIMIT;
+ snprintf(pf->misc_int_name, sizeof(pf->misc_int_name) - 1,
+ "%s-pf%d:misc",
+ dev_driver_string(&pf->pdev->dev), pf->hw.pf_id);
+
+ err = i40e_init_shared_code(hw);
+ if (err) {
+ dev_info(&pdev->dev, "init_shared_code failed: %d\n", err);
+ goto err_pf_reset;
+ }
+
+ err = i40e_init_adminq(hw);
+ dev_info(&pdev->dev, "%s\n", i40e_fw_version_str(hw));
+ if (err) {
+ dev_info(&pdev->dev,
+ "init_adminq failed: %d expecting API %02x.%02x\n",
+ err,
+ I40E_FW_API_VERSION_MAJOR, I40E_FW_API_VERSION_MINOR);
+ goto err_pf_reset;
+ }
+
+ err = i40e_get_capabilities(pf);
+ if (err)
+ goto err_adminq_setup;
+
+ err = i40e_sw_init(pf);
+ if (err) {
+ dev_info(&pdev->dev, "sw_init failed: %d\n", err);
+ goto err_sw_init;
+ }
+
+ err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
+ hw->func_caps.num_rx_qp,
+ pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num);
+ if (err) {
+ dev_info(&pdev->dev, "init_lan_hmc failed: %d\n", err);
+ goto err_init_lan_hmc;
+ }
+
+ err = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
+ if (err) {
+ dev_info(&pdev->dev, "configure_lan_hmc failed: %d\n", err);
+ err = -ENOENT;
+ goto err_configure_lan_hmc;
+ }
+
+ i40e_get_mac_addr(hw, hw->mac.addr);
+ if (i40e_validate_mac_addr(hw->mac.addr)) {
+ dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr);
+ err = -EIO;
+ goto err_mac_addr;
+ }
+ dev_info(&pdev->dev, "MAC address: %pM\n", hw->mac.addr);
+ memcpy(hw->mac.perm_addr, hw->mac.addr, ETH_ALEN);
+
+ pci_set_drvdata(pdev, pf);
+ pci_save_state(pdev);
+
+ /* set up periodic task facility */
+ setup_timer(&pf->service_timer, i40e_service_timer, (unsigned long)pf);
+ pf->service_timer_period = HZ;
+
+ INIT_WORK(&pf->service_task, i40e_service_task);
+ clear_bit(__I40E_SERVICE_SCHED, &pf->state);
+ pf->flags |= I40E_FLAG_NEED_LINK_UPDATE;
+ pf->link_check_timeout = jiffies;
+
+ /* set up the main switch operations */
+ i40e_determine_queue_usage(pf);
+ i40e_init_interrupt_scheme(pf);
+
+ /* Set up the *vsi struct based on the number of VSIs in the HW,
+ * and set up our local tracking of the MAIN PF vsi.
+ */
+ len = sizeof(struct i40e_vsi *) * pf->hw.func_caps.num_vsis;
+ pf->vsi = kzalloc(len, GFP_KERNEL);
+ if (!pf->vsi) {
+ err = -ENOMEM;
+ goto err_switch_setup;
+ }
+
+ err = i40e_setup_pf_switch(pf);
+ if (err) {
+ dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
+ goto err_vsis;
+ }
+
+ /* The main driver is (mostly) up and happy. We need to set this state
+ * before setting up the misc vector or we get a race and the vector
+ * ends up disabled forever.
+ */
+ clear_bit(__I40E_DOWN, &pf->state);
+
+ /* In case of MSIX we are going to setup the misc vector right here
+ * to handle admin queue events etc. In case of legacy and MSI
+ * the misc functionality and queue processing is combined in
+ * the same vector and that gets setup at open.
+ */
+ if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
+ err = i40e_setup_misc_vector(pf);
+ if (err) {
+ dev_info(&pdev->dev,
+ "setup of misc vector failed: %d\n", err);
+ goto err_vsis;
+ }
+ }
+
+ /* prep for VF support */
+ if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
+ (pf->flags & I40E_FLAG_MSIX_ENABLED)) {
+ u32 val;
+
+ /* disable link interrupts for VFs */
+ val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM);
+ val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
+ wr32(hw, I40E_PFGEN_PORTMDIO_NUM, val);
+ i40e_flush(hw);
+ }
+
+ i40e_dbg_pf_init(pf);
+
+ /* tell the firmware that we're starting */
+ dv.major_version = DRV_VERSION_MAJOR;
+ dv.minor_version = DRV_VERSION_MINOR;
+ dv.build_version = DRV_VERSION_BUILD;
+ dv.subbuild_version = 0;
+ i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
+
+ /* since everything's happy, start the service_task timer */
+ mod_timer(&pf->service_timer,
+ round_jiffies(jiffies + pf->service_timer_period));
+
+ return 0;
+
+ /* Unwind what we've done if something failed in the setup */
+err_vsis:
+ set_bit(__I40E_DOWN, &pf->state);
+err_switch_setup:
+ i40e_clear_interrupt_scheme(pf);
+ kfree(pf->vsi);
+ del_timer_sync(&pf->service_timer);
+err_mac_addr:
+err_configure_lan_hmc:
+ (void)i40e_shutdown_lan_hmc(hw);
+err_init_lan_hmc:
+ kfree(pf->qp_pile);
+ kfree(pf->irq_pile);
+err_sw_init:
+err_adminq_setup:
+ (void)i40e_shutdown_adminq(hw);
+err_pf_reset:
+ iounmap(hw->hw_addr);
+err_ioremap:
+ kfree(pf);
+err_pf_alloc:
+ pci_disable_pcie_error_reporting(pdev);
+ pci_release_selected_regions(pdev,
+ pci_select_bars(pdev, IORESOURCE_MEM));
+err_pci_reg:
+err_dma:
+ pci_disable_device(pdev);
+ return err;
+}
+
+/**
+ * i40e_remove - Device removal routine
+ * @pdev: PCI device information struct
+ *
+ * i40e_remove is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device. This could be caused by a
+ * Hot-Plug event, or because the driver is going to be removed from
+ * memory.
+ **/
+static void i40e_remove(struct pci_dev *pdev)
+{
+ struct i40e_pf *pf = pci_get_drvdata(pdev);
+ i40e_status ret_code;
+ u32 reg;
+ int i;
+
+ i40e_dbg_pf_exit(pf);
+
+ if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
+ i40e_free_vfs(pf);
+ pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
+ }
+
+ /* no more scheduling of any task */
+ set_bit(__I40E_DOWN, &pf->state);
+ del_timer_sync(&pf->service_timer);
+ cancel_work_sync(&pf->service_task);
+
+ i40e_fdir_teardown(pf);
+
+ /* If there is a switch structure or any orphans, remove them.
+ * This will leave only the PF's VSI remaining.
+ */
+ for (i = 0; i < I40E_MAX_VEB; i++) {
+ if (!pf->veb[i])
+ continue;
+
+ if (pf->veb[i]->uplink_seid == pf->mac_seid ||
+ pf->veb[i]->uplink_seid == 0)
+ i40e_switch_branch_release(pf->veb[i]);
+ }
+
+ /* Now we can shutdown the PF's VSI, just before we kill
+ * adminq and hmc.
+ */
+ if (pf->vsi[pf->lan_vsi])
+ i40e_vsi_release(pf->vsi[pf->lan_vsi]);
+
+ i40e_stop_misc_vector(pf);
+ if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
+ synchronize_irq(pf->msix_entries[0].vector);
+ free_irq(pf->msix_entries[0].vector, pf);
+ }
+
+ /* shutdown and destroy the HMC */
+ ret_code = i40e_shutdown_lan_hmc(&pf->hw);
+ if (ret_code)
+ dev_warn(&pdev->dev,
+ "Failed to destroy the HMC resources: %d\n", ret_code);
+
+ /* shutdown the adminq */
+ i40e_aq_queue_shutdown(&pf->hw, true);
+ ret_code = i40e_shutdown_adminq(&pf->hw);
+ if (ret_code)
+ dev_warn(&pdev->dev,
+ "Failed to destroy the Admin Queue resources: %d\n",
+ ret_code);
+
+ /* Clear all dynamic memory lists of rings, q_vectors, and VSIs */
+ i40e_clear_interrupt_scheme(pf);
+ for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
+ if (pf->vsi[i]) {
+ i40e_vsi_clear_rings(pf->vsi[i]);
+ i40e_vsi_clear(pf->vsi[i]);
+ pf->vsi[i] = NULL;
+ }
+ }
+
+ for (i = 0; i < I40E_MAX_VEB; i++) {
+ kfree(pf->veb[i]);
+ pf->veb[i] = NULL;
+ }
+
+ kfree(pf->qp_pile);
+ kfree(pf->irq_pile);
+ kfree(pf->sw_config);
+ kfree(pf->vsi);
+
+ /* force a PF reset to clean anything leftover */
+ reg = rd32(&pf->hw, I40E_PFGEN_CTRL);
+ wr32(&pf->hw, I40E_PFGEN_CTRL, (reg | I40E_PFGEN_CTRL_PFSWR_MASK));
+ i40e_flush(&pf->hw);
+
+ iounmap(pf->hw.hw_addr);
+ kfree(pf);
+ pci_release_selected_regions(pdev,
+ pci_select_bars(pdev, IORESOURCE_MEM));
+
+ pci_disable_pcie_error_reporting(pdev);
+ pci_disable_device(pdev);
+}
+
+/**
+ * i40e_pci_error_detected - warning that something funky happened in PCI land
+ * @pdev: PCI device information struct
+ * @error: the channel state that triggered the callback
+ *
+ * Called to warn that something happened and the error handling steps
+ * are in progress. Allows the driver to quiesce things and be ready for
+ * remediation.
+ **/
+static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
+ enum pci_channel_state error)
+{
+ struct i40e_pf *pf = pci_get_drvdata(pdev);
+
+ dev_info(&pdev->dev, "%s: error %d\n", __func__, error);
+
+ /* shutdown all operations */
+ i40e_pf_quiesce_all_vsi(pf);
+
+ /* Request a slot reset */
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
+ * i40e_pci_error_slot_reset - a PCI slot reset just happened
+ * @pdev: PCI device information struct
+ *
+ * Called to find if the driver can work with the device now that
+ * the pci slot has been reset. If a basic connection seems good
+ * (registers are readable and have sane content) then return a
+ * happy little PCI_ERS_RESULT_xxx.
+ **/
+static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev)
+{
+ struct i40e_pf *pf = pci_get_drvdata(pdev);
+ pci_ers_result_t result;
+ int err;
+ u32 reg;
+
+ dev_info(&pdev->dev, "%s\n", __func__);
+ if (pci_enable_device_mem(pdev)) {
+ dev_info(&pdev->dev,
+ "Cannot re-enable PCI device after reset.\n");
+ result = PCI_ERS_RESULT_DISCONNECT;
+ } else {
+ pci_set_master(pdev);
+ pci_restore_state(pdev);
+ pci_save_state(pdev);
+ pci_wake_from_d3(pdev, false);
+
+ reg = rd32(&pf->hw, I40E_GLGEN_RTRIG);
+ if (reg == 0)
+ result = PCI_ERS_RESULT_RECOVERED;
+ else
+ result = PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ err = pci_cleanup_aer_uncorrect_error_status(pdev);
+ if (err) {
+ dev_info(&pdev->dev,
+ "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
+ err);
+ /* non-fatal, continue */
+ }
+
+ return result;
+}
+
+/**
+ * i40e_pci_error_resume - restart operations after PCI error recovery
+ * @pdev: PCI device information struct
+ *
+ * Called to allow the driver to bring things back up after PCI error
+ * and/or reset recovery has finished.
+ **/
+static void i40e_pci_error_resume(struct pci_dev *pdev)
+{
+ struct i40e_pf *pf = pci_get_drvdata(pdev);
+
+ dev_info(&pdev->dev, "%s\n", __func__);
+ i40e_handle_reset_warning(pf);
+}
+
+static const struct pci_error_handlers i40e_err_handler = {
+ .error_detected = i40e_pci_error_detected,
+ .slot_reset = i40e_pci_error_slot_reset,
+ .resume = i40e_pci_error_resume,
+};
+
+static struct pci_driver i40e_driver = {
+ .name = i40e_driver_name,
+ .id_table = i40e_pci_tbl,
+ .probe = i40e_probe,
+ .remove = i40e_remove,
+ .err_handler = &i40e_err_handler,
+ .sriov_configure = i40e_pci_sriov_configure,
+};
+
+/**
+ * i40e_init_module - Driver registration routine
+ *
+ * i40e_init_module is the first routine called when the driver is
+ * loaded. All it does is register with the PCI subsystem.
+ **/
+static int __init i40e_init_module(void)
+{
+ pr_info("%s: %s - version %s\n", i40e_driver_name,
+ i40e_driver_string, i40e_driver_version_str);
+ pr_info("%s: %s\n", i40e_driver_name, i40e_copyright);
+ i40e_dbg_init();
+ return pci_register_driver(&i40e_driver);
+}
+module_init(i40e_init_module);
+
+/**
+ * i40e_exit_module - Driver exit cleanup routine
+ *
+ * i40e_exit_module is called just before the driver is removed
+ * from memory.
+ **/
+static void __exit i40e_exit_module(void)
+{
+ pci_unregister_driver(&i40e_driver);
+ i40e_dbg_exit();
+}
+module_exit(i40e_exit_module);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
new file mode 100644
index 0000000..97e1bb3
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -0,0 +1,391 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#include "i40e_prototype.h"
+
+/**
+ * i40e_init_nvm - Initialize NVM function pointers.
+ * @hw: pointer to the HW structure.
+ *
+ * Sets up the function pointers and the NVM info structure. Should be called
+ * once per NVM initialization, e.g. inside i40e_init_shared_code().
+ * Note that the term NVM is used here (and in all functions covered in this
+ * file) as an equivalent of the FLASH part mapped into the SR; the FLASH is
+ * always accessed through the Shadow RAM.
+ **/
+i40e_status i40e_init_nvm(struct i40e_hw *hw)
+{
+ struct i40e_nvm_info *nvm = &hw->nvm;
+ i40e_status ret_code = 0;
+ u32 fla, gens;
+ u8 sr_size;
+
+ /* The SR size is stored regardless of the nvm programming mode
+ * as the blank mode may be used in the factory line.
+ */
+ gens = rd32(hw, I40E_GLNVM_GENS);
+ sr_size = ((gens & I40E_GLNVM_GENS_SR_SIZE_MASK) >>
+ I40E_GLNVM_GENS_SR_SIZE_SHIFT);
+ /* Switching to words (sr_size contains power of 2KB). */
+ nvm->sr_size = (1 << sr_size) * I40E_SR_WORDS_IN_1KB;
+
+ /* Check if we are in the normal or blank NVM programming mode. */
+ fla = rd32(hw, I40E_GLNVM_FLA);
+ if (fla & I40E_GLNVM_FLA_LOCKED_MASK) { /* Normal programming mode. */
+ /* Max NVM timeout. */
+ nvm->timeout = I40E_MAX_NVM_TIMEOUT;
+ nvm->blank_nvm_mode = false;
+ } else { /* Blank programming mode. */
+ nvm->blank_nvm_mode = true;
+ ret_code = I40E_ERR_NVM_BLANK_MODE;
+ hw_dbg(hw, "NVM init error: unsupported blank mode.\n");
+ }
+
+ return ret_code;
+}
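
To make the size conversion concrete, a hedged example: a GLNVM_GENS SR_SIZE
field value of 6 gives

	nvm->sr_size = (1 << 6) * I40E_SR_WORDS_IN_1KB;	/* 64 * 512 = 32768 words */

i.e. the 64kB Shadow RAM that the checksum code later in this file walks.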
+
+/**
+ * i40e_acquire_nvm - Generic request for acquiring the NVM ownership.
+ * @hw: pointer to the HW structure.
+ * @access: NVM access type (read or write).
+ *
+ * This function will request NVM ownership, for reading or
+ * writing, via the proper Admin Command.
+ **/
+i40e_status i40e_acquire_nvm(struct i40e_hw *hw,
+ enum i40e_aq_resource_access_type access)
+{
+ i40e_status ret_code = 0;
+ u64 gtime, timeout;
+ u64 time = 0;
+
+ if (hw->nvm.blank_nvm_mode)
+ goto i40e_acquire_nvm_exit;
+
+ ret_code = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID, access,
+ 0, &time, NULL);
+ /* Reading the Global Device Timer. */
+ gtime = rd32(hw, I40E_GLVFGEN_TIMER);
+
+ /* Store the timeout. */
+ hw->nvm.hw_semaphore_timeout = I40E_MS_TO_GTIME(time) + gtime;
+
+ if (ret_code) {
+ /* Set the polling timeout. */
+ if (time > I40E_MAX_NVM_TIMEOUT)
+ timeout = I40E_MS_TO_GTIME(I40E_MAX_NVM_TIMEOUT)
+ + gtime;
+ else
+ timeout = hw->nvm.hw_semaphore_timeout;
+ /* Poll until the current NVM owner times out. */
+ while (gtime < timeout) {
+ usleep_range(10000, 20000);
+ ret_code = i40e_aq_request_resource(hw,
+ I40E_NVM_RESOURCE_ID,
+ access, 0, &time,
+ NULL);
+ if (!ret_code) {
+ hw->nvm.hw_semaphore_timeout =
+ I40E_MS_TO_GTIME(time) + gtime;
+ break;
+ }
+ gtime = rd32(hw, I40E_GLVFGEN_TIMER);
+ }
+ if (ret_code) {
+ hw->nvm.hw_semaphore_timeout = 0;
+ hw->nvm.hw_semaphore_wait =
+ I40E_MS_TO_GTIME(time) + gtime;
+ hw_dbg(hw, "NVM acquire timed out, wait %llu ms before trying again.\n",
+ time);
+ }
+ }
+
+i40e_acquire_nvm_exit:
+ return ret_code;
+}
+
+/**
+ * i40e_release_nvm - Generic request for releasing the NVM ownership.
+ * @hw: pointer to the HW structure.
+ *
+ * This function will release NVM resource via the proper Admin Command.
+ **/
+void i40e_release_nvm(struct i40e_hw *hw)
+{
+ if (!hw->nvm.blank_nvm_mode)
+ i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
+}
+
+/**
+ * i40e_poll_sr_srctl_done_bit - Polls the GLNVM_SRCTL done bit.
+ * @hw: pointer to the HW structure.
+ *
+ * Polls the SRCTL Shadow RAM register done bit.
+ **/
+static i40e_status i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
+{
+ i40e_status ret_code = I40E_ERR_TIMEOUT;
+ u32 srctl, wait_cnt;
+
+ /* Poll the I40E_GLNVM_SRCTL until the done bit is set. */
+ for (wait_cnt = 0; wait_cnt < I40E_SRRD_SRCTL_ATTEMPTS; wait_cnt++) {
+ srctl = rd32(hw, I40E_GLNVM_SRCTL);
+ if (srctl & I40E_GLNVM_SRCTL_DONE_MASK) {
+ ret_code = 0;
+ break;
+ }
+ udelay(5);
+ }
+ if (ret_code == I40E_ERR_TIMEOUT)
+ hw_dbg(hw, "Done bit in GLNVM_SRCTL not set");
+ return ret_code;
+}
+
+/**
+ * i40e_read_nvm_srctl - Reads Shadow RAM.
+ * @hw: pointer to the HW structure.
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
+ * @data: word read from the Shadow RAM.
+ *
+ * Reads a 16-bit word from the Shadow RAM using the GLNVM_SRCTL register.
+ **/
+static i40e_status i40e_read_nvm_srctl(struct i40e_hw *hw, u16 offset,
+ u16 *data)
+{
+ i40e_status ret_code = I40E_ERR_TIMEOUT;
+ u32 sr_reg;
+
+ if (offset >= hw->nvm.sr_size) {
+ hw_dbg(hw, "NVM read error: Offset beyond Shadow RAM limit.\n");
+ ret_code = I40E_ERR_PARAM;
+ goto read_nvm_exit;
+ }
+
+ /* Poll the done bit first. */
+ ret_code = i40e_poll_sr_srctl_done_bit(hw);
+ if (!ret_code) {
+ /* Write the address and start reading. */
+ sr_reg = (u32)(offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
+ (1 << I40E_GLNVM_SRCTL_START_SHIFT);
+ wr32(hw, I40E_GLNVM_SRCTL, sr_reg);
+
+ /* Poll I40E_GLNVM_SRCTL until the done bit is set. */
+ ret_code = i40e_poll_sr_srctl_done_bit(hw);
+ if (!ret_code) {
+ sr_reg = rd32(hw, I40E_GLNVM_SRDATA);
+ *data = (u16)((sr_reg &
+ I40E_GLNVM_SRDATA_RDDATA_MASK)
+ >> I40E_GLNVM_SRDATA_RDDATA_SHIFT);
+ }
+ }
+ if (ret_code)
+ hw_dbg(hw, "NVM read error: Couldn't access Shadow RAM address: 0x%x\n",
+ offset);
+
+read_nvm_exit:
+ return ret_code;
+}
+
+/**
+ * i40e_read_nvm_word - Reads Shadow RAM word.
+ * @hw: pointer to the HW structure.
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
+ * @data: word read from the Shadow RAM.
+ *
+ * Reads a 16-bit word from the Shadow RAM. Each read is preceded by
+ * taking NVM ownership and followed by its release.
+ **/
+i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
+ u16 *data)
+{
+ i40e_status ret_code = 0;
+
+ ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+ if (!ret_code) {
+ ret_code = i40e_read_nvm_srctl(hw, offset, data);
+ i40e_release_nvm(hw);
+ }
+
+ return ret_code;
+}
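
A hedged usage sketch of the single-word helper, reading the stored software
checksum word (the ownership take/release happens inside the call):

	u16 checksum_sr;

	if (!i40e_read_nvm_word(hw, I40E_SR_SW_CHECKSUM_WORD, &checksum_sr))
		hw_dbg(hw, "stored SR checksum: 0x%04x\n", checksum_sr);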
+
+/**
+ * i40e_read_nvm_buffer - Reads Shadow RAM buffer.
+ * @hw: pointer to the HW structure.
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
+ * @words: number of words to read (in) &
+ * number of words read before the NVM ownership timeout (out).
+ * @data: words read from the Shadow RAM.
+ *
+ * Reads 16-bit words (data buffer) from the SR using the i40e_read_nvm_srctl()
+ * method. The buffer read is preceded by taking NVM ownership and followed
+ * by its release.
+ **/
+i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
+ u16 *words, u16 *data)
+{
+ i40e_status ret_code = 0;
+ u16 index, word;
+ u32 time;
+
+ ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+ if (!ret_code) {
+ /* Loop thru the selected region. */
+ for (word = 0; word < *words; word++) {
+ index = offset + word;
+ ret_code = i40e_read_nvm_srctl(hw, index, &data[word]);
+ if (ret_code)
+ break;
+ /* Check that we haven't exceeded the semaphore timeout. */
+ time = rd32(hw, I40E_GLVFGEN_TIMER);
+ if (time >= hw->nvm.hw_semaphore_timeout) {
+ ret_code = I40E_ERR_TIMEOUT;
+ hw_dbg(hw, "NVM read error: timeout.\n");
+ break;
+ }
+ }
+ /* Update the number of words read from the Shadow RAM. */
+ *words = word;
+ /* Release the NVM ownership. */
+ i40e_release_nvm(hw);
+ }
+
+ return ret_code;
+}
+
+/**
+ * i40e_calc_nvm_checksum - Calculates and returns the checksum
+ * @hw: pointer to hardware structure
+ * @checksum: pointer to the calculated checksum
+ *
+ * This function calculates the SW checksum that covers the whole 64kB shadow
+ * RAM except the VPD and PCIe ALT Auto-load modules. The structure and size
+ * of VPD are customer specific and unknown, so this function skips the
+ * maximum possible size of VPD (1kB).
+ **/
+static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw,
+ u16 *checksum)
+{
+ i40e_status ret_code = 0;
+ u16 pcie_alt_module = 0;
+ u16 checksum_local = 0;
+ u16 vpd_module = 0;
+ u16 word = 0;
+ u32 i = 0;
+
+ /* read pointer to VPD area */
+ ret_code = i40e_read_nvm_srctl(hw, I40E_SR_VPD_PTR, &vpd_module);
+ if (ret_code) {
+ ret_code = I40E_ERR_NVM_CHECKSUM;
+ goto i40e_calc_nvm_checksum_exit;
+ }
+
+ /* read pointer to PCIe Alt Auto-load module */
+ ret_code = i40e_read_nvm_srctl(hw, I40E_SR_PCIE_ALT_AUTO_LOAD_PTR,
+ &pcie_alt_module);
+ if (ret_code) {
+ ret_code = I40E_ERR_NVM_CHECKSUM;
+ goto i40e_calc_nvm_checksum_exit;
+ }
+
+ /* Calculate SW checksum that covers the whole 64kB shadow RAM
+ * except the VPD and PCIe ALT Auto-load modules
+ */
+ for (i = 0; i < hw->nvm.sr_size; i++) {
+ /* Skip Checksum word */
+ if (i == I40E_SR_SW_CHECKSUM_WORD)
+ i++;
+ /* Skip VPD module (convert byte size to word count) */
+ if (i == (u32)vpd_module) {
+ i += (I40E_SR_VPD_MODULE_MAX_SIZE / 2);
+ if (i >= hw->nvm.sr_size)
+ break;
+ }
+ /* Skip PCIe ALT module (convert byte size to word count) */
+ if (i == (u32)pcie_alt_module) {
+ i += (I40E_SR_PCIE_ALT_MODULE_MAX_SIZE / 2);
+ if (i >= hw->nvm.sr_size)
+ break;
+ }
+
+ ret_code = i40e_read_nvm_srctl(hw, (u16)i, &word);
+ if (ret_code) {
+ ret_code = I40E_ERR_NVM_CHECKSUM;
+ goto i40e_calc_nvm_checksum_exit;
+ }
+ checksum_local += word;
+ }
+
+ *checksum = (u16)I40E_SR_SW_CHECKSUM_BASE - checksum_local;
+
+i40e_calc_nvm_checksum_exit:
+ return ret_code;
+}
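
The invariant this establishes, assuming a fixed base value defined by the
NVM layout:

	/*   (sum of covered words) + stored_checksum
	 *       == I40E_SR_SW_CHECKSUM_BASE           (mod 2^16)
	 * i40e_validate_nvm_checksum() below checks the equivalent
	 * form, BASE - sum == stored.
	 */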
+
+/**
+ * i40e_validate_nvm_checksum - Validate EEPROM checksum
+ * @hw: pointer to hardware structure
+ * @checksum: calculated checksum
+ *
+ * Performs checksum calculation and validates the NVM SW checksum. If the
+ * caller does not need the checksum, @checksum can be NULL.
+ **/
+i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
+ u16 *checksum)
+{
+ i40e_status ret_code = 0;
+ u16 checksum_sr = 0;
+ u16 checksum_local;
+
+ ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+ if (ret_code)
+ goto i40e_validate_nvm_checksum_exit;
+
+ ret_code = i40e_calc_nvm_checksum(hw, &checksum_local);
+ if (ret_code)
+ goto i40e_validate_nvm_checksum_free;
+
+ /* Do not use i40e_read_nvm_word() because we do not want to take
+ * the synchronization semaphores twice here.
+ */
+ i40e_read_nvm_srctl(hw, I40E_SR_SW_CHECKSUM_WORD, &checksum_sr);
+
+ /* Verify read checksum from EEPROM is the same as
+ * calculated checksum
+ */
+ if (checksum_local != checksum_sr)
+ ret_code = I40E_ERR_NVM_CHECKSUM;
+
+ /* If the user cares, return the calculated checksum */
+ if (checksum)
+ *checksum = checksum_local;
+
+i40e_validate_nvm_checksum_free:
+ i40e_release_nvm(hw);
+
+i40e_validate_nvm_checksum_exit:
+ return ret_code;
+}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_osdep.h b/drivers/net/ethernet/intel/i40e/i40e_osdep.h
new file mode 100644
index 0000000..702c81b
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_osdep.h
@@ -0,0 +1,82 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_OSDEP_H_
+#define _I40E_OSDEP_H_
+
+#include <linux/types.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <linux/tcp.h>
+#include <linux/pci.h>
+#include <linux/highuid.h>
+
+/* get readq/writeq support for 32 bit kernels, use the low-first version */
+#include <asm-generic/io-64-nonatomic-lo-hi.h>
+
+/* File to be the magic between shared code and
+ * actual OS primitives
+ */
+
+#define hw_dbg(hw, S, A...) do {} while (0)
+
+#define wr32(a, reg, value) writel((value), ((a)->hw_addr + (reg)))
+#define rd32(a, reg) readl((a)->hw_addr + (reg))
+
+#define wr64(a, reg, value) writeq((value), ((a)->hw_addr + (reg)))
+#define rd64(a, reg) readq((a)->hw_addr + (reg))
+#define i40e_flush(a) readl((a)->hw_addr + I40E_GLGEN_STAT)
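
These accessors are how the rest of the driver touches device registers; a
usage pattern lifted from i40e_remove() earlier in this patch, where
i40e_flush() forces the posted write out with a dummy read:

	reg = rd32(&pf->hw, I40E_PFGEN_CTRL);
	wr32(&pf->hw, I40E_PFGEN_CTRL, reg | I40E_PFGEN_CTRL_PFSWR_MASK);
	i40e_flush(&pf->hw);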
+
+/* memory allocation tracking */
+struct i40e_dma_mem {
+ void *va;
+ dma_addr_t pa;
+ u32 size;
+} __packed;
+
+#define i40e_allocate_dma_mem(h, m, unused, s, a) \
+ i40e_allocate_dma_mem_d(h, m, s, a)
+#define i40e_free_dma_mem(h, m) i40e_free_dma_mem_d(h, m)
+
+struct i40e_virt_mem {
+ void *va;
+ u32 size;
+} __packed;
+
+#define i40e_allocate_virt_mem(h, m, s) i40e_allocate_virt_mem_d(h, m, s)
+#define i40e_free_virt_mem(h, m) i40e_free_virt_mem_d(h, m)
+
+#define i40e_debug(h, m, s, ...) \
+do { \
+ if (((m) & (h)->debug_mask)) \
+ pr_info("i40e %02x.%x " s, \
+ (h)->bus.device, (h)->bus.func, \
+ ##__VA_ARGS__); \
+} while (0)
+
+typedef enum i40e_status_code i40e_status;
+#endif /* _I40E_OSDEP_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
new file mode 100644
index 0000000..f75bb9c
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -0,0 +1,239 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_PROTOTYPE_H_
+#define _I40E_PROTOTYPE_H_
+
+#include "i40e_type.h"
+#include "i40e_alloc.h"
+#include "i40e_virtchnl.h"
+
+/* Prototypes for shared code functions that are not in
+ * the standard function pointer structures. These are
+ * mostly because they are needed even before the init
+ * has happened and will assist in the early SW and FW
+ * setup.
+ */
+
+/* adminq functions */
+i40e_status i40e_init_adminq(struct i40e_hw *hw);
+i40e_status i40e_shutdown_adminq(struct i40e_hw *hw);
+void i40e_adminq_init_ring_data(struct i40e_hw *hw);
+i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
+ struct i40e_arq_event_info *e,
+ u16 *events_pending);
+i40e_status i40e_asq_send_command(struct i40e_hw *hw,
+ struct i40e_aq_desc *desc,
+ void *buff, /* can be NULL */
+ u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details);
+bool i40e_asq_done(struct i40e_hw *hw);
+
+/* debug function for adminq */
+void i40e_debug_aq(struct i40e_hw *hw,
+ enum i40e_debug_mask mask,
+ void *desc,
+ void *buffer);
+
+void i40e_idle_aq(struct i40e_hw *hw);
+void i40e_resume_aq(struct i40e_hw *hw);
+
+u32 i40e_led_get(struct i40e_hw *hw);
+void i40e_led_set(struct i40e_hw *hw, u32 mode);
+
+/* admin send queue commands */
+
+i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw,
+ u16 *fw_major_version, u16 *fw_minor_version,
+ u16 *api_major_version, u16 *api_minor_version,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw,
+ bool unloading);
+i40e_status i40e_aq_set_phy_reset(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw, u16 vsi_id,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_set_link_restart_an(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_get_link_info(struct i40e_hw *hw,
+ bool enable_lse, struct i40e_link_status *link,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_set_local_advt_reg(struct i40e_hw *hw,
+ u64 advt_reg,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_send_driver_version(struct i40e_hw *hw,
+ struct i40e_driver_version *dv,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_add_vsi(struct i40e_hw *hw,
+ struct i40e_vsi_context *vsi_ctx,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
+ u16 vsi_id, bool set_filter,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
+ u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
+ u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_get_vsi_params(struct i40e_hw *hw,
+ struct i40e_vsi_context *vsi_ctx,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw,
+ struct i40e_vsi_context *vsi_ctx,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
+ u16 downlink_seid, u8 enabled_tc,
+ bool default_port, u16 *pveb_seid,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_get_veb_parameters(struct i40e_hw *hw,
+ u16 veb_seid, u16 *switch_id, bool *floating,
+ u16 *statistic_index, u16 *vebs_used,
+ u16 *vebs_free,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_add_macvlan(struct i40e_hw *hw, u16 vsi_id,
+ struct i40e_aqc_add_macvlan_element_data *mv_list,
+ u16 count, struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 vsi_id,
+ struct i40e_aqc_remove_macvlan_element_data *mv_list,
+ u16 count, struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_add_vlan(struct i40e_hw *hw, u16 vsi_id,
+ struct i40e_aqc_add_remove_vlan_element_data *v_list,
+ u8 count, struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_remove_vlan(struct i40e_hw *hw, u16 vsi_id,
+ struct i40e_aqc_add_remove_vlan_element_data *v_list,
+ u8 count, struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
+ u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw,
+ struct i40e_aqc_get_switch_config_resp *buf,
+ u16 buf_size, u16 *start_seid,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_request_resource(struct i40e_hw *hw,
+ enum i40e_aq_resources_ids resource,
+ enum i40e_aq_resource_access_type access,
+ u8 sdp_number, u64 *timeout,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_release_resource(struct i40e_hw *hw,
+ enum i40e_aq_resources_ids resource,
+ u8 sdp_number,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer,
+ u32 offset, u16 length, void *data,
+ bool last_command,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_discover_capabilities(struct i40e_hw *hw,
+ void *buff, u16 buff_size, u16 *data_size,
+ enum i40e_admin_queue_opc list_type_opc,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
+ u32 offset, u16 length, void *data,
+ bool last_command,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
+ u8 mib_type, void *buff, u16 buff_size,
+ u16 *local_len, u16 *remote_len,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
+ bool enable_update,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_start_lldp(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_mac_address_write(struct i40e_hw *hw,
+ u16 flags, u8 *mac_addr,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_set_hmc_resource_profile(struct i40e_hw *hw,
+ enum i40e_aq_hmc_profile profile,
+ u8 pe_vf_enabled_count,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_config_switch_comp_bw_limit(struct i40e_hw *hw,
+ u16 seid, u16 credit, u8 max_bw,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_configure_vsi_tc_bw_data *bw_data,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_query_vsi_bw_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_vsi_bw_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_vsi_ets_sla_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_query_switch_comp_ets_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_switching_comp_ets_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_query_port_ets_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_port_ets_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details);
+/* i40e_common */
+i40e_status i40e_init_shared_code(struct i40e_hw *hw);
+i40e_status i40e_pf_reset(struct i40e_hw *hw);
+void i40e_clear_pxe_mode(struct i40e_hw *hw);
+bool i40e_get_link_status(struct i40e_hw *hw);
+i40e_status i40e_get_mac_addr(struct i40e_hw *hw,
+ u8 *mac_addr);
+i40e_status i40e_validate_mac_addr(u8 *mac_addr);
+i40e_status i40e_read_lldp_cfg(struct i40e_hw *hw,
+ struct i40e_lldp_variables *lldp_cfg);
+/* prototypes for functions used for NVM access */
+i40e_status i40e_init_nvm(struct i40e_hw *hw);
+i40e_status i40e_acquire_nvm(struct i40e_hw *hw,
+ enum i40e_aq_resource_access_type access);
+void i40e_release_nvm(struct i40e_hw *hw);
+i40e_status i40e_read_nvm_srrd(struct i40e_hw *hw, u16 offset,
+ u16 *data);
+i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
+ u16 *data);
+i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
+ u16 *words, u16 *data);
+i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
+ u16 *checksum);
+
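+/* Hypothetical usage sketch (not part of the original patch): NVM word
+ * reads are bracketed by acquire/release of the NVM resource. The
+ * I40E_RESOURCE_READ enumerator name is assumed here; the real values
+ * belong to enum i40e_aq_resource_access_type in i40e_type.h.
+ */
+static i40e_status example_nvm_read_word(struct i40e_hw *hw, u16 offset,
+					 u16 *data)
+{
+	i40e_status ret;
+
+	ret = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);	/* assumed name */
+	if (ret)
+		return ret;
+	ret = i40e_read_nvm_word(hw, offset, data);
+	i40e_release_nvm(hw);
+	return ret;
+}
+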
+/* prototypes for functions used for SW locks */
+
+/* i40e_common for VF drivers */
+void i40e_vf_parse_hw_config(struct i40e_hw *hw,
+ struct i40e_virtchnl_vf_resource *msg);
+i40e_status i40e_vf_reset(struct i40e_hw *hw);
+i40e_status i40e_aq_send_msg_to_pf(struct i40e_hw *hw,
+ enum i40e_virtchnl_ops v_opcode,
+ i40e_status v_retval,
+ u8 *msg, u16 msglen,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_set_filter_control(struct i40e_hw *hw,
+ struct i40e_filter_control_settings *settings);
+#endif /* _I40E_PROTOTYPE_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_register.h b/drivers/net/ethernet/intel/i40e/i40e_register.h
new file mode 100644
index 0000000..6bd333c
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_register.h
@@ -0,0 +1,4688 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_REGISTER_H_
+#define _I40E_REGISTER_H_
+
+#define I40E_GLPCI_PM_MUX_NPQ 0x0009C4F4
+#define I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_SHIFT 0
+#define I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_MASK (0x7 << I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_SHIFT)
+#define I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_SHIFT 16
+#define I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_MASK (0x1F << I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_SHIFT)
+#define I40E_GLPCI_PM_MUX_PFB 0x0009C4F0
+#define I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_SHIFT 0
+#define I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_MASK (0x1F << I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_SHIFT)
+#define I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_SHIFT 16
+#define I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_MASK (0x7 << I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_SHIFT)
+#define I40E_GLPCI_SPARE_BITS_0 0x0009C4F8
+#define I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_SHIFT 0
+#define I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_MASK (0xFFFFFFFF << I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_SHIFT)
+#define I40E_GLPCI_SPARE_BITS_1 0x0009C4FC
+#define I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_SHIFT 0
+#define I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_MASK (0xFFFFFFFF << I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_SHIFT)
+#define I40E_PFPCI_PF_FLUSH_DONE 0x0009C800
+#define I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_SHIFT 0
+#define I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_MASK (0x1 << I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_SHIFT)
+#define I40E_PFPCI_VF_FLUSH_DONE 0x0009C600
+#define I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_SHIFT 0
+#define I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_MASK (0x1 << I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_SHIFT)
+#define I40E_PFPCI_VM_FLUSH_DONE 0x0009C880
+#define I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_SHIFT 0
+#define I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_MASK (0x1 << I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_SHIFT)
+#define I40E_PF_ARQBAH 0x00080180
+#define I40E_PF_ARQBAH_ARQBAH_SHIFT 0
+#define I40E_PF_ARQBAH_ARQBAH_MASK (0xFFFFFFFF << I40E_PF_ARQBAH_ARQBAH_SHIFT)
+#define I40E_PF_ARQBAL 0x00080080
+#define I40E_PF_ARQBAL_ARQBAL_SHIFT 0
+#define I40E_PF_ARQBAL_ARQBAL_MASK (0xFFFFFFFF << I40E_PF_ARQBAL_ARQBAL_SHIFT)
+#define I40E_PF_ARQH 0x00080380
+#define I40E_PF_ARQH_ARQH_SHIFT 0
+#define I40E_PF_ARQH_ARQH_MASK (0x3FF << I40E_PF_ARQH_ARQH_SHIFT)
+#define I40E_PF_ARQLEN 0x00080280
+#define I40E_PF_ARQLEN_ARQLEN_SHIFT 0
+#define I40E_PF_ARQLEN_ARQLEN_MASK (0x3FF << I40E_PF_ARQLEN_ARQLEN_SHIFT)
+#define I40E_PF_ARQLEN_ARQVFE_SHIFT 28
+#define I40E_PF_ARQLEN_ARQVFE_MASK (0x1 << I40E_PF_ARQLEN_ARQVFE_SHIFT)
+#define I40E_PF_ARQLEN_ARQOVFL_SHIFT 29
+#define I40E_PF_ARQLEN_ARQOVFL_MASK (0x1 << I40E_PF_ARQLEN_ARQOVFL_SHIFT)
+#define I40E_PF_ARQLEN_ARQCRIT_SHIFT 30
+#define I40E_PF_ARQLEN_ARQCRIT_MASK (0x1 << I40E_PF_ARQLEN_ARQCRIT_SHIFT)
+#define I40E_PF_ARQLEN_ARQENABLE_SHIFT 31
+#define I40E_PF_ARQLEN_ARQENABLE_MASK (0x1 << I40E_PF_ARQLEN_ARQENABLE_SHIFT)
+#define I40E_PF_ARQT 0x00080480
+#define I40E_PF_ARQT_ARQT_SHIFT 0
+#define I40E_PF_ARQT_ARQT_MASK (0x3FF << I40E_PF_ARQT_ARQT_SHIFT)
+#define I40E_PF_ATQBAH 0x00080100
+#define I40E_PF_ATQBAH_ATQBAH_SHIFT 0
+#define I40E_PF_ATQBAH_ATQBAH_MASK (0xFFFFFFFF << I40E_PF_ATQBAH_ATQBAH_SHIFT)
+#define I40E_PF_ATQBAL 0x00080000
+#define I40E_PF_ATQBAL_ATQBAL_SHIFT 0
+#define I40E_PF_ATQBAL_ATQBAL_MASK (0xFFFFFFFF << I40E_PF_ATQBAL_ATQBAL_SHIFT)
+#define I40E_PF_ATQH 0x00080300
+#define I40E_PF_ATQH_ATQH_SHIFT 0
+#define I40E_PF_ATQH_ATQH_MASK (0x3FF << I40E_PF_ATQH_ATQH_SHIFT)
+#define I40E_PF_ATQLEN 0x00080200
+#define I40E_PF_ATQLEN_ATQLEN_SHIFT 0
+#define I40E_PF_ATQLEN_ATQLEN_MASK (0x3FF << I40E_PF_ATQLEN_ATQLEN_SHIFT)
+#define I40E_PF_ATQLEN_ATQVFE_SHIFT 28
+#define I40E_PF_ATQLEN_ATQVFE_MASK (0x1 << I40E_PF_ATQLEN_ATQVFE_SHIFT)
+#define I40E_PF_ATQLEN_ATQOVFL_SHIFT 29
+#define I40E_PF_ATQLEN_ATQOVFL_MASK (0x1 << I40E_PF_ATQLEN_ATQOVFL_SHIFT)
+#define I40E_PF_ATQLEN_ATQCRIT_SHIFT 30
+#define I40E_PF_ATQLEN_ATQCRIT_MASK (0x1 << I40E_PF_ATQLEN_ATQCRIT_SHIFT)
+#define I40E_PF_ATQLEN_ATQENABLE_SHIFT 31
+#define I40E_PF_ATQLEN_ATQENABLE_MASK (0x1 << I40E_PF_ATQLEN_ATQENABLE_SHIFT)
+#define I40E_PF_ATQT 0x00080400
+#define I40E_PF_ATQT_ATQT_SHIFT 0
+#define I40E_PF_ATQT_ATQT_MASK (0x3FF << I40E_PF_ATQT_ATQT_SHIFT)
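+
+/* Hypothetical usage sketch (not part of the original patch): each
+ * register below is described by an address plus per-field _SHIFT/_MASK
+ * pairs, so a field is read by masking and then shifting. rd32() stands
+ * in for the driver's MMIO read accessor (assumed, from i40e_osdep.h).
+ */
+static inline u32 example_pf_arq_len(struct i40e_hw *hw)
+{
+	u32 reg = rd32(hw, I40E_PF_ARQLEN);
+
+	return (reg & I40E_PF_ARQLEN_ARQLEN_MASK) >>
+		I40E_PF_ARQLEN_ARQLEN_SHIFT;
+}
+
+static inline bool example_pf_arq_enabled(struct i40e_hw *hw)
+{
+	return !!(rd32(hw, I40E_PF_ARQLEN) & I40E_PF_ARQLEN_ARQENABLE_MASK);
+}
+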
+#define I40E_VF_ARQBAH(_VF) (0x00081400 + ((_VF) * 4)) /* _VF=0...127 */
+#define I40E_VF_ARQBAH_MAX_INDEX 127
+#define I40E_VF_ARQBAH_ARQBAH_SHIFT 0
+#define I40E_VF_ARQBAH_ARQBAH_MASK (0xFFFFFFFF << I40E_VF_ARQBAH_ARQBAH_SHIFT)
+#define I40E_VF_ARQBAL(_VF) (0x00080C00 + ((_VF) * 4)) /* _VF=0...127 */
+#define I40E_VF_ARQBAL_MAX_INDEX 127
+#define I40E_VF_ARQBAL_ARQBAL_SHIFT 0
+#define I40E_VF_ARQBAL_ARQBAL_MASK (0xFFFFFFFF << I40E_VF_ARQBAL_ARQBAL_SHIFT)
+#define I40E_VF_ARQH(_VF) (0x00082400 + ((_VF) * 4)) /* _VF=0...127 */
+#define I40E_VF_ARQH_MAX_INDEX 127
+#define I40E_VF_ARQH_ARQH_SHIFT 0
+#define I40E_VF_ARQH_ARQH_MASK (0x3FF << I40E_VF_ARQH_ARQH_SHIFT)
+#define I40E_VF_ARQLEN(_VF) (0x00081C00 + ((_VF) * 4)) /* _VF=0...127 */
+#define I40E_VF_ARQLEN_MAX_INDEX 127
+#define I40E_VF_ARQLEN_ARQLEN_SHIFT 0
+#define I40E_VF_ARQLEN_ARQLEN_MASK (0x3FF << I40E_VF_ARQLEN_ARQLEN_SHIFT)
+#define I40E_VF_ARQLEN_ARQVFE_SHIFT 28
+#define I40E_VF_ARQLEN_ARQVFE_MASK (0x1 << I40E_VF_ARQLEN_ARQVFE_SHIFT)
+#define I40E_VF_ARQLEN_ARQOVFL_SHIFT 29
+#define I40E_VF_ARQLEN_ARQOVFL_MASK (0x1 << I40E_VF_ARQLEN_ARQOVFL_SHIFT)
+#define I40E_VF_ARQLEN_ARQCRIT_SHIFT 30
+#define I40E_VF_ARQLEN_ARQCRIT_MASK (0x1 << I40E_VF_ARQLEN_ARQCRIT_SHIFT)
+#define I40E_VF_ARQLEN_ARQENABLE_SHIFT 31
+#define I40E_VF_ARQLEN_ARQENABLE_MASK (0x1 << I40E_VF_ARQLEN_ARQENABLE_SHIFT)
+#define I40E_VF_ARQT(_VF) (0x00082C00 + ((_VF) * 4)) /* _VF=0...127 */
+#define I40E_VF_ARQT_MAX_INDEX 127
+#define I40E_VF_ARQT_ARQT_SHIFT 0
+#define I40E_VF_ARQT_ARQT_MASK (0x3FF << I40E_VF_ARQT_ARQT_SHIFT)
+#define I40E_VF_ATQBAH(_VF) (0x00081000 + ((_VF) * 4)) /* _VF=0...127 */
+#define I40E_VF_ATQBAH_MAX_INDEX 127
+#define I40E_VF_ATQBAH_ATQBAH_SHIFT 0
+#define I40E_VF_ATQBAH_ATQBAH_MASK (0xFFFFFFFF << I40E_VF_ATQBAH_ATQBAH_SHIFT)
+#define I40E_VF_ATQBAL(_VF) (0x00080800 + ((_VF) * 4)) /* _VF=0...127 */
+#define I40E_VF_ATQBAL_MAX_INDEX 127
+#define I40E_VF_ATQBAL_ATQBAL_SHIFT 0
+#define I40E_VF_ATQBAL_ATQBAL_MASK (0xFFFFFFFF << I40E_VF_ATQBAL_ATQBAL_SHIFT)
+#define I40E_VF_ATQH(_VF) (0x00082000 + ((_VF) * 4)) /* _VF=0...127 */
+#define I40E_VF_ATQH_MAX_INDEX 127
+#define I40E_VF_ATQH_ATQH_SHIFT 0
+#define I40E_VF_ATQH_ATQH_MASK (0x3FF << I40E_VF_ATQH_ATQH_SHIFT)
+#define I40E_VF_ATQLEN(_VF) (0x00081800 + ((_VF) * 4)) /* _VF=0...127 */
+#define I40E_VF_ATQLEN_MAX_INDEX 127
+#define I40E_VF_ATQLEN_ATQLEN_SHIFT 0
+#define I40E_VF_ATQLEN_ATQLEN_MASK (0x3FF << I40E_VF_ATQLEN_ATQLEN_SHIFT)
+#define I40E_VF_ATQLEN_ATQVFE_SHIFT 28
+#define I40E_VF_ATQLEN_ATQVFE_MASK (0x1 << I40E_VF_ATQLEN_ATQVFE_SHIFT)
+#define I40E_VF_ATQLEN_ATQOVFL_SHIFT 29
+#define I40E_VF_ATQLEN_ATQOVFL_MASK (0x1 << I40E_VF_ATQLEN_ATQOVFL_SHIFT)
+#define I40E_VF_ATQLEN_ATQCRIT_SHIFT 30
+#define I40E_VF_ATQLEN_ATQCRIT_MASK (0x1 << I40E_VF_ATQLEN_ATQCRIT_SHIFT)
+#define I40E_VF_ATQLEN_ATQENABLE_SHIFT 31
+#define I40E_VF_ATQLEN_ATQENABLE_MASK (0x1 << I40E_VF_ATQLEN_ATQENABLE_SHIFT)
+#define I40E_VF_ATQT(_VF) (0x00082800 + ((_VF) * 4)) /* _VF=0...127 */
+#define I40E_VF_ATQT_MAX_INDEX 127
+#define I40E_VF_ATQT_ATQT_SHIFT 0
+#define I40E_VF_ATQT_ATQT_MASK (0x3FF << I40E_VF_ATQT_ATQT_SHIFT)
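+
+/* Hypothetical usage sketch (not part of the original patch):
+ * parameterized registers such as I40E_VF_ARQT(_VF) expand to a base
+ * address plus a fixed stride, and the matching _MAX_INDEX define bounds
+ * the valid range. wr32() stands in for the driver's MMIO write accessor.
+ */
+static inline void example_bump_vf_arq_tail(struct i40e_hw *hw, u16 vf_id,
+					    u32 tail)
+{
+	if (vf_id > I40E_VF_ARQT_MAX_INDEX)
+		return;		/* out of range for this register array */
+	wr32(hw, I40E_VF_ARQT(vf_id), tail & I40E_VF_ARQT_ARQT_MASK);
+}
+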
+#define I40E_PRT_L2TAGSEN 0x001C0B20
+#define I40E_PRT_L2TAGSEN_ENABLE_SHIFT 0
+#define I40E_PRT_L2TAGSEN_ENABLE_MASK (0xFF << I40E_PRT_L2TAGSEN_ENABLE_SHIFT)
+#define I40E_PFCM_LAN_ERRDATA 0x0010C080
+#define I40E_PFCM_LAN_ERRDATA_ERROR_CODE_SHIFT 0
+#define I40E_PFCM_LAN_ERRDATA_ERROR_CODE_MASK (0xF << I40E_PFCM_LAN_ERRDATA_ERROR_CODE_SHIFT)
+#define I40E_PFCM_LAN_ERRDATA_Q_TYPE_SHIFT 4
+#define I40E_PFCM_LAN_ERRDATA_Q_TYPE_MASK (0x7 << I40E_PFCM_LAN_ERRDATA_Q_TYPE_SHIFT)
+#define I40E_PFCM_LAN_ERRDATA_Q_NUM_SHIFT 8
+#define I40E_PFCM_LAN_ERRDATA_Q_NUM_MASK (0xFFF << I40E_PFCM_LAN_ERRDATA_Q_NUM_SHIFT)
+#define I40E_PFCM_LAN_ERRINFO 0x0010C000
+#define I40E_PFCM_LAN_ERRINFO_ERROR_VALID_SHIFT 0
+#define I40E_PFCM_LAN_ERRINFO_ERROR_VALID_MASK (0x1 << I40E_PFCM_LAN_ERRINFO_ERROR_VALID_SHIFT)
+#define I40E_PFCM_LAN_ERRINFO_ERROR_INST_SHIFT 4
+#define I40E_PFCM_LAN_ERRINFO_ERROR_INST_MASK (0x7 << I40E_PFCM_LAN_ERRINFO_ERROR_INST_SHIFT)
+#define I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_SHIFT 8
+#define I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_MASK (0xFF << I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_SHIFT)
+#define I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_SHIFT 16
+#define I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_MASK (0xFF << I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_SHIFT)
+#define I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_SHIFT 24
+#define I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_MASK (0xFF << I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_SHIFT)
+#define I40E_PFCM_LANCTXCTL 0x0010C300
+#define I40E_PFCM_LANCTXCTL_QUEUE_NUM_SHIFT 0
+#define I40E_PFCM_LANCTXCTL_QUEUE_NUM_MASK (0xFFF << I40E_PFCM_LANCTXCTL_QUEUE_NUM_SHIFT)
+#define I40E_PFCM_LANCTXCTL_SUB_LINE_SHIFT 12
+#define I40E_PFCM_LANCTXCTL_SUB_LINE_MASK (0x7 << I40E_PFCM_LANCTXCTL_SUB_LINE_SHIFT)
+#define I40E_PFCM_LANCTXCTL_QUEUE_TYPE_SHIFT 15
+#define I40E_PFCM_LANCTXCTL_QUEUE_TYPE_MASK (0x3 << I40E_PFCM_LANCTXCTL_QUEUE_TYPE_SHIFT)
+#define I40E_PFCM_LANCTXCTL_OP_CODE_SHIFT 17
+#define I40E_PFCM_LANCTXCTL_OP_CODE_MASK (0x3 << I40E_PFCM_LANCTXCTL_OP_CODE_SHIFT)
+#define I40E_PFCM_LANCTXDATA(_i) (0x0010C100 + ((_i) * 128)) /* _i=0...3 */
+#define I40E_PFCM_LANCTXDATA_MAX_INDEX 3
+#define I40E_PFCM_LANCTXDATA_DATA_SHIFT 0
+#define I40E_PFCM_LANCTXDATA_DATA_MASK (0xFFFFFFFF << I40E_PFCM_LANCTXDATA_DATA_SHIFT)
+#define I40E_PFCM_LANCTXSTAT 0x0010C380
+#define I40E_PFCM_LANCTXSTAT_CTX_DONE_SHIFT 0
+#define I40E_PFCM_LANCTXSTAT_CTX_DONE_MASK (0x1 << I40E_PFCM_LANCTXSTAT_CTX_DONE_SHIFT)
+#define I40E_PFCM_LANCTXSTAT_CTX_MISS_SHIFT 1
+#define I40E_PFCM_LANCTXSTAT_CTX_MISS_MASK (0x1 << I40E_PFCM_LANCTXSTAT_CTX_MISS_SHIFT)
+#define I40E_PFCM_PE_ERRDATA 0x00138D00
+#define I40E_PFCM_PE_ERRDATA_ERROR_CODE_SHIFT 0
+#define I40E_PFCM_PE_ERRDATA_ERROR_CODE_MASK (0xF << I40E_PFCM_PE_ERRDATA_ERROR_CODE_SHIFT)
+#define I40E_PFCM_PE_ERRDATA_Q_TYPE_SHIFT 4
+#define I40E_PFCM_PE_ERRDATA_Q_TYPE_MASK (0x7 << I40E_PFCM_PE_ERRDATA_Q_TYPE_SHIFT)
+#define I40E_PFCM_PE_ERRDATA_Q_NUM_SHIFT 8
+#define I40E_PFCM_PE_ERRDATA_Q_NUM_MASK (0x3FFFF << I40E_PFCM_PE_ERRDATA_Q_NUM_SHIFT)
+#define I40E_PFCM_PE_ERRINFO 0x00138C80
+#define I40E_PFCM_PE_ERRINFO_ERROR_VALID_SHIFT 0
+#define I40E_PFCM_PE_ERRINFO_ERROR_VALID_MASK (0x1 << I40E_PFCM_PE_ERRINFO_ERROR_VALID_SHIFT)
+#define I40E_PFCM_PE_ERRINFO_ERROR_INST_SHIFT 4
+#define I40E_PFCM_PE_ERRINFO_ERROR_INST_MASK (0x7 << I40E_PFCM_PE_ERRINFO_ERROR_INST_SHIFT)
+#define I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT 8
+#define I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_MASK (0xFF << I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT)
+#define I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT 16
+#define I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_MASK (0xFF << I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT)
+#define I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT 24
+#define I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_MASK (0xFF << I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT)
+#define I40E_VFCM_PE_ERRDATA1(_VF) (0x00138800 + ((_VF) * 4)) /* _VF=0...127 */
+#define I40E_VFCM_PE_ERRDATA1_MAX_INDEX 127
+#define I40E_VFCM_PE_ERRDATA1_ERROR_CODE_SHIFT 0
+#define I40E_VFCM_PE_ERRDATA1_ERROR_CODE_MASK (0xF << I40E_VFCM_PE_ERRDATA1_ERROR_CODE_SHIFT)
+#define I40E_VFCM_PE_ERRDATA1_Q_TYPE_SHIFT 4
+#define I40E_VFCM_PE_ERRDATA1_Q_TYPE_MASK (0x7 << I40E_VFCM_PE_ERRDATA1_Q_TYPE_SHIFT)
+#define I40E_VFCM_PE_ERRDATA1_Q_NUM_SHIFT 8
+#define I40E_VFCM_PE_ERRDATA1_Q_NUM_MASK (0x3FFFF << I40E_VFCM_PE_ERRDATA1_Q_NUM_SHIFT)
+#define I40E_VFCM_PE_ERRINFO1(_VF) (0x00138400 + ((_VF) * 4)) /* _VF=0...127 */
+#define I40E_VFCM_PE_ERRINFO1_MAX_INDEX 127
+#define I40E_VFCM_PE_ERRINFO1_ERROR_VALID_SHIFT 0
+#define I40E_VFCM_PE_ERRINFO1_ERROR_VALID_MASK (0x1 << I40E_VFCM_PE_ERRINFO1_ERROR_VALID_SHIFT)
+#define I40E_VFCM_PE_ERRINFO1_ERROR_INST_SHIFT 4
+#define I40E_VFCM_PE_ERRINFO1_ERROR_INST_MASK (0x7 << I40E_VFCM_PE_ERRINFO1_ERROR_INST_SHIFT)
+#define I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_SHIFT 8
+#define I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_MASK (0xFF << I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_SHIFT)
+#define I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_SHIFT 16
+#define I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_MASK (0xFF << I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_SHIFT)
+#define I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_SHIFT 24
+#define I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_MASK (0xFF << I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_SHIFT)
+#define I40E_GLDCB_GENC 0x00083044
+#define I40E_GLDCB_GENC_PCIRTT_SHIFT 0
+#define I40E_GLDCB_GENC_PCIRTT_MASK (0xFFFF << I40E_GLDCB_GENC_PCIRTT_SHIFT)
+#define I40E_GLDCB_RUPTI 0x00122618
+#define I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_SHIFT 0
+#define I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_MASK (0xFFFFFFFF << I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_SHIFT)
+#define I40E_PRTDCB_FCCFG 0x001E4640
+#define I40E_PRTDCB_FCCFG_TFCE_SHIFT 3
+#define I40E_PRTDCB_FCCFG_TFCE_MASK (0x3 << I40E_PRTDCB_FCCFG_TFCE_SHIFT)
+#define I40E_PRTDCB_FCRTV 0x001E4600
+#define I40E_PRTDCB_FCRTV_FC_REFRESH_TH_SHIFT 0
+#define I40E_PRTDCB_FCRTV_FC_REFRESH_TH_MASK (0xFFFF << I40E_PRTDCB_FCRTV_FC_REFRESH_TH_SHIFT)
+#define I40E_PRTDCB_FCTTVN(_i) (0x001E4580 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRTDCB_FCTTVN_MAX_INDEX 3
+#define I40E_PRTDCB_FCTTVN_TTV_2N_SHIFT 0
+#define I40E_PRTDCB_FCTTVN_TTV_2N_MASK (0xFFFF << I40E_PRTDCB_FCTTVN_TTV_2N_SHIFT)
+#define I40E_PRTDCB_FCTTVN_TTV_2N_P1_SHIFT 16
+#define I40E_PRTDCB_FCTTVN_TTV_2N_P1_MASK (0xFFFF << I40E_PRTDCB_FCTTVN_TTV_2N_P1_SHIFT)
+#define I40E_PRTDCB_GENC 0x00083000
+#define I40E_PRTDCB_GENC_RESERVED_1_SHIFT 0
+#define I40E_PRTDCB_GENC_RESERVED_1_MASK (0x3 << I40E_PRTDCB_GENC_RESERVED_1_SHIFT)
+#define I40E_PRTDCB_GENC_NUMTC_SHIFT 2
+#define I40E_PRTDCB_GENC_NUMTC_MASK (0xF << I40E_PRTDCB_GENC_NUMTC_SHIFT)
+#define I40E_PRTDCB_GENC_FCOEUP_SHIFT 6
+#define I40E_PRTDCB_GENC_FCOEUP_MASK (0x7 << I40E_PRTDCB_GENC_FCOEUP_SHIFT)
+#define I40E_PRTDCB_GENC_FCOEUP_VALID_SHIFT 9
+#define I40E_PRTDCB_GENC_FCOEUP_VALID_MASK (0x1 << I40E_PRTDCB_GENC_FCOEUP_VALID_SHIFT)
+#define I40E_PRTDCB_GENC_PFCLDA_SHIFT 16
+#define I40E_PRTDCB_GENC_PFCLDA_MASK (0xFFFF << I40E_PRTDCB_GENC_PFCLDA_SHIFT)
+#define I40E_PRTDCB_GENS 0x00083020
+#define I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT 0
+#define I40E_PRTDCB_GENS_DCBX_STATUS_MASK (0x7 << I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT)
+#define I40E_PRTDCB_MFLCN 0x001E2400
+#define I40E_PRTDCB_MFLCN_PMCF_SHIFT 0
+#define I40E_PRTDCB_MFLCN_PMCF_MASK (0x1 << I40E_PRTDCB_MFLCN_PMCF_SHIFT)
+#define I40E_PRTDCB_MFLCN_DPF_SHIFT 1
+#define I40E_PRTDCB_MFLCN_DPF_MASK (0x1 << I40E_PRTDCB_MFLCN_DPF_SHIFT)
+#define I40E_PRTDCB_MFLCN_RPFCM_SHIFT 2
+#define I40E_PRTDCB_MFLCN_RPFCM_MASK (0x1 << I40E_PRTDCB_MFLCN_RPFCM_SHIFT)
+#define I40E_PRTDCB_MFLCN_RFCE_SHIFT 3
+#define I40E_PRTDCB_MFLCN_RFCE_MASK (0x1 << I40E_PRTDCB_MFLCN_RFCE_SHIFT)
+#define I40E_PRTDCB_MFLCN_RPFCE_SHIFT 4
+#define I40E_PRTDCB_MFLCN_RPFCE_MASK (0xFF << I40E_PRTDCB_MFLCN_RPFCE_SHIFT)
+#define I40E_PRTDCB_RETSC 0x001223E0
+#define I40E_PRTDCB_RETSC_ETS_MODE_SHIFT 0
+#define I40E_PRTDCB_RETSC_ETS_MODE_MASK (0x1 << I40E_PRTDCB_RETSC_ETS_MODE_SHIFT)
+#define I40E_PRTDCB_RETSC_NON_ETS_MODE_SHIFT 1
+#define I40E_PRTDCB_RETSC_NON_ETS_MODE_MASK (0x1 << I40E_PRTDCB_RETSC_NON_ETS_MODE_SHIFT)
+#define I40E_PRTDCB_RETSC_ETS_MAX_EXP_SHIFT 2
+#define I40E_PRTDCB_RETSC_ETS_MAX_EXP_MASK (0xF << I40E_PRTDCB_RETSC_ETS_MAX_EXP_SHIFT)
+#define I40E_PRTDCB_RETSC_LLTC_SHIFT 8
+#define I40E_PRTDCB_RETSC_LLTC_MASK (0xFF << I40E_PRTDCB_RETSC_LLTC_SHIFT)
+#define I40E_PRTDCB_RETSTCC(_i) (0x00122180 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTDCB_RETSTCC_MAX_INDEX 7
+#define I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT 0
+#define I40E_PRTDCB_RETSTCC_BWSHARE_MASK (0x7F << I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT)
+#define I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT 30
+#define I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK (0x1 << I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT)
+#define I40E_PRTDCB_RETSTCC_ETSTC_SHIFT 31
+#define I40E_PRTDCB_RETSTCC_ETSTC_MASK (0x1 << I40E_PRTDCB_RETSTCC_ETSTC_SHIFT)
+#define I40E_PRTDCB_RPPMC 0x001223A0
+#define I40E_PRTDCB_RPPMC_LANRPPM_SHIFT 0
+#define I40E_PRTDCB_RPPMC_LANRPPM_MASK (0xFF << I40E_PRTDCB_RPPMC_LANRPPM_SHIFT)
+#define I40E_PRTDCB_RPPMC_RDMARPPM_SHIFT 8
+#define I40E_PRTDCB_RPPMC_RDMARPPM_MASK (0xFF << I40E_PRTDCB_RPPMC_RDMARPPM_SHIFT)
+#define I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_SHIFT 16
+#define I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_MASK (0xFF << I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_SHIFT)
+#define I40E_PRTDCB_RUP 0x001C0B00
+#define I40E_PRTDCB_RUP_NOVLANUP_SHIFT 0
+#define I40E_PRTDCB_RUP_NOVLANUP_MASK (0x7 << I40E_PRTDCB_RUP_NOVLANUP_SHIFT)
+#define I40E_PRTDCB_RUP2TC 0x001C09A0
+#define I40E_PRTDCB_RUP2TC_UP0TC_SHIFT 0
+#define I40E_PRTDCB_RUP2TC_UP0TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP0TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP1TC_SHIFT 3
+#define I40E_PRTDCB_RUP2TC_UP1TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP1TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP2TC_SHIFT 6
+#define I40E_PRTDCB_RUP2TC_UP2TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP2TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP3TC_SHIFT 9
+#define I40E_PRTDCB_RUP2TC_UP3TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP3TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP4TC_SHIFT 12
+#define I40E_PRTDCB_RUP2TC_UP4TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP4TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP5TC_SHIFT 15
+#define I40E_PRTDCB_RUP2TC_UP5TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP5TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP6TC_SHIFT 18
+#define I40E_PRTDCB_RUP2TC_UP6TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP6TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP7TC_SHIFT 21
+#define I40E_PRTDCB_RUP2TC_UP7TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP7TC_SHIFT)
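+
+/* Hypothetical usage sketch (not part of the original patch): the eight
+ * UPnTC fields of RUP2TC are consecutive 3-bit slices, so a user-priority
+ * to traffic-class lookup can derive the per-priority shift from the
+ * field stride implied by the defines above.
+ */
+static inline u8 example_up_to_tc(struct i40e_hw *hw, u8 up)
+{
+	u32 stride = I40E_PRTDCB_RUP2TC_UP1TC_SHIFT -
+		     I40E_PRTDCB_RUP2TC_UP0TC_SHIFT;	/* 3 bits per UP */
+	u32 reg = rd32(hw, I40E_PRTDCB_RUP2TC);
+
+	if (up > 7)
+		return 0;	/* only UP0...UP7 are defined */
+	return (reg >> (up * stride)) & (I40E_PRTDCB_RUP2TC_UP0TC_MASK >>
+					 I40E_PRTDCB_RUP2TC_UP0TC_SHIFT);
+}
+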
+#define I40E_PRTDCB_TC2PFC 0x001C0980
+#define I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT 0
+#define I40E_PRTDCB_TC2PFC_TC2PFC_MASK (0xFF << I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT)
+#define I40E_PRTDCB_TCPMC 0x000A21A0
+#define I40E_PRTDCB_TCPMC_CPM_SHIFT 0
+#define I40E_PRTDCB_TCPMC_CPM_MASK (0x1FFF << I40E_PRTDCB_TCPMC_CPM_SHIFT)
+#define I40E_PRTDCB_TCPMC_LLTC_SHIFT 13
+#define I40E_PRTDCB_TCPMC_LLTC_MASK (0xFF << I40E_PRTDCB_TCPMC_LLTC_SHIFT)
+#define I40E_PRTDCB_TCPMC_TCPM_MODE_SHIFT 30
+#define I40E_PRTDCB_TCPMC_TCPM_MODE_MASK (0x1 << I40E_PRTDCB_TCPMC_TCPM_MODE_SHIFT)
+#define I40E_PRTDCB_TCWSTC(_i) (0x000A2040 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTDCB_TCWSTC_MAX_INDEX 7
+#define I40E_PRTDCB_TCWSTC_MSTC_SHIFT 0
+#define I40E_PRTDCB_TCWSTC_MSTC_MASK (0xFFFFF << I40E_PRTDCB_TCWSTC_MSTC_SHIFT)
+#define I40E_PRTDCB_TDPMC 0x000A0180
+#define I40E_PRTDCB_TDPMC_DPM_SHIFT 0
+#define I40E_PRTDCB_TDPMC_DPM_MASK (0xFF << I40E_PRTDCB_TDPMC_DPM_SHIFT)
+#define I40E_PRTDCB_TDPMC_TCPM_MODE_SHIFT 30
+#define I40E_PRTDCB_TDPMC_TCPM_MODE_MASK (0x1 << I40E_PRTDCB_TDPMC_TCPM_MODE_SHIFT)
+#define I40E_PRTDCB_TDPUC 0x00044100
+#define I40E_PRTDCB_TDPUC_MAX_TXFRAME_SHIFT 0
+#define I40E_PRTDCB_TDPUC_MAX_TXFRAME_MASK (0xFFFF << I40E_PRTDCB_TDPUC_MAX_TXFRAME_SHIFT)
+#define I40E_PRTDCB_TETSC_TCB 0x000AE060
+#define I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_SHIFT 0
+#define I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_MASK (0x1 << I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_SHIFT)
+#define I40E_PRTDCB_TETSC_TCB_LLTC_SHIFT 8
+#define I40E_PRTDCB_TETSC_TCB_LLTC_MASK (0xFF << I40E_PRTDCB_TETSC_TCB_LLTC_SHIFT)
+#define I40E_PRTDCB_TETSC_TPB 0x00098060
+#define I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_SHIFT 0
+#define I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_MASK (0x1 << I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_SHIFT)
+#define I40E_PRTDCB_TETSC_TPB_LLTC_SHIFT 8
+#define I40E_PRTDCB_TETSC_TPB_LLTC_MASK (0xFF << I40E_PRTDCB_TETSC_TPB_LLTC_SHIFT)
+#define I40E_PRTDCB_TFCS 0x001E4560
+#define I40E_PRTDCB_TFCS_TXOFF_SHIFT 0
+#define I40E_PRTDCB_TFCS_TXOFF_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF0_SHIFT 8
+#define I40E_PRTDCB_TFCS_TXOFF0_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF0_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF1_SHIFT 9
+#define I40E_PRTDCB_TFCS_TXOFF1_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF1_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF2_SHIFT 10
+#define I40E_PRTDCB_TFCS_TXOFF2_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF2_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF3_SHIFT 11
+#define I40E_PRTDCB_TFCS_TXOFF3_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF3_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF4_SHIFT 12
+#define I40E_PRTDCB_TFCS_TXOFF4_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF4_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF5_SHIFT 13
+#define I40E_PRTDCB_TFCS_TXOFF5_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF5_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF6_SHIFT 14
+#define I40E_PRTDCB_TFCS_TXOFF6_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF6_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF7_SHIFT 15
+#define I40E_PRTDCB_TFCS_TXOFF7_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF7_SHIFT)
+#define I40E_PRTDCB_TFWSTC(_i) (0x000A0040 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTDCB_TFWSTC_MAX_INDEX 7
+#define I40E_PRTDCB_TFWSTC_MSTC_SHIFT 0
+#define I40E_PRTDCB_TFWSTC_MSTC_MASK (0xFFFFF << I40E_PRTDCB_TFWSTC_MSTC_SHIFT)
+#define I40E_PRTDCB_TPFCTS(_i) (0x001E4660 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTDCB_TPFCTS_MAX_INDEX 7
+#define I40E_PRTDCB_TPFCTS_PFCTIMER_SHIFT 0
+#define I40E_PRTDCB_TPFCTS_PFCTIMER_MASK (0x3FFF << I40E_PRTDCB_TPFCTS_PFCTIMER_SHIFT)
+#define I40E_GLFCOE_RCTL 0x00269B94
+#define I40E_GLFCOE_RCTL_FCOEVER_SHIFT 0
+#define I40E_GLFCOE_RCTL_FCOEVER_MASK (0xF << I40E_GLFCOE_RCTL_FCOEVER_SHIFT)
+#define I40E_GLFCOE_RCTL_SAVBAD_SHIFT 4
+#define I40E_GLFCOE_RCTL_SAVBAD_MASK (0x1 << I40E_GLFCOE_RCTL_SAVBAD_SHIFT)
+#define I40E_GLFCOE_RCTL_ICRC_SHIFT 5
+#define I40E_GLFCOE_RCTL_ICRC_MASK (0x1 << I40E_GLFCOE_RCTL_ICRC_SHIFT)
+#define I40E_GLFCOE_RCTL_MAX_SIZE_SHIFT 16
+#define I40E_GLFCOE_RCTL_MAX_SIZE_MASK (0x3FFF << I40E_GLFCOE_RCTL_MAX_SIZE_SHIFT)
+#define I40E_GL_FWSTS 0x00083048
+#define I40E_GL_FWSTS_FWS0B_SHIFT 0
+#define I40E_GL_FWSTS_FWS0B_MASK (0xFF << I40E_GL_FWSTS_FWS0B_SHIFT)
+#define I40E_GL_FWSTS_FWRI_SHIFT 9
+#define I40E_GL_FWSTS_FWRI_MASK (0x1 << I40E_GL_FWSTS_FWRI_SHIFT)
+#define I40E_GL_FWSTS_FWS1B_SHIFT 16
+#define I40E_GL_FWSTS_FWS1B_MASK (0xFF << I40E_GL_FWSTS_FWS1B_SHIFT)
+#define I40E_GLGEN_CLKSTAT 0x000B8184
+#define I40E_GLGEN_CLKSTAT_CLKMODE_SHIFT 0
+#define I40E_GLGEN_CLKSTAT_CLKMODE_MASK (0x1 << I40E_GLGEN_CLKSTAT_CLKMODE_SHIFT)
+#define I40E_GLGEN_CLKSTAT_U_CLK_SPEED_SHIFT 4
+#define I40E_GLGEN_CLKSTAT_U_CLK_SPEED_MASK (0x3 << I40E_GLGEN_CLKSTAT_U_CLK_SPEED_SHIFT)
+#define I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_SHIFT 8
+#define I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_MASK (0x7 << I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_SHIFT)
+#define I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_SHIFT 12
+#define I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_MASK (0x7 << I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_SHIFT)
+#define I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_SHIFT 16
+#define I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_MASK (0x7 << I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_SHIFT)
+#define I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_SHIFT 20
+#define I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_MASK (0x7 << I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_SHIFT)
+#define I40E_GLGEN_GPIO_CTL(_i) (0x00088100 + ((_i) * 4)) /* _i=0...29 */
+#define I40E_GLGEN_GPIO_CTL_MAX_INDEX 29
+#define I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT 0
+#define I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK (0x3 << I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_SHIFT 3
+#define I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_MASK (0x1 << I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PIN_DIR_SHIFT 4
+#define I40E_GLGEN_GPIO_CTL_PIN_DIR_MASK (0x1 << I40E_GLGEN_GPIO_CTL_PIN_DIR_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_TRI_CTL_SHIFT 5
+#define I40E_GLGEN_GPIO_CTL_TRI_CTL_MASK (0x1 << I40E_GLGEN_GPIO_CTL_TRI_CTL_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_OUT_CTL_SHIFT 6
+#define I40E_GLGEN_GPIO_CTL_OUT_CTL_MASK (0x1 << I40E_GLGEN_GPIO_CTL_OUT_CTL_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT 7
+#define I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK (0x7 << I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_LED_INVRT_SHIFT 10
+#define I40E_GLGEN_GPIO_CTL_LED_INVRT_MASK (0x1 << I40E_GLGEN_GPIO_CTL_LED_INVRT_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT 11
+#define I40E_GLGEN_GPIO_CTL_LED_BLINK_MASK (0x1 << I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT 12
+#define I40E_GLGEN_GPIO_CTL_LED_MODE_MASK (0xF << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_INT_MODE_SHIFT 17
+#define I40E_GLGEN_GPIO_CTL_INT_MODE_MASK (0x3 << I40E_GLGEN_GPIO_CTL_INT_MODE_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT 19
+#define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_MASK (0x1 << I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT 20
+#define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_MASK (0x3F << I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT)
+#define I40E_GLGEN_GPIO_SET 0x00088184
+#define I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT 0
+#define I40E_GLGEN_GPIO_SET_GPIO_INDX_MASK (0x1F << I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT)
+#define I40E_GLGEN_GPIO_SET_SDP_DATA_SHIFT 5
+#define I40E_GLGEN_GPIO_SET_SDP_DATA_MASK (0x1 << I40E_GLGEN_GPIO_SET_SDP_DATA_SHIFT)
+#define I40E_GLGEN_GPIO_SET_DRIVE_SDP_SHIFT 6
+#define I40E_GLGEN_GPIO_SET_DRIVE_SDP_MASK (0x1 << I40E_GLGEN_GPIO_SET_DRIVE_SDP_SHIFT)
+#define I40E_GLGEN_GPIO_STAT 0x0008817C
+#define I40E_GLGEN_GPIO_STAT_GPIO_VALUE_SHIFT 0
+#define I40E_GLGEN_GPIO_STAT_GPIO_VALUE_MASK (0x3FFFFFFF << I40E_GLGEN_GPIO_STAT_GPIO_VALUE_SHIFT)
+#define I40E_GLGEN_GPIO_TRANSIT 0x00088180
+#define I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_SHIFT 0
+#define I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_MASK (0x3FFFFFFF << I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_SHIFT)
+#define I40E_GLGEN_I2CCMD(_i) (0x000881E0 + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLGEN_I2CCMD_MAX_INDEX 3
+#define I40E_GLGEN_I2CCMD_DATA_SHIFT 0
+#define I40E_GLGEN_I2CCMD_DATA_MASK (0xFFFF << I40E_GLGEN_I2CCMD_DATA_SHIFT)
+#define I40E_GLGEN_I2CCMD_REGADD_SHIFT 16
+#define I40E_GLGEN_I2CCMD_REGADD_MASK (0xFF << I40E_GLGEN_I2CCMD_REGADD_SHIFT)
+#define I40E_GLGEN_I2CCMD_PHYADD_SHIFT 24
+#define I40E_GLGEN_I2CCMD_PHYADD_MASK (0x7 << I40E_GLGEN_I2CCMD_PHYADD_SHIFT)
+#define I40E_GLGEN_I2CCMD_OP_SHIFT 27
+#define I40E_GLGEN_I2CCMD_OP_MASK (0x1 << I40E_GLGEN_I2CCMD_OP_SHIFT)
+#define I40E_GLGEN_I2CCMD_RESET_SHIFT 28
+#define I40E_GLGEN_I2CCMD_RESET_MASK (0x1 << I40E_GLGEN_I2CCMD_RESET_SHIFT)
+#define I40E_GLGEN_I2CCMD_R_SHIFT 29
+#define I40E_GLGEN_I2CCMD_R_MASK (0x1 << I40E_GLGEN_I2CCMD_R_SHIFT)
+#define I40E_GLGEN_I2CCMD_E_SHIFT 31
+#define I40E_GLGEN_I2CCMD_E_MASK (0x1 << I40E_GLGEN_I2CCMD_E_SHIFT)
+#define I40E_GLGEN_I2CPARAMS(_i) (0x000881AC + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLGEN_I2CPARAMS_MAX_INDEX 3
+#define I40E_GLGEN_I2CPARAMS_WRITE_TIME_SHIFT 0
+#define I40E_GLGEN_I2CPARAMS_WRITE_TIME_MASK (0x1F << I40E_GLGEN_I2CPARAMS_WRITE_TIME_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_READ_TIME_SHIFT 5
+#define I40E_GLGEN_I2CPARAMS_READ_TIME_MASK (0x7 << I40E_GLGEN_I2CPARAMS_READ_TIME_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_I2CBB_EN_SHIFT 8
+#define I40E_GLGEN_I2CPARAMS_I2CBB_EN_MASK (0x1 << I40E_GLGEN_I2CPARAMS_I2CBB_EN_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_CLK_SHIFT 9
+#define I40E_GLGEN_I2CPARAMS_CLK_MASK (0x1 << I40E_GLGEN_I2CPARAMS_CLK_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_DATA_OUT_SHIFT 10
+#define I40E_GLGEN_I2CPARAMS_DATA_OUT_MASK (0x1 << I40E_GLGEN_I2CPARAMS_DATA_OUT_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_DATA_OE_N_SHIFT 11
+#define I40E_GLGEN_I2CPARAMS_DATA_OE_N_MASK (0x1 << I40E_GLGEN_I2CPARAMS_DATA_OE_N_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_DATA_IN_SHIFT 12
+#define I40E_GLGEN_I2CPARAMS_DATA_IN_MASK (0x1 << I40E_GLGEN_I2CPARAMS_DATA_IN_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_CLK_OE_N_SHIFT 13
+#define I40E_GLGEN_I2CPARAMS_CLK_OE_N_MASK (0x1 << I40E_GLGEN_I2CPARAMS_CLK_OE_N_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_CLK_IN_SHIFT 14
+#define I40E_GLGEN_I2CPARAMS_CLK_IN_MASK (0x1 << I40E_GLGEN_I2CPARAMS_CLK_IN_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_SHIFT 15
+#define I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_MASK (0x1 << I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_SHIFT 31
+#define I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_MASK (0x1 << I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_SHIFT)
+#define I40E_GLGEN_LED_CTL 0x00088178
+#define I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_SHIFT 0
+#define I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_MASK (0x1 << I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_SHIFT)
+#define I40E_GLGEN_MDIO_CTRL(_i) (0x000881D0 + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLGEN_MDIO_CTRL_MAX_INDEX 3
+#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_SHIFT 0
+#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_MASK (0x1FFFF << I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_SHIFT)
+#define I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT 17
+#define I40E_GLGEN_MDIO_CTRL_CONTMDC_MASK (0x1 << I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT)
+#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT 18
+#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_MASK (0x3FFF << I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL(_i) (0x000881C0 + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLGEN_MDIO_I2C_SEL_MAX_INDEX 3
+#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_SHIFT 0
+#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_MASK (0x1 << I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT 1
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_MASK (0xF << I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_SHIFT 5
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_MASK (0x1F << I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_SHIFT 10
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_MASK (0x1F << I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_SHIFT 15
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_MASK (0x1F << I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_SHIFT 20
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_MASK (0x1F << I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_SHIFT 25
+#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_MASK (0xF << I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_SHIFT 31
+#define I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_MASK (0x1 << I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_SHIFT)
+#define I40E_GLGEN_MSCA(_i) (0x0008818C + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLGEN_MSCA_MAX_INDEX 3
+#define I40E_GLGEN_MSCA_MDIADD_SHIFT 0
+#define I40E_GLGEN_MSCA_MDIADD_MASK (0xFFFF << I40E_GLGEN_MSCA_MDIADD_SHIFT)
+#define I40E_GLGEN_MSCA_DEVADD_SHIFT 16
+#define I40E_GLGEN_MSCA_DEVADD_MASK (0x1F << I40E_GLGEN_MSCA_DEVADD_SHIFT)
+#define I40E_GLGEN_MSCA_PHYADD_SHIFT 21
+#define I40E_GLGEN_MSCA_PHYADD_MASK (0x1F << I40E_GLGEN_MSCA_PHYADD_SHIFT)
+#define I40E_GLGEN_MSCA_OPCODE_SHIFT 26
+#define I40E_GLGEN_MSCA_OPCODE_MASK (0x3 << I40E_GLGEN_MSCA_OPCODE_SHIFT)
+#define I40E_GLGEN_MSCA_STCODE_SHIFT 28
+#define I40E_GLGEN_MSCA_STCODE_MASK (0x3 << I40E_GLGEN_MSCA_STCODE_SHIFT)
+#define I40E_GLGEN_MSCA_MDICMD_SHIFT 30
+#define I40E_GLGEN_MSCA_MDICMD_MASK (0x1 << I40E_GLGEN_MSCA_MDICMD_SHIFT)
+#define I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT 31
+#define I40E_GLGEN_MSCA_MDIINPROGEN_MASK (0x1 << I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT)
+#define I40E_GLGEN_MSRWD(_i) (0x0008819C + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLGEN_MSRWD_MAX_INDEX 3
+#define I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT 0
+#define I40E_GLGEN_MSRWD_MDIWRDATA_MASK (0xFFFF << I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT)
+#define I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT 16
+#define I40E_GLGEN_MSRWD_MDIRDDATA_MASK (0xFFFF << I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT)
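+
+/* Hypothetical usage sketch (not part of the original patch): an MDIO
+ * read is composed by packing the address fields into GLGEN_MSCA, setting
+ * MDICMD to start the transaction, polling until the hardware clears it,
+ * and then pulling the result out of GLGEN_MSRWD. The opcode encoding is
+ * not spelled out by these defines, so it is taken as a parameter here;
+ * udelay() is the kernel's busy-wait helper.
+ */
+static int example_mdio_read(struct i40e_hw *hw, u32 port, u32 opcode,
+			     u32 devad, u32 phyad, u16 reg, u16 *val)
+{
+	int retries = 1000;
+	u32 cmd;
+
+	cmd = ((u32)reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) |
+	      (devad << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
+	      (phyad << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
+	      (opcode << I40E_GLGEN_MSCA_OPCODE_SHIFT) |
+	      I40E_GLGEN_MSCA_MDICMD_MASK;
+	wr32(hw, I40E_GLGEN_MSCA(port), cmd);
+
+	while (retries-- &&
+	       (rd32(hw, I40E_GLGEN_MSCA(port)) & I40E_GLGEN_MSCA_MDICMD_MASK))
+		udelay(10);
+	if (retries < 0)
+		return -1;	/* timed out waiting for MDICMD to clear */
+
+	*val = (rd32(hw, I40E_GLGEN_MSRWD(port)) &
+		I40E_GLGEN_MSRWD_MDIRDDATA_MASK) >>
+		I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT;
+	return 0;
+}
+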
+#define I40E_GLGEN_PCIFCNCNT 0x001C0AB4
+#define I40E_GLGEN_PCIFCNCNT_PCIPFCNT_SHIFT 0
+#define I40E_GLGEN_PCIFCNCNT_PCIPFCNT_MASK (0x1F << I40E_GLGEN_PCIFCNCNT_PCIPFCNT_SHIFT)
+#define I40E_GLGEN_PCIFCNCNT_PCIVFCNT_SHIFT 16
+#define I40E_GLGEN_PCIFCNCNT_PCIVFCNT_MASK (0xFF << I40E_GLGEN_PCIFCNCNT_PCIVFCNT_SHIFT)
+#define I40E_GLGEN_PE_ENA 0x000B81A0
+#define I40E_GLGEN_PE_ENA_PE_ENA_SHIFT 0
+#define I40E_GLGEN_PE_ENA_PE_ENA_MASK (0x1 << I40E_GLGEN_PE_ENA_PE_ENA_SHIFT)
+#define I40E_GLGEN_PE_ENA_PE_CLK_SRC_SEL_SHIFT 1
+#define I40E_GLGEN_PE_ENA_PE_CLK_SRC_SEL_MASK (0x3 << I40E_GLGEN_PE_ENA_PE_CLK_SRC_SEL_SHIFT)
+#define I40E_GLGEN_RSTAT 0x000B8188
+#define I40E_GLGEN_RSTAT_DEVSTATE_SHIFT 0
+#define I40E_GLGEN_RSTAT_DEVSTATE_MASK (0x3 << I40E_GLGEN_RSTAT_DEVSTATE_SHIFT)
+#define I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT 2
+#define I40E_GLGEN_RSTAT_RESET_TYPE_MASK (0x3 << I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT)
+#define I40E_GLGEN_RSTAT_CORERCNT_SHIFT 4
+#define I40E_GLGEN_RSTAT_CORERCNT_MASK (0x3 << I40E_GLGEN_RSTAT_CORERCNT_SHIFT)
+#define I40E_GLGEN_RSTAT_GLOBRCNT_SHIFT 6
+#define I40E_GLGEN_RSTAT_GLOBRCNT_MASK (0x3 << I40E_GLGEN_RSTAT_GLOBRCNT_SHIFT)
+#define I40E_GLGEN_RSTAT_EMPRCNT_SHIFT 8
+#define I40E_GLGEN_RSTAT_EMPRCNT_MASK (0x3 << I40E_GLGEN_RSTAT_EMPRCNT_SHIFT)
+#define I40E_GLGEN_RSTAT_TIME_TO_RST_SHIFT 10
+#define I40E_GLGEN_RSTAT_TIME_TO_RST_MASK (0x3F << I40E_GLGEN_RSTAT_TIME_TO_RST_SHIFT)
+#define I40E_GLGEN_RSTCTL 0x000B8180
+#define I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT 0
+#define I40E_GLGEN_RSTCTL_GRSTDEL_MASK (0x3F << I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT)
+#define I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT 8
+#define I40E_GLGEN_RSTCTL_ECC_RST_ENA_MASK (0x1 << I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT)
+#define I40E_GLGEN_RSTENA_EMP 0x000B818C
+#define I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_SHIFT 0
+#define I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_MASK (0x1 << I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_SHIFT)
+#define I40E_GLGEN_RTRIG 0x000B8190
+#define I40E_GLGEN_RTRIG_CORER_SHIFT 0
+#define I40E_GLGEN_RTRIG_CORER_MASK (0x1 << I40E_GLGEN_RTRIG_CORER_SHIFT)
+#define I40E_GLGEN_RTRIG_GLOBR_SHIFT 1
+#define I40E_GLGEN_RTRIG_GLOBR_MASK (0x1 << I40E_GLGEN_RTRIG_GLOBR_SHIFT)
+#define I40E_GLGEN_RTRIG_EMPFWR_SHIFT 2
+#define I40E_GLGEN_RTRIG_EMPFWR_MASK (0x1 << I40E_GLGEN_RTRIG_EMPFWR_SHIFT)
+#define I40E_GLGEN_STAT 0x000B612C
+#define I40E_GLGEN_STAT_HWRSVD0_SHIFT 0
+#define I40E_GLGEN_STAT_HWRSVD0_MASK (0x3 << I40E_GLGEN_STAT_HWRSVD0_SHIFT)
+#define I40E_GLGEN_STAT_DCBEN_SHIFT 2
+#define I40E_GLGEN_STAT_DCBEN_MASK (0x1 << I40E_GLGEN_STAT_DCBEN_SHIFT)
+#define I40E_GLGEN_STAT_VTEN_SHIFT 3
+#define I40E_GLGEN_STAT_VTEN_MASK (0x1 << I40E_GLGEN_STAT_VTEN_SHIFT)
+#define I40E_GLGEN_STAT_FCOEN_SHIFT 4
+#define I40E_GLGEN_STAT_FCOEN_MASK (0x1 << I40E_GLGEN_STAT_FCOEN_SHIFT)
+#define I40E_GLGEN_STAT_EVBEN_SHIFT 5
+#define I40E_GLGEN_STAT_EVBEN_MASK (0x1 << I40E_GLGEN_STAT_EVBEN_SHIFT)
+#define I40E_GLGEN_STAT_HWRSVD1_SHIFT 6
+#define I40E_GLGEN_STAT_HWRSVD1_MASK (0x3 << I40E_GLGEN_STAT_HWRSVD1_SHIFT)
+#define I40E_GLGEN_VFLRSTAT(_i) (0x00092600 + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLGEN_VFLRSTAT_MAX_INDEX 3
+#define I40E_GLGEN_VFLRSTAT_VFLRE_SHIFT 0
+#define I40E_GLGEN_VFLRSTAT_VFLRE_MASK (0xFFFFFFFF << I40E_GLGEN_VFLRSTAT_VFLRE_SHIFT)
+#define I40E_GLVFGEN_TIMER 0x000881BC
+#define I40E_GLVFGEN_TIMER_GTIME_SHIFT 0
+#define I40E_GLVFGEN_TIMER_GTIME_MASK (0xFFFFFFFF << I40E_GLVFGEN_TIMER_GTIME_SHIFT)
+#define I40E_PFGEN_CTRL 0x00092400
+#define I40E_PFGEN_CTRL_PFSWR_SHIFT 0
+#define I40E_PFGEN_CTRL_PFSWR_MASK (0x1 << I40E_PFGEN_CTRL_PFSWR_SHIFT)
+#define I40E_PFGEN_DRUN 0x00092500
+#define I40E_PFGEN_DRUN_DRVUNLD_SHIFT 0
+#define I40E_PFGEN_DRUN_DRVUNLD_MASK (0x1 << I40E_PFGEN_DRUN_DRVUNLD_SHIFT)
+#define I40E_PFGEN_PORTNUM 0x001C0480
+#define I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT 0
+#define I40E_PFGEN_PORTNUM_PORT_NUM_MASK (0x3 << I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT)
+#define I40E_PFGEN_STATE 0x00088000
+#define I40E_PFGEN_STATE_PFPEEN_SHIFT 0
+#define I40E_PFGEN_STATE_PFPEEN_MASK (0x1 << I40E_PFGEN_STATE_PFPEEN_SHIFT)
+#define I40E_PFGEN_STATE_PFFCEN_SHIFT 1
+#define I40E_PFGEN_STATE_PFFCEN_MASK (0x1 << I40E_PFGEN_STATE_PFFCEN_SHIFT)
+#define I40E_PFGEN_STATE_PFLINKEN_SHIFT 2
+#define I40E_PFGEN_STATE_PFLINKEN_MASK (0x1 << I40E_PFGEN_STATE_PFLINKEN_SHIFT)
+#define I40E_PFGEN_STATE_PFSCEN_SHIFT 3
+#define I40E_PFGEN_STATE_PFSCEN_MASK (0x1 << I40E_PFGEN_STATE_PFSCEN_SHIFT)
+#define I40E_PRTGEN_CNF 0x000B8120
+#define I40E_PRTGEN_CNF_PORT_DIS_SHIFT 0
+#define I40E_PRTGEN_CNF_PORT_DIS_MASK (0x1 << I40E_PRTGEN_CNF_PORT_DIS_SHIFT)
+#define I40E_PRTGEN_CNF_ALLOW_PORT_DIS_SHIFT 1
+#define I40E_PRTGEN_CNF_ALLOW_PORT_DIS_MASK (0x1 << I40E_PRTGEN_CNF_ALLOW_PORT_DIS_SHIFT)
+#define I40E_PRTGEN_CNF_EMP_PORT_DIS_SHIFT 2
+#define I40E_PRTGEN_CNF_EMP_PORT_DIS_MASK (0x1 << I40E_PRTGEN_CNF_EMP_PORT_DIS_SHIFT)
+#define I40E_PRTGEN_CNF2 0x000B8160
+#define I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_SHIFT 0
+#define I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_MASK (0x1 << I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_SHIFT)
+#define I40E_PRTGEN_STATUS 0x000B8100
+#define I40E_PRTGEN_STATUS_PORT_VALID_SHIFT 0
+#define I40E_PRTGEN_STATUS_PORT_VALID_MASK (0x1 << I40E_PRTGEN_STATUS_PORT_VALID_SHIFT)
+#define I40E_PRTGEN_STATUS_PORT_ACTIVE_SHIFT 1
+#define I40E_PRTGEN_STATUS_PORT_ACTIVE_MASK (0x1 << I40E_PRTGEN_STATUS_PORT_ACTIVE_SHIFT)
+#define I40E_VFGEN_RSTAT1(_VF) (0x00074400 + ((_VF) * 4)) /* _VF=0...127 */
+#define I40E_VFGEN_RSTAT1_MAX_INDEX 127
+#define I40E_VFGEN_RSTAT1_VFR_STATE_SHIFT 0
+#define I40E_VFGEN_RSTAT1_VFR_STATE_MASK (0x3 << I40E_VFGEN_RSTAT1_VFR_STATE_SHIFT)
+#define I40E_VPGEN_VFRSTAT(_VF) (0x00091C00 + ((_VF) * 4)) /* _VF=0...127 */
+#define I40E_VPGEN_VFRSTAT_MAX_INDEX 127
+#define I40E_VPGEN_VFRSTAT_VFRD_SHIFT 0
+#define I40E_VPGEN_VFRSTAT_VFRD_MASK (0x1 << I40E_VPGEN_VFRSTAT_VFRD_SHIFT)
+#define I40E_VPGEN_VFRTRIG(_VF) (0x00091800 + ((_VF) * 4)) /* _VF=0...127 */
+#define I40E_VPGEN_VFRTRIG_MAX_INDEX 127
+#define I40E_VPGEN_VFRTRIG_VFSWR_SHIFT 0
+#define I40E_VPGEN_VFRTRIG_VFSWR_MASK (0x1 << I40E_VPGEN_VFRTRIG_VFSWR_SHIFT)
+#define I40E_VSIGEN_RSTAT(_VSI) (0x00090800 + ((_VSI) * 4)) /* _VSI=0...383 */
+#define I40E_VSIGEN_RSTAT_MAX_INDEX 383
+#define I40E_VSIGEN_RSTAT_VMRD_SHIFT 0
+#define I40E_VSIGEN_RSTAT_VMRD_MASK (0x1 << I40E_VSIGEN_RSTAT_VMRD_SHIFT)
+#define I40E_VSIGEN_RTRIG(_VSI) (0x00090000 + ((_VSI) * 4)) /* _VSI=0...383 */
+#define I40E_VSIGEN_RTRIG_MAX_INDEX 383
+#define I40E_VSIGEN_RTRIG_VMSWR_SHIFT 0
+#define I40E_VSIGEN_RTRIG_VMSWR_MASK (0x1 << I40E_VSIGEN_RTRIG_VMSWR_SHIFT)
+#define I40E_GLHMC_APBVTINUSEBASE(_i) (0x000C4A00 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_APBVTINUSEBASE_MAX_INDEX 15
+#define I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT 0
+#define I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_MASK (0xFFFFFF << I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT)
+#define I40E_GLHMC_CEQPART(_i) (0x001312C0 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_CEQPART_MAX_INDEX 15
+#define I40E_GLHMC_CEQPART_PMCEQBASE_SHIFT 0
+#define I40E_GLHMC_CEQPART_PMCEQBASE_MASK (0xFF << I40E_GLHMC_CEQPART_PMCEQBASE_SHIFT)
+#define I40E_GLHMC_CEQPART_PMCEQSIZE_SHIFT 16
+#define I40E_GLHMC_CEQPART_PMCEQSIZE_MASK (0x1FF << I40E_GLHMC_CEQPART_PMCEQSIZE_SHIFT)
+#define I40E_GLHMC_DBCQPART(_i) (0x00131240 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_DBCQPART_MAX_INDEX 15
+#define I40E_GLHMC_DBCQPART_PMDBCQBASE_SHIFT 0
+#define I40E_GLHMC_DBCQPART_PMDBCQBASE_MASK (0x3FFF << I40E_GLHMC_DBCQPART_PMDBCQBASE_SHIFT)
+#define I40E_GLHMC_DBCQPART_PMDBCQSIZE_SHIFT 16
+#define I40E_GLHMC_DBCQPART_PMDBCQSIZE_MASK (0x7FFF << I40E_GLHMC_DBCQPART_PMDBCQSIZE_SHIFT)
+#define I40E_GLHMC_DBQPPART(_i) (0x00138D80 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_DBQPPART_MAX_INDEX 15
+#define I40E_GLHMC_DBQPPART_PMDBQPBASE_SHIFT 0
+#define I40E_GLHMC_DBQPPART_PMDBQPBASE_MASK (0x3FFF << I40E_GLHMC_DBQPPART_PMDBQPBASE_SHIFT)
+#define I40E_GLHMC_DBQPPART_PMDBQPSIZE_SHIFT 16
+#define I40E_GLHMC_DBQPPART_PMDBQPSIZE_MASK (0x7FFF << I40E_GLHMC_DBQPPART_PMDBQPSIZE_SHIFT)
+#define I40E_GLHMC_FCOEDDPBASE(_i) (0x000C6600 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_FCOEDDPBASE_MAX_INDEX 15
+#define I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_SHIFT 0
+#define I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_MASK (0xFFFFFF << I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_SHIFT)
+#define I40E_GLHMC_FCOEDDPCNT(_i) (0x000C6700 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_FCOEDDPCNT_MAX_INDEX 15
+#define I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_SHIFT 0
+#define I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_MASK (0xFFFFF << I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_SHIFT)
+#define I40E_GLHMC_FCOEDDPOBJSZ 0x000C2010
+#define I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_SHIFT 0
+#define I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_MASK (0xF << I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_SHIFT)
+#define I40E_GLHMC_FCOEFBASE(_i) (0x000C6800 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_FCOEFBASE_MAX_INDEX 15
+#define I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_SHIFT 0
+#define I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_MASK (0xFFFFFF << I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_SHIFT)
+#define I40E_GLHMC_FCOEFCNT(_i) (0x000C6900 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_FCOEFCNT_MAX_INDEX 15
+#define I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_SHIFT 0
+#define I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_MASK (0x7FFFFF << I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_SHIFT)
+#define I40E_GLHMC_FCOEFMAX 0x000C20D0
+#define I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT 0
+#define I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_MASK (0xFFFF << I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT)
+#define I40E_GLHMC_FCOEFOBJSZ 0x000C2018
+#define I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_SHIFT 0
+#define I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_MASK (0xF << I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_SHIFT)
+#define I40E_GLHMC_FCOEMAX 0x000C2014
+#define I40E_GLHMC_FCOEMAX_PMFCOEMAX_SHIFT 0
+#define I40E_GLHMC_FCOEMAX_PMFCOEMAX_MASK (0x1FFF << I40E_GLHMC_FCOEMAX_PMFCOEMAX_SHIFT)
+#define I40E_GLHMC_FSIAVBASE(_i) (0x000C5600 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_FSIAVBASE_MAX_INDEX 15
+#define I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_SHIFT 0
+#define I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_MASK (0xFFFFFF << I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_SHIFT)
+#define I40E_GLHMC_FSIAVCNT(_i) (0x000C5700 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_FSIAVCNT_MAX_INDEX 15
+#define I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_SHIFT 0
+#define I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_MASK (0x1FFFFFFF << I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_SHIFT)
+#define I40E_GLHMC_FSIAVCNT_RSVD_SHIFT 29
+#define I40E_GLHMC_FSIAVCNT_RSVD_MASK (0x7 << I40E_GLHMC_FSIAVCNT_RSVD_SHIFT)
+#define I40E_GLHMC_FSIAVMAX 0x000C2068
+#define I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_SHIFT 0
+#define I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_MASK (0x1FFFF << I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_SHIFT)
+#define I40E_GLHMC_FSIAVOBJSZ 0x000C2064
+#define I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_SHIFT 0
+#define I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_MASK (0xF << I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_SHIFT)
+#define I40E_GLHMC_FSIMCBASE(_i) (0x000C6000 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_FSIMCBASE_MAX_INDEX 15
+#define I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_SHIFT 0
+#define I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_MASK (0xFFFFFF << I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_SHIFT)
+#define I40E_GLHMC_FSIMCCNT(_i) (0x000C6100 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_FSIMCCNT_MAX_INDEX 15
+#define I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_SHIFT 0
+#define I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_MASK (0x1FFFFFFF << I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_SHIFT)
+#define I40E_GLHMC_FSIMCMAX 0x000C2060
+#define I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_SHIFT 0
+#define I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_MASK (0x3FFF << I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_SHIFT)
+#define I40E_GLHMC_FSIMCOBJSZ 0x000C205C
+#define I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_SHIFT 0
+#define I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_MASK (0xF << I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_SHIFT)
+#define I40E_GLHMC_LANQMAX 0x000C2008
+#define I40E_GLHMC_LANQMAX_PMLANQMAX_SHIFT 0
+#define I40E_GLHMC_LANQMAX_PMLANQMAX_MASK (0x7FF << I40E_GLHMC_LANQMAX_PMLANQMAX_SHIFT)
+#define I40E_GLHMC_LANRXBASE(_i) (0x000C6400 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_LANRXBASE_MAX_INDEX 15
+#define I40E_GLHMC_LANRXBASE_FPMLANRXBASE_SHIFT 0
+#define I40E_GLHMC_LANRXBASE_FPMLANRXBASE_MASK (0xFFFFFF << I40E_GLHMC_LANRXBASE_FPMLANRXBASE_SHIFT)
+#define I40E_GLHMC_LANRXCNT(_i) (0x000C6500 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_LANRXCNT_MAX_INDEX 15
+#define I40E_GLHMC_LANRXCNT_FPMLANRXCNT_SHIFT 0
+#define I40E_GLHMC_LANRXCNT_FPMLANRXCNT_MASK (0x7FF << I40E_GLHMC_LANRXCNT_FPMLANRXCNT_SHIFT)
+#define I40E_GLHMC_LANRXOBJSZ 0x000C200C
+#define I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_SHIFT 0
+#define I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_MASK (0xF << I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_SHIFT)
+#define I40E_GLHMC_LANTXBASE(_i) (0x000C6200 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_LANTXBASE_MAX_INDEX 15
+#define I40E_GLHMC_LANTXBASE_FPMLANTXBASE_SHIFT 0
+#define I40E_GLHMC_LANTXBASE_FPMLANTXBASE_MASK (0xFFFFFF << I40E_GLHMC_LANTXBASE_FPMLANTXBASE_SHIFT)
+#define I40E_GLHMC_LANTXBASE_RSVD_SHIFT 24
+#define I40E_GLHMC_LANTXBASE_RSVD_MASK (0xFF << I40E_GLHMC_LANTXBASE_RSVD_SHIFT)
+#define I40E_GLHMC_LANTXCNT(_i) (0x000C6300 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_LANTXCNT_MAX_INDEX 15
+#define I40E_GLHMC_LANTXCNT_FPMLANTXCNT_SHIFT 0
+#define I40E_GLHMC_LANTXCNT_FPMLANTXCNT_MASK (0x7FF << I40E_GLHMC_LANTXCNT_FPMLANTXCNT_SHIFT)
+#define I40E_GLHMC_LANTXOBJSZ 0x000C2004
+#define I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_SHIFT 0
+#define I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_MASK (0xF << I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_SHIFT)
+#define I40E_GLHMC_PEARPBASE(_i) (0x000C4800 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEARPBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEARPBASE_FPMPEARPBASE_SHIFT 0
+#define I40E_GLHMC_PEARPBASE_FPMPEARPBASE_MASK (0xFFFFFF << I40E_GLHMC_PEARPBASE_FPMPEARPBASE_SHIFT)
+#define I40E_GLHMC_PEARPCNT(_i) (0x000C4900 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEARPCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEARPCNT_FPMPEARPCNT_SHIFT 0
+#define I40E_GLHMC_PEARPCNT_FPMPEARPCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEARPCNT_FPMPEARPCNT_SHIFT)
+#define I40E_GLHMC_PEARPMAX 0x000C2038
+#define I40E_GLHMC_PEARPMAX_PMPEARPMAX_SHIFT 0
+#define I40E_GLHMC_PEARPMAX_PMPEARPMAX_MASK (0x1FFFF << I40E_GLHMC_PEARPMAX_PMPEARPMAX_SHIFT)
+#define I40E_GLHMC_PEARPOBJSZ 0x000C2034
+#define I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_SHIFT 0
+#define I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_MASK (0x7 << I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_SHIFT)
+#define I40E_GLHMC_PECQBASE(_i) (0x000C4200 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PECQBASE_MAX_INDEX 15
+#define I40E_GLHMC_PECQBASE_FPMPECQBASE_SHIFT 0
+#define I40E_GLHMC_PECQBASE_FPMPECQBASE_MASK (0xFFFFFF << I40E_GLHMC_PECQBASE_FPMPECQBASE_SHIFT)
+#define I40E_GLHMC_PECQCNT(_i) (0x000C4300 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PECQCNT_MAX_INDEX 15
+#define I40E_GLHMC_PECQCNT_FPMPECQCNT_SHIFT 0
+#define I40E_GLHMC_PECQCNT_FPMPECQCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PECQCNT_FPMPECQCNT_SHIFT)
+#define I40E_GLHMC_PECQOBJSZ 0x000C2020
+#define I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_SHIFT 0
+#define I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_MASK (0xF << I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_SHIFT)
+#define I40E_GLHMC_PEHTCNT(_i) (0x000C4700 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEHTCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEHTCNT_FPMPEHTCNT_SHIFT 0
+#define I40E_GLHMC_PEHTCNT_FPMPEHTCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEHTCNT_FPMPEHTCNT_SHIFT)
+#define I40E_GLHMC_PEHTEBASE(_i) (0x000C4600 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEHTEBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_SHIFT 0
+#define I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_MASK (0xFFFFFF << I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_SHIFT)
+#define I40E_GLHMC_PEHTEOBJSZ 0x000C202C
+#define I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_SHIFT 0
+#define I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_MASK (0xF << I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_SHIFT)
+#define I40E_GLHMC_PEHTMAX 0x000C2030
+#define I40E_GLHMC_PEHTMAX_PMPEHTMAX_SHIFT 0
+#define I40E_GLHMC_PEHTMAX_PMPEHTMAX_MASK (0x1FFFFF << I40E_GLHMC_PEHTMAX_PMPEHTMAX_SHIFT)
+#define I40E_GLHMC_PEMRBASE(_i) (0x000C4C00 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEMRBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEMRBASE_FPMPEMRBASE_SHIFT 0
+#define I40E_GLHMC_PEMRBASE_FPMPEMRBASE_MASK (0xFFFFFF << I40E_GLHMC_PEMRBASE_FPMPEMRBASE_SHIFT)
+#define I40E_GLHMC_PEMRCNT(_i) (0x000C4D00 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEMRCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEMRCNT_FPMPEMRSZ_SHIFT 0
+#define I40E_GLHMC_PEMRCNT_FPMPEMRSZ_MASK (0x1FFFFFFF << I40E_GLHMC_PEMRCNT_FPMPEMRSZ_SHIFT)
+#define I40E_GLHMC_PEMRMAX 0x000C2040
+#define I40E_GLHMC_PEMRMAX_PMPEMRMAX_SHIFT 0
+#define I40E_GLHMC_PEMRMAX_PMPEMRMAX_MASK (0x7FFFFF << I40E_GLHMC_PEMRMAX_PMPEMRMAX_SHIFT)
+#define I40E_GLHMC_PEMROBJSZ 0x000C203C
+#define I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_SHIFT 0
+#define I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_MASK (0xF << I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_SHIFT)
+#define I40E_GLHMC_PEPBLBASE(_i) (0x000C5800 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEPBLBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_SHIFT 0
+#define I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_MASK (0xFFFFFF << I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_SHIFT)
+#define I40E_GLHMC_PEPBLCNT(_i) (0x000C5900 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEPBLCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_SHIFT 0
+#define I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_SHIFT)
+#define I40E_GLHMC_PEPBLMAX 0x000C206C
+#define I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_SHIFT 0
+#define I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_MASK (0x1FFFFFFF << I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_SHIFT)
+#define I40E_GLHMC_PEQ1BASE(_i) (0x000C5200 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEQ1BASE_MAX_INDEX 15
+#define I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_SHIFT 0
+#define I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_MASK (0xFFFFFF << I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_SHIFT)
+#define I40E_GLHMC_PEQ1CNT(_i) (0x000C5300 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEQ1CNT_MAX_INDEX 15
+#define I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_SHIFT 0
+#define I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_SHIFT)
+#define I40E_GLHMC_PEQ1FLBASE(_i) (0x000C5400 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEQ1FLBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_SHIFT 0
+#define I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_MASK (0xFFFFFF << I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_SHIFT)
+#define I40E_GLHMC_PEQ1FLCNT(_i) (0x000C5500 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEQ1FLCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEQ1FLCNT_FPMPEQ1FLCNT_SHIFT 0
+#define I40E_GLHMC_PEQ1FLCNT_FPMPEQ1FLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEQ1FLCNT_FPMPEQ1FLCNT_SHIFT)
+#define I40E_GLHMC_PEQ1FLMAX 0x000C2058
+#define I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_SHIFT 0
+#define I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_MASK (0x3FFFFF << I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_SHIFT)
+#define I40E_GLHMC_PEQ1MAX 0x000C2054
+#define I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_SHIFT 0
+#define I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_MASK (0x3FFFFFF << I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_SHIFT)
+#define I40E_GLHMC_PEQ1OBJSZ 0x000C2050
+#define I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_SHIFT 0
+#define I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_MASK (0xF << I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_SHIFT)
+#define I40E_GLHMC_PEQPBASE(_i) (0x000C4000 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEQPBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEQPBASE_FPMPEQPBASE_SHIFT 0
+#define I40E_GLHMC_PEQPBASE_FPMPEQPBASE_MASK (0xFFFFFF << I40E_GLHMC_PEQPBASE_FPMPEQPBASE_SHIFT)
+#define I40E_GLHMC_PEQPCNT(_i) (0x000C4100 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEQPCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEQPCNT_FPMPEQPCNT_SHIFT 0
+#define I40E_GLHMC_PEQPCNT_FPMPEQPCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEQPCNT_FPMPEQPCNT_SHIFT)
+#define I40E_GLHMC_PEQPOBJSZ 0x000C201c
+#define I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_SHIFT 0
+#define I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_MASK (0xF << I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_SHIFT)
+#define I40E_GLHMC_PESRQBASE(_i) (0x000C4400 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PESRQBASE_MAX_INDEX 15
+#define I40E_GLHMC_PESRQBASE_FPMPESRQBASE_SHIFT 0
+#define I40E_GLHMC_PESRQBASE_FPMPESRQBASE_MASK (0xFFFFFF << I40E_GLHMC_PESRQBASE_FPMPESRQBASE_SHIFT)
+#define I40E_GLHMC_PESRQCNT(_i) (0x000C4500 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PESRQCNT_MAX_INDEX 15
+#define I40E_GLHMC_PESRQCNT_FPMPESRQCNT_SHIFT 0
+#define I40E_GLHMC_PESRQCNT_FPMPESRQCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PESRQCNT_FPMPESRQCNT_SHIFT)
+#define I40E_GLHMC_PESRQMAX 0x000C2028
+#define I40E_GLHMC_PESRQMAX_PMPESRQMAX_SHIFT 0
+#define I40E_GLHMC_PESRQMAX_PMPESRQMAX_MASK (0xFFFF << I40E_GLHMC_PESRQMAX_PMPESRQMAX_SHIFT)
+#define I40E_GLHMC_PESRQOBJSZ 0x000C2024
+#define I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_SHIFT 0
+#define I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_MASK (0xF << I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_SHIFT)
+#define I40E_GLHMC_PESRQOBJSZ_RSVD_SHIFT 4
+#define I40E_GLHMC_PESRQOBJSZ_RSVD_MASK (0xFFFFFFF << I40E_GLHMC_PESRQOBJSZ_RSVD_SHIFT)
+#define I40E_GLHMC_PETIMERBASE(_i) (0x000C5A00 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PETIMERBASE_MAX_INDEX 15
+#define I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_SHIFT 0
+#define I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_MASK (0xFFFFFF << I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_SHIFT)
+#define I40E_GLHMC_PETIMERCNT(_i) (0x000C5B00 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PETIMERCNT_MAX_INDEX 15
+#define I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_SHIFT 0
+#define I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_SHIFT)
+#define I40E_GLHMC_PETIMERMAX 0x000C2084
+#define I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_SHIFT 0
+#define I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_MASK (0x1FFFFFFF << I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_SHIFT)
+#define I40E_GLHMC_PETIMEROBJSZ 0x000C2080
+#define I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_SHIFT 0
+#define I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_MASK (0xF << I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_SHIFT)
+#define I40E_GLHMC_PEXFBASE(_i) (0x000C4e00 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEXFBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEXFBASE_FPMPEXFBASE_SHIFT 0
+#define I40E_GLHMC_PEXFBASE_FPMPEXFBASE_MASK (0xFFFFFF << I40E_GLHMC_PEXFBASE_FPMPEXFBASE_SHIFT)
+#define I40E_GLHMC_PEXFCNT(_i) (0x000C4f00 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEXFCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEXFCNT_FPMPEXFCNT_SHIFT 0
+#define I40E_GLHMC_PEXFCNT_FPMPEXFCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEXFCNT_FPMPEXFCNT_SHIFT)
+#define I40E_GLHMC_PEXFFLBASE(_i) (0x000C5000 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEXFFLBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_SHIFT 0
+#define I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_MASK (0xFFFFFF << I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_SHIFT)
+#define I40E_GLHMC_PEXFFLCNT(_i) (0x000C5100 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEXFFLCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEXFFLCNT_FPMPEXFFLCNT_SHIFT 0
+#define I40E_GLHMC_PEXFFLCNT_FPMPEXFFLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEXFFLCNT_FPMPEXFFLCNT_SHIFT)
+#define I40E_GLHMC_PEXFFLMAX 0x000C204c
+#define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT 0
+#define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_MASK (0x3FFFFF << I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT)
+#define I40E_GLHMC_PEXFMAX 0x000C2048
+#define I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT 0
+#define I40E_GLHMC_PEXFMAX_PMPEXFMAX_MASK (0x3FFFFFF << I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT)
+#define I40E_GLHMC_PEXFOBJSZ 0x000C2044
+#define I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_SHIFT 0
+#define I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_MASK (0xF << I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_SHIFT)
+#define I40E_GLHMC_PEXFOBJSZ_RSVD_SHIFT 4
+#define I40E_GLHMC_PEXFOBJSZ_RSVD_MASK (0xFFFFFFF << I40E_GLHMC_PEXFOBJSZ_RSVD_SHIFT)
+#define I40E_GLHMC_PFASSIGN(_i) (0x000C0c00 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PFASSIGN_MAX_INDEX 15
+#define I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_SHIFT 0
+#define I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_MASK (0xF << I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_SHIFT)
+#define I40E_GLHMC_SDPART(_i) (0x000C0800 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_SDPART_MAX_INDEX 15
+#define I40E_GLHMC_SDPART_PMSDBASE_SHIFT 0
+#define I40E_GLHMC_SDPART_PMSDBASE_MASK (0xFFF << I40E_GLHMC_SDPART_PMSDBASE_SHIFT)
+#define I40E_GLHMC_SDPART_PMSDSIZE_SHIFT 16
+#define I40E_GLHMC_SDPART_PMSDSIZE_MASK (0x1FFF << I40E_GLHMC_SDPART_PMSDSIZE_SHIFT)
+#define I40E_GLHMC_VFAPBVTINUSEBASE(_i) (0x000Cca00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFAPBVTINUSEBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT 0
+#define I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_MASK (0xFFFFFF << I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT)
+#define I40E_GLHMC_VFCEQPART(_i) (0x00132240 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFCEQPART_MAX_INDEX 31
+#define I40E_GLHMC_VFCEQPART_PMCEQBASE_SHIFT 0
+#define I40E_GLHMC_VFCEQPART_PMCEQBASE_MASK (0xFF << I40E_GLHMC_VFCEQPART_PMCEQBASE_SHIFT)
+#define I40E_GLHMC_VFCEQPART_PMCEQSIZE_SHIFT 16
+#define I40E_GLHMC_VFCEQPART_PMCEQSIZE_MASK (0x1FF << I40E_GLHMC_VFCEQPART_PMCEQSIZE_SHIFT)
+#define I40E_GLHMC_VFDBCQPART(_i) (0x00132140 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFDBCQPART_MAX_INDEX 31
+#define I40E_GLHMC_VFDBCQPART_PMDBCQBASE_SHIFT 0
+#define I40E_GLHMC_VFDBCQPART_PMDBCQBASE_MASK (0x3FFF << I40E_GLHMC_VFDBCQPART_PMDBCQBASE_SHIFT)
+#define I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_SHIFT 16
+#define I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_MASK (0x7FFF << I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_SHIFT)
+#define I40E_GLHMC_VFDBQPPART(_i) (0x00138E00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFDBQPPART_MAX_INDEX 31
+#define I40E_GLHMC_VFDBQPPART_PMDBQPBASE_SHIFT 0
+#define I40E_GLHMC_VFDBQPPART_PMDBQPBASE_MASK (0x3FFF << I40E_GLHMC_VFDBQPPART_PMDBQPBASE_SHIFT)
+#define I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_SHIFT 16
+#define I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_MASK (0x7FFF << I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_SHIFT)
+#define I40E_GLHMC_VFFSIAVBASE(_i) (0x000Cd600 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFFSIAVBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_SHIFT 0
+#define I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_MASK (0xFFFFFF << I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_SHIFT)
+#define I40E_GLHMC_VFFSIAVCNT(_i) (0x000Cd700 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFFSIAVCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_SHIFT 0
+#define I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_SHIFT)
+#define I40E_GLHMC_VFFSIAVCNT_RSVD_SHIFT 29
+#define I40E_GLHMC_VFFSIAVCNT_RSVD_MASK (0x7 << I40E_GLHMC_VFFSIAVCNT_RSVD_SHIFT)
+#define I40E_GLHMC_VFPDINV(_i) (0x000C8300 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPDINV_MAX_INDEX 31
+#define I40E_GLHMC_VFPDINV_PMSDIDX_SHIFT 0
+#define I40E_GLHMC_VFPDINV_PMSDIDX_MASK (0xFFF << I40E_GLHMC_VFPDINV_PMSDIDX_SHIFT)
+#define I40E_GLHMC_VFPDINV_PMPDIDX_SHIFT 16
+#define I40E_GLHMC_VFPDINV_PMPDIDX_MASK (0x1FF << I40E_GLHMC_VFPDINV_PMPDIDX_SHIFT)
+#define I40E_GLHMC_VFPEARPBASE(_i) (0x000Cc800 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEARPBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_SHIFT 0
+#define I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_SHIFT)
+#define I40E_GLHMC_VFPEARPCNT(_i) (0x000Cc900 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEARPCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_SHIFT 0
+#define I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_SHIFT)
+#define I40E_GLHMC_VFPECQBASE(_i) (0x000Cc200 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPECQBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPECQBASE_FPMPECQBASE_SHIFT 0
+#define I40E_GLHMC_VFPECQBASE_FPMPECQBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPECQBASE_FPMPECQBASE_SHIFT)
+#define I40E_GLHMC_VFPECQCNT(_i) (0x000Cc300 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPECQCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPECQCNT_FPMPECQCNT_SHIFT 0
+#define I40E_GLHMC_VFPECQCNT_FPMPECQCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPECQCNT_FPMPECQCNT_SHIFT)
+#define I40E_GLHMC_VFPEHTCNT(_i) (0x000Cc700 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEHTCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_SHIFT 0
+#define I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_SHIFT)
+#define I40E_GLHMC_VFPEHTEBASE(_i) (0x000Cc600 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEHTEBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_SHIFT 0
+#define I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_SHIFT)
+#define I40E_GLHMC_VFPEMRBASE(_i) (0x000Ccc00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEMRBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_SHIFT 0
+#define I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_SHIFT)
+#define I40E_GLHMC_VFPEMRCNT(_i) (0x000Ccd00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEMRCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_SHIFT 0
+#define I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_SHIFT)
+#define I40E_GLHMC_VFPEPBLBASE(_i) (0x000Cd800 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEPBLBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_SHIFT 0
+#define I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_SHIFT)
+#define I40E_GLHMC_VFPEPBLCNT(_i) (0x000Cd900 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEPBLCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_SHIFT 0
+#define I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_SHIFT)
+#define I40E_GLHMC_VFPEQ1BASE(_i) (0x000Cd200 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEQ1BASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_SHIFT 0
+#define I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_SHIFT)
+#define I40E_GLHMC_VFPEQ1CNT(_i) (0x000Cd300 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEQ1CNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_SHIFT 0
+#define I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_SHIFT)
+#define I40E_GLHMC_VFPEQ1FLBASE(_i) (0x000Cd400 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEQ1FLBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_SHIFT 0
+#define I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_SHIFT)
+#define I40E_GLHMC_VFPEQ1FLCNT(_i) (0x000Cd500 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEQ1FLCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQ1FLCNT_FPMPEQ1FLCNT_SHIFT 0
+#define I40E_GLHMC_VFPEQ1FLCNT_FPMPEQ1FLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEQ1FLCNT_FPMPEQ1FLCNT_SHIFT)
+#define I40E_GLHMC_VFPEQPBASE(_i) (0x000Cc000 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEQPBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_SHIFT 0
+#define I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_SHIFT)
+#define I40E_GLHMC_VFPEQPCNT(_i) (0x000Cc100 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEQPCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_SHIFT 0
+#define I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_SHIFT)
+#define I40E_GLHMC_VFPESRQBASE(_i) (0x000Cc400 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPESRQBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_SHIFT 0
+#define I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_SHIFT)
+#define I40E_GLHMC_VFPESRQCNT(_i) (0x000Cc500 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPESRQCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_SHIFT 0
+#define I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_SHIFT)
+#define I40E_GLHMC_VFPETIMERBASE(_i) (0x000CDA00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPETIMERBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_SHIFT 0
+#define I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_SHIFT)
+#define I40E_GLHMC_VFPETIMERCNT(_i) (0x000CDB00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPETIMERCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_SHIFT 0
+#define I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_SHIFT)
+#define I40E_GLHMC_VFPEXFBASE(_i) (0x000Cce00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEXFBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_SHIFT 0
+#define I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_SHIFT)
+#define I40E_GLHMC_VFPEXFCNT(_i) (0x000Ccf00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEXFCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_SHIFT 0
+#define I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_SHIFT)
+#define I40E_GLHMC_VFPEXFFLBASE(_i) (0x000Cd000 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEXFFLBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_SHIFT 0
+#define I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_SHIFT)
+#define I40E_GLHMC_VFPEXFFLCNT(_i) (0x000Cd100 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEXFFLCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEXFFLCNT_FPMPEXFFLCNT_SHIFT 0
+#define I40E_GLHMC_VFPEXFFLCNT_FPMPEXFFLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEXFFLCNT_FPMPEXFFLCNT_SHIFT)
+#define I40E_GLHMC_VFSDPART(_i) (0x000C8800 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFSDPART_MAX_INDEX 31
+#define I40E_GLHMC_VFSDPART_PMSDBASE_SHIFT 0
+#define I40E_GLHMC_VFSDPART_PMSDBASE_MASK (0xFFF << I40E_GLHMC_VFSDPART_PMSDBASE_SHIFT)
+#define I40E_GLHMC_VFSDPART_PMSDSIZE_SHIFT 16
+#define I40E_GLHMC_VFSDPART_PMSDSIZE_MASK (0x1FFF << I40E_GLHMC_VFSDPART_PMSDSIZE_SHIFT)
+#define I40E_PFHMC_ERRORDATA 0x000C0500
+#define I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_SHIFT 0
+#define I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_MASK (0x3FFFFFFF << I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_SHIFT)
+#define I40E_PFHMC_ERRORINFO 0x000C0400
+#define I40E_PFHMC_ERRORINFO_PMF_INDEX_SHIFT 0
+#define I40E_PFHMC_ERRORINFO_PMF_INDEX_MASK (0x1F << I40E_PFHMC_ERRORINFO_PMF_INDEX_SHIFT)
+#define I40E_PFHMC_ERRORINFO_PMF_ISVF_SHIFT 7
+#define I40E_PFHMC_ERRORINFO_PMF_ISVF_MASK (0x1 << I40E_PFHMC_ERRORINFO_PMF_ISVF_SHIFT)
+#define I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_SHIFT 8
+#define I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_MASK (0xF << I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_SHIFT)
+#define I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_SHIFT 16
+#define I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_MASK (0x1F << I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_SHIFT)
+#define I40E_PFHMC_ERRORINFO_ERROR_DETECTED_SHIFT 31
+#define I40E_PFHMC_ERRORINFO_ERROR_DETECTED_MASK (0x1 << I40E_PFHMC_ERRORINFO_ERROR_DETECTED_SHIFT)
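All of the *_SHIFT/*_MASK pairs in this header follow one convention: each _MASK is pre-shifted, so a field is read by masking first and then shifting down. A minimal decode of the error-capture register above, as a sketch only; the value is assumed to have been read already (e.g. via the driver's usual rd32() accessor, which this header does not define), and the helper name is hypothetical:

	/* Sketch: pull the captured fields out of an I40E_PFHMC_ERRORINFO value. */
	static void i40e_show_hmc_error(u32 info)	/* hypothetical helper */
	{
		u32 pmf, type, obj;

		if (!(info & I40E_PFHMC_ERRORINFO_ERROR_DETECTED_MASK))
			return;	/* bit 31: nothing captured */

		/* masks are pre-shifted, so mask first, then shift down */
		pmf  = (info & I40E_PFHMC_ERRORINFO_PMF_INDEX_MASK) >>
		       I40E_PFHMC_ERRORINFO_PMF_INDEX_SHIFT;
		type = (info & I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_MASK) >>
		       I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_SHIFT;
		obj  = (info & I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_MASK) >>
		       I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_SHIFT;
		pr_err("HMC error: func %u type %u object %u\n", pmf, type, obj);
	}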
+#define I40E_PFHMC_PDINV 0x000C0300
+#define I40E_PFHMC_PDINV_PMSDIDX_SHIFT 0
+#define I40E_PFHMC_PDINV_PMSDIDX_MASK (0xFFF << I40E_PFHMC_PDINV_PMSDIDX_SHIFT)
+#define I40E_PFHMC_PDINV_PMPDIDX_SHIFT 16
+#define I40E_PFHMC_PDINV_PMPDIDX_MASK (0x1FF << I40E_PFHMC_PDINV_PMPDIDX_SHIFT)
+#define I40E_PFHMC_SDCMD 0x000C0000
+#define I40E_PFHMC_SDCMD_PMSDIDX_SHIFT 0
+#define I40E_PFHMC_SDCMD_PMSDIDX_MASK (0xFFF << I40E_PFHMC_SDCMD_PMSDIDX_SHIFT)
+#define I40E_PFHMC_SDCMD_PMSDWR_SHIFT 31
+#define I40E_PFHMC_SDCMD_PMSDWR_MASK (0x1 << I40E_PFHMC_SDCMD_PMSDWR_SHIFT)
+#define I40E_PFHMC_SDDATAHIGH 0x000C0200
+#define I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_SHIFT 0
+#define I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_MASK (0xFFFFFFFF << I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_SHIFT)
+#define I40E_PFHMC_SDDATALOW 0x000C0100
+#define I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT 0
+#define I40E_PFHMC_SDDATALOW_PMSDVALID_MASK (0x1 << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT)
+#define I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT 1
+#define I40E_PFHMC_SDDATALOW_PMSDTYPE_MASK (0x1 << I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT)
+#define I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT 2
+#define I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_MASK (0x3FF << I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT)
+#define I40E_PFHMC_SDDATALOW_PMSDDATALOW_SHIFT 12
+#define I40E_PFHMC_SDDATALOW_PMSDDATALOW_MASK (0xFFFFF << I40E_PFHMC_SDDATALOW_PMSDDATALOW_SHIFT)
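The three SD registers above behave like a small mailbox: software loads the 64-bit segment descriptor into SDDATAHIGH/SDDATALOW, then writes the SD index plus the write bit to SDCMD. A sketch of that sequence, assuming the driver's wr32() accessor and struct i40e_hw (neither is defined by this header) and a 4 KB-aligned backing-page address:

	/* Sketch: program segment descriptor @sd_index with backing page @pa. */
	static void i40e_write_sd_entry(struct i40e_hw *hw, u64 pa, u32 sd_index,
					u32 sd_type, u32 bp_count)	/* hypothetical */
	{
		/* pa is 4 KB aligned, so address bits 12..31 land in PMSDDATALOW */
		u32 low = I40E_PFHMC_SDDATALOW_PMSDVALID_MASK |
			  (sd_type << I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT) |
			  (bp_count << I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) |
			  (lower_32_bits(pa) & I40E_PFHMC_SDDATALOW_PMSDDATALOW_MASK);

		wr32(hw, I40E_PFHMC_SDDATAHIGH, upper_32_bits(pa));
		wr32(hw, I40E_PFHMC_SDDATALOW, low);
		wr32(hw, I40E_PFHMC_SDCMD, I40E_PFHMC_SDCMD_PMSDWR_MASK |
		     (sd_index & I40E_PFHMC_SDCMD_PMSDIDX_MASK));
	}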
+#define I40E_GL_UFUSE 0x00094008
+#define I40E_GL_UFUSE_FOUR_PORT_ENABLE_SHIFT 1
+#define I40E_GL_UFUSE_FOUR_PORT_ENABLE_MASK (0x1 << I40E_GL_UFUSE_FOUR_PORT_ENABLE_SHIFT)
+#define I40E_GL_UFUSE_NIC_ID_SHIFT 2
+#define I40E_GL_UFUSE_NIC_ID_MASK (0x1 << I40E_GL_UFUSE_NIC_ID_SHIFT)
+#define I40E_GL_UFUSE_ULT_LOCKOUT_SHIFT 10
+#define I40E_GL_UFUSE_ULT_LOCKOUT_MASK (0x1 << I40E_GL_UFUSE_ULT_LOCKOUT_SHIFT)
+#define I40E_GL_UFUSE_CLS_LOCKOUT_SHIFT 11
+#define I40E_GL_UFUSE_CLS_LOCKOUT_MASK (0x1 << I40E_GL_UFUSE_CLS_LOCKOUT_SHIFT)
+#define I40E_EMPINT_GPIO_ENA 0x00088188
+#define I40E_EMPINT_GPIO_ENA_GPIO0_ENA_SHIFT 0
+#define I40E_EMPINT_GPIO_ENA_GPIO0_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO0_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO1_ENA_SHIFT 1
+#define I40E_EMPINT_GPIO_ENA_GPIO1_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO1_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO2_ENA_SHIFT 2
+#define I40E_EMPINT_GPIO_ENA_GPIO2_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO2_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO3_ENA_SHIFT 3
+#define I40E_EMPINT_GPIO_ENA_GPIO3_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO3_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO4_ENA_SHIFT 4
+#define I40E_EMPINT_GPIO_ENA_GPIO4_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO4_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO5_ENA_SHIFT 5
+#define I40E_EMPINT_GPIO_ENA_GPIO5_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO5_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO6_ENA_SHIFT 6
+#define I40E_EMPINT_GPIO_ENA_GPIO6_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO6_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO7_ENA_SHIFT 7
+#define I40E_EMPINT_GPIO_ENA_GPIO7_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO7_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO8_ENA_SHIFT 8
+#define I40E_EMPINT_GPIO_ENA_GPIO8_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO8_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO9_ENA_SHIFT 9
+#define I40E_EMPINT_GPIO_ENA_GPIO9_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO9_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO10_ENA_SHIFT 10
+#define I40E_EMPINT_GPIO_ENA_GPIO10_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO10_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO11_ENA_SHIFT 11
+#define I40E_EMPINT_GPIO_ENA_GPIO11_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO11_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO12_ENA_SHIFT 12
+#define I40E_EMPINT_GPIO_ENA_GPIO12_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO12_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO13_ENA_SHIFT 13
+#define I40E_EMPINT_GPIO_ENA_GPIO13_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO13_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO14_ENA_SHIFT 14
+#define I40E_EMPINT_GPIO_ENA_GPIO14_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO14_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO15_ENA_SHIFT 15
+#define I40E_EMPINT_GPIO_ENA_GPIO15_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO15_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO16_ENA_SHIFT 16
+#define I40E_EMPINT_GPIO_ENA_GPIO16_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO16_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO17_ENA_SHIFT 17
+#define I40E_EMPINT_GPIO_ENA_GPIO17_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO17_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO18_ENA_SHIFT 18
+#define I40E_EMPINT_GPIO_ENA_GPIO18_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO18_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO19_ENA_SHIFT 19
+#define I40E_EMPINT_GPIO_ENA_GPIO19_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO19_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO20_ENA_SHIFT 20
+#define I40E_EMPINT_GPIO_ENA_GPIO20_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO20_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO21_ENA_SHIFT 21
+#define I40E_EMPINT_GPIO_ENA_GPIO21_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO21_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO22_ENA_SHIFT 22
+#define I40E_EMPINT_GPIO_ENA_GPIO22_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO22_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO23_ENA_SHIFT 23
+#define I40E_EMPINT_GPIO_ENA_GPIO23_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO23_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO24_ENA_SHIFT 24
+#define I40E_EMPINT_GPIO_ENA_GPIO24_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO24_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO25_ENA_SHIFT 25
+#define I40E_EMPINT_GPIO_ENA_GPIO25_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO25_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO26_ENA_SHIFT 26
+#define I40E_EMPINT_GPIO_ENA_GPIO26_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO26_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO27_ENA_SHIFT 27
+#define I40E_EMPINT_GPIO_ENA_GPIO27_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO27_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO28_ENA_SHIFT 28
+#define I40E_EMPINT_GPIO_ENA_GPIO28_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO28_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO29_ENA_SHIFT 29
+#define I40E_EMPINT_GPIO_ENA_GPIO29_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO29_ENA_SHIFT)
+#define I40E_PFGEN_PORTMDIO_NUM 0x0003F100
+#define I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_SHIFT 0
+#define I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_MASK (0x3 << I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_SHIFT)
+#define I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_SHIFT 4
+#define I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK (0x1 << I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_SHIFT)
+#define I40E_PFINT_AEQCTL 0x00038700
+#define I40E_PFINT_AEQCTL_MSIX_INDX_SHIFT 0
+#define I40E_PFINT_AEQCTL_MSIX_INDX_MASK (0xFF << I40E_PFINT_AEQCTL_MSIX_INDX_SHIFT)
+#define I40E_PFINT_AEQCTL_ITR_INDX_SHIFT 11
+#define I40E_PFINT_AEQCTL_ITR_INDX_MASK (0x3 << I40E_PFINT_AEQCTL_ITR_INDX_SHIFT)
+#define I40E_PFINT_AEQCTL_MSIX0_INDX_SHIFT 13
+#define I40E_PFINT_AEQCTL_MSIX0_INDX_MASK (0x7 << I40E_PFINT_AEQCTL_MSIX0_INDX_SHIFT)
+#define I40E_PFINT_AEQCTL_CAUSE_ENA_SHIFT 30
+#define I40E_PFINT_AEQCTL_CAUSE_ENA_MASK (0x1 << I40E_PFINT_AEQCTL_CAUSE_ENA_SHIFT)
+#define I40E_PFINT_AEQCTL_INTEVENT_SHIFT 31
+#define I40E_PFINT_AEQCTL_INTEVENT_MASK (0x1 << I40E_PFINT_AEQCTL_INTEVENT_SHIFT)
+#define I40E_PFINT_CEQCTL(_INTPF) (0x00036800 + ((_INTPF) * 4)) /* _i=0...511 */
+#define I40E_PFINT_CEQCTL_MAX_INDEX 511
+#define I40E_PFINT_CEQCTL_MSIX_INDX_SHIFT 0
+#define I40E_PFINT_CEQCTL_MSIX_INDX_MASK (0xFF << I40E_PFINT_CEQCTL_MSIX_INDX_SHIFT)
+#define I40E_PFINT_CEQCTL_ITR_INDX_SHIFT 11
+#define I40E_PFINT_CEQCTL_ITR_INDX_MASK (0x3 << I40E_PFINT_CEQCTL_ITR_INDX_SHIFT)
+#define I40E_PFINT_CEQCTL_MSIX0_INDX_SHIFT 13
+#define I40E_PFINT_CEQCTL_MSIX0_INDX_MASK (0x7 << I40E_PFINT_CEQCTL_MSIX0_INDX_SHIFT)
+#define I40E_PFINT_CEQCTL_NEXTQ_INDX_SHIFT 16
+#define I40E_PFINT_CEQCTL_NEXTQ_INDX_MASK (0x7FF << I40E_PFINT_CEQCTL_NEXTQ_INDX_SHIFT)
+#define I40E_PFINT_CEQCTL_NEXTQ_TYPE_SHIFT 27
+#define I40E_PFINT_CEQCTL_NEXTQ_TYPE_MASK (0x3 << I40E_PFINT_CEQCTL_NEXTQ_TYPE_SHIFT)
+#define I40E_PFINT_CEQCTL_CAUSE_ENA_SHIFT 30
+#define I40E_PFINT_CEQCTL_CAUSE_ENA_MASK (0x1 << I40E_PFINT_CEQCTL_CAUSE_ENA_SHIFT)
+#define I40E_PFINT_CEQCTL_INTEVENT_SHIFT 31
+#define I40E_PFINT_CEQCTL_INTEVENT_MASK (0x1 << I40E_PFINT_CEQCTL_INTEVENT_SHIFT)
+#define I40E_PFINT_DYN_CTL0 0x00038480
+#define I40E_PFINT_DYN_CTL0_INTENA_SHIFT 0
+#define I40E_PFINT_DYN_CTL0_INTENA_MASK (0x1 << I40E_PFINT_DYN_CTL0_INTENA_SHIFT)
+#define I40E_PFINT_DYN_CTL0_CLEARPBA_SHIFT 1
+#define I40E_PFINT_DYN_CTL0_CLEARPBA_MASK (0x1 << I40E_PFINT_DYN_CTL0_CLEARPBA_SHIFT)
+#define I40E_PFINT_DYN_CTL0_SWINT_TRIG_SHIFT 2
+#define I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK (0x1 << I40E_PFINT_DYN_CTL0_SWINT_TRIG_SHIFT)
+#define I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT 3
+#define I40E_PFINT_DYN_CTL0_ITR_INDX_MASK (0x3 << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT)
+#define I40E_PFINT_DYN_CTL0_INTERVAL_SHIFT 5
+#define I40E_PFINT_DYN_CTL0_INTERVAL_MASK (0xFFF << I40E_PFINT_DYN_CTL0_INTERVAL_SHIFT)
+#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT 24
+#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK (0x1 << I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_SHIFT 25
+#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK (0x3 << I40E_PFINT_DYN_CTL0_SW_ITR_INDX_SHIFT)
+#define I40E_PFINT_DYN_CTL0_INTENA_MSK_SHIFT 31
+#define I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK (0x1 << I40E_PFINT_DYN_CTL0_INTENA_MSK_SHIFT)
+#define I40E_PFINT_DYN_CTLN(_INTPF) (0x00034800 + ((_INTPF) * 4)) /* _i=0...511 */
+#define I40E_PFINT_DYN_CTLN_MAX_INDEX 511
+#define I40E_PFINT_DYN_CTLN_INTENA_SHIFT 0
+#define I40E_PFINT_DYN_CTLN_INTENA_MASK (0x1 << I40E_PFINT_DYN_CTLN_INTENA_SHIFT)
+#define I40E_PFINT_DYN_CTLN_CLEARPBA_SHIFT 1
+#define I40E_PFINT_DYN_CTLN_CLEARPBA_MASK (0x1 << I40E_PFINT_DYN_CTLN_CLEARPBA_SHIFT)
+#define I40E_PFINT_DYN_CTLN_SWINT_TRIG_SHIFT 2
+#define I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK (0x1 << I40E_PFINT_DYN_CTLN_SWINT_TRIG_SHIFT)
+#define I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT 3
+#define I40E_PFINT_DYN_CTLN_ITR_INDX_MASK (0x3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT)
+#define I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT 5
+#define I40E_PFINT_DYN_CTLN_INTERVAL_MASK (0xFFF << I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT)
+#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT 24
+#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK (0x1 << I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_SHIFT 25
+#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK (0x3 << I40E_PFINT_DYN_CTLN_SW_ITR_INDX_SHIFT)
+#define I40E_PFINT_DYN_CTLN_INTENA_MSK_SHIFT 31
+#define I40E_PFINT_DYN_CTLN_INTENA_MSK_MASK (0x1 << I40E_PFINT_DYN_CTLN_INTENA_MSK_SHIFT)
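A typical use of the DYN_CTL layout above is re-arming a vector after servicing it: set INTENA and CLEARPBA in one write, and point the two-bit ITR_INDX field at an encoding that leaves the ITR alone. A sketch only; treating index 3 as the "no ITR update" selector is an assumption here, and wr32()/struct i40e_hw are again assumed driver helpers:

	/* Sketch: re-enable MSI-X vector @vector (vector 0 is the misc/other cause). */
	static void i40e_rearm_vector(struct i40e_hw *hw, int vector)	/* hypothetical */
	{
		u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
			  I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
			  (0x3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT); /* 3 = no ITR change: assumption */

		wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val);
	}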
+#define I40E_PFINT_GPIO_ENA 0x00088080
+#define I40E_PFINT_GPIO_ENA_GPIO0_ENA_SHIFT 0
+#define I40E_PFINT_GPIO_ENA_GPIO0_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO0_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO1_ENA_SHIFT 1
+#define I40E_PFINT_GPIO_ENA_GPIO1_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO1_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO2_ENA_SHIFT 2
+#define I40E_PFINT_GPIO_ENA_GPIO2_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO2_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO3_ENA_SHIFT 3
+#define I40E_PFINT_GPIO_ENA_GPIO3_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO3_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO4_ENA_SHIFT 4
+#define I40E_PFINT_GPIO_ENA_GPIO4_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO4_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO5_ENA_SHIFT 5
+#define I40E_PFINT_GPIO_ENA_GPIO5_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO5_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO6_ENA_SHIFT 6
+#define I40E_PFINT_GPIO_ENA_GPIO6_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO6_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO7_ENA_SHIFT 7
+#define I40E_PFINT_GPIO_ENA_GPIO7_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO7_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO8_ENA_SHIFT 8
+#define I40E_PFINT_GPIO_ENA_GPIO8_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO8_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO9_ENA_SHIFT 9
+#define I40E_PFINT_GPIO_ENA_GPIO9_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO9_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO10_ENA_SHIFT 10
+#define I40E_PFINT_GPIO_ENA_GPIO10_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO10_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO11_ENA_SHIFT 11
+#define I40E_PFINT_GPIO_ENA_GPIO11_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO11_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO12_ENA_SHIFT 12
+#define I40E_PFINT_GPIO_ENA_GPIO12_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO12_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO13_ENA_SHIFT 13
+#define I40E_PFINT_GPIO_ENA_GPIO13_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO13_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO14_ENA_SHIFT 14
+#define I40E_PFINT_GPIO_ENA_GPIO14_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO14_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO15_ENA_SHIFT 15
+#define I40E_PFINT_GPIO_ENA_GPIO15_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO15_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO16_ENA_SHIFT 16
+#define I40E_PFINT_GPIO_ENA_GPIO16_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO16_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO17_ENA_SHIFT 17
+#define I40E_PFINT_GPIO_ENA_GPIO17_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO17_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO18_ENA_SHIFT 18
+#define I40E_PFINT_GPIO_ENA_GPIO18_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO18_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO19_ENA_SHIFT 19
+#define I40E_PFINT_GPIO_ENA_GPIO19_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO19_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO20_ENA_SHIFT 20
+#define I40E_PFINT_GPIO_ENA_GPIO20_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO20_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO21_ENA_SHIFT 21
+#define I40E_PFINT_GPIO_ENA_GPIO21_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO21_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO22_ENA_SHIFT 22
+#define I40E_PFINT_GPIO_ENA_GPIO22_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO22_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO23_ENA_SHIFT 23
+#define I40E_PFINT_GPIO_ENA_GPIO23_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO23_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO24_ENA_SHIFT 24
+#define I40E_PFINT_GPIO_ENA_GPIO24_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO24_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO25_ENA_SHIFT 25
+#define I40E_PFINT_GPIO_ENA_GPIO25_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO25_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO26_ENA_SHIFT 26
+#define I40E_PFINT_GPIO_ENA_GPIO26_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO26_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO27_ENA_SHIFT 27
+#define I40E_PFINT_GPIO_ENA_GPIO27_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO27_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO28_ENA_SHIFT 28
+#define I40E_PFINT_GPIO_ENA_GPIO28_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO28_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO29_ENA_SHIFT 29
+#define I40E_PFINT_GPIO_ENA_GPIO29_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO29_ENA_SHIFT)
+#define I40E_PFINT_ICR0 0x00038780
+#define I40E_PFINT_ICR0_INTEVENT_SHIFT 0
+#define I40E_PFINT_ICR0_INTEVENT_MASK (0x1 << I40E_PFINT_ICR0_INTEVENT_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_0_SHIFT 1
+#define I40E_PFINT_ICR0_QUEUE_0_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_0_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_1_SHIFT 2
+#define I40E_PFINT_ICR0_QUEUE_1_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_1_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_2_SHIFT 3
+#define I40E_PFINT_ICR0_QUEUE_2_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_2_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_3_SHIFT 4
+#define I40E_PFINT_ICR0_QUEUE_3_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_3_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_4_SHIFT 5
+#define I40E_PFINT_ICR0_QUEUE_4_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_4_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_5_SHIFT 6
+#define I40E_PFINT_ICR0_QUEUE_5_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_5_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_6_SHIFT 7
+#define I40E_PFINT_ICR0_QUEUE_6_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_6_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_7_SHIFT 8
+#define I40E_PFINT_ICR0_QUEUE_7_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_7_SHIFT)
+#define I40E_PFINT_ICR0_ECC_ERR_SHIFT 16
+#define I40E_PFINT_ICR0_ECC_ERR_MASK (0x1 << I40E_PFINT_ICR0_ECC_ERR_SHIFT)
+#define I40E_PFINT_ICR0_MAL_DETECT_SHIFT 19
+#define I40E_PFINT_ICR0_MAL_DETECT_MASK (0x1 << I40E_PFINT_ICR0_MAL_DETECT_SHIFT)
+#define I40E_PFINT_ICR0_GRST_SHIFT 20
+#define I40E_PFINT_ICR0_GRST_MASK (0x1 << I40E_PFINT_ICR0_GRST_SHIFT)
+#define I40E_PFINT_ICR0_PCI_EXCEPTION_SHIFT 21
+#define I40E_PFINT_ICR0_PCI_EXCEPTION_MASK (0x1 << I40E_PFINT_ICR0_PCI_EXCEPTION_SHIFT)
+#define I40E_PFINT_ICR0_GPIO_SHIFT 22
+#define I40E_PFINT_ICR0_GPIO_MASK (0x1 << I40E_PFINT_ICR0_GPIO_SHIFT)
+#define I40E_PFINT_ICR0_TIMESYNC_SHIFT 23
+#define I40E_PFINT_ICR0_TIMESYNC_MASK (0x1 << I40E_PFINT_ICR0_TIMESYNC_SHIFT)
+#define I40E_PFINT_ICR0_STORM_DETECT_SHIFT 24
+#define I40E_PFINT_ICR0_STORM_DETECT_MASK (0x1 << I40E_PFINT_ICR0_STORM_DETECT_SHIFT)
+#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT 25
+#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK (0x1 << I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT)
+#define I40E_PFINT_ICR0_HMC_ERR_SHIFT 26
+#define I40E_PFINT_ICR0_HMC_ERR_MASK (0x1 << I40E_PFINT_ICR0_HMC_ERR_SHIFT)
+#define I40E_PFINT_ICR0_PE_CRITERR_SHIFT 28
+#define I40E_PFINT_ICR0_PE_CRITERR_MASK (0x1 << I40E_PFINT_ICR0_PE_CRITERR_SHIFT)
+#define I40E_PFINT_ICR0_VFLR_SHIFT 29
+#define I40E_PFINT_ICR0_VFLR_MASK (0x1 << I40E_PFINT_ICR0_VFLR_SHIFT)
+#define I40E_PFINT_ICR0_ADMINQ_SHIFT 30
+#define I40E_PFINT_ICR0_ADMINQ_MASK (0x1 << I40E_PFINT_ICR0_ADMINQ_SHIFT)
+#define I40E_PFINT_ICR0_SWINT_SHIFT 31
+#define I40E_PFINT_ICR0_SWINT_MASK (0x1 << I40E_PFINT_ICR0_SWINT_SHIFT)
+#define I40E_PFINT_ICR0_ENA 0x00038800
+#define I40E_PFINT_ICR0_ENA_ECC_ERR_SHIFT 16
+#define I40E_PFINT_ICR0_ENA_ECC_ERR_MASK (0x1 << I40E_PFINT_ICR0_ENA_ECC_ERR_SHIFT)
+#define I40E_PFINT_ICR0_ENA_MAL_DETECT_SHIFT 19
+#define I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK (0x1 << I40E_PFINT_ICR0_ENA_MAL_DETECT_SHIFT)
+#define I40E_PFINT_ICR0_ENA_GRST_SHIFT 20
+#define I40E_PFINT_ICR0_ENA_GRST_MASK (0x1 << I40E_PFINT_ICR0_ENA_GRST_SHIFT)
+#define I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_SHIFT 21
+#define I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK (0x1 << I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_SHIFT)
+#define I40E_PFINT_ICR0_ENA_GPIO_SHIFT 22
+#define I40E_PFINT_ICR0_ENA_GPIO_MASK (0x1 << I40E_PFINT_ICR0_ENA_GPIO_SHIFT)
+#define I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT 23
+#define I40E_PFINT_ICR0_ENA_TIMESYNC_MASK (0x1 << I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT)
+#define I40E_PFINT_ICR0_ENA_STORM_DETECT_SHIFT 24
+#define I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK (0x1 << I40E_PFINT_ICR0_ENA_STORM_DETECT_SHIFT)
+#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT 25
+#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK (0x1 << I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT)
+#define I40E_PFINT_ICR0_ENA_HMC_ERR_SHIFT 26
+#define I40E_PFINT_ICR0_ENA_HMC_ERR_MASK (0x1 << I40E_PFINT_ICR0_ENA_HMC_ERR_SHIFT)
+#define I40E_PFINT_ICR0_ENA_PE_CRITERR_SHIFT 28
+#define I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK (0x1 << I40E_PFINT_ICR0_ENA_PE_CRITERR_SHIFT)
+#define I40E_PFINT_ICR0_ENA_VFLR_SHIFT 29
+#define I40E_PFINT_ICR0_ENA_VFLR_MASK (0x1 << I40E_PFINT_ICR0_ENA_VFLR_SHIFT)
+#define I40E_PFINT_ICR0_ENA_ADMINQ_SHIFT 30
+#define I40E_PFINT_ICR0_ENA_ADMINQ_MASK (0x1 << I40E_PFINT_ICR0_ENA_ADMINQ_SHIFT)
+#define I40E_PFINT_ICR0_ENA_RSVD_SHIFT 31
+#define I40E_PFINT_ICR0_ENA_RSVD_MASK (0x1 << I40E_PFINT_ICR0_ENA_RSVD_SHIFT)
+#define I40E_PFINT_ITR0(_i) (0x00038000 + ((_i) * 128)) /* _i=0...2 */
+#define I40E_PFINT_ITR0_MAX_INDEX 2
+#define I40E_PFINT_ITR0_INTERVAL_SHIFT 0
+#define I40E_PFINT_ITR0_INTERVAL_MASK (0xFFF << I40E_PFINT_ITR0_INTERVAL_SHIFT)
+#define I40E_PFINT_ITRN(_i, _INTPF) (0x00030000 + ((_i) * 2048 + (_INTPF) * 4)) /* _i=0...2, _INTPF=0...511 */
+#define I40E_PFINT_ITRN_MAX_INDEX 2
+#define I40E_PFINT_ITRN_INTERVAL_SHIFT 0
+#define I40E_PFINT_ITRN_INTERVAL_MASK (0xFFF << I40E_PFINT_ITRN_INTERVAL_SHIFT)
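Note the addressing in I40E_PFINT_ITRN: the first argument selects one of three 2 KB banks (one per ITR index), and the second picks the per-vector 12-bit interval within that bank. Programming one bank for one vector, as a sketch (interval units are hardware-defined; wr32() and the helper name are assumptions):

	/* Sketch: load ITR bank 1 for vector @v with a 12-bit @interval. */
	static void i40e_set_itr(struct i40e_hw *hw, u32 v, u32 interval)	/* hypothetical */
	{
		wr32(hw, I40E_PFINT_ITRN(1, v),
		     (interval << I40E_PFINT_ITRN_INTERVAL_SHIFT) &
		     I40E_PFINT_ITRN_INTERVAL_MASK);
	}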
+#define I40E_PFINT_LNKLST0 0x00038500
+#define I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT 0
+#define I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK (0x7FF << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT)
+#define I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT 11
+#define I40E_PFINT_LNKLST0_FIRSTQ_TYPE_MASK (0x3 << I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT)
+#define I40E_PFINT_LNKLSTN(_INTPF) (0x00035000 + ((_INTPF) * 4)) /* _i=0...511 */
+#define I40E_PFINT_LNKLSTN_MAX_INDEX 511
+#define I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT 0
+#define I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK (0x7FF << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT)
+#define I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT 11
+#define I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_MASK (0x3 << I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)
+#define I40E_PFINT_RATE0 0x00038580
+#define I40E_PFINT_RATE0_INTERVAL_SHIFT 0
+#define I40E_PFINT_RATE0_INTERVAL_MASK (0x3F << I40E_PFINT_RATE0_INTERVAL_SHIFT)
+#define I40E_PFINT_RATE0_INTRL_ENA_SHIFT 6
+#define I40E_PFINT_RATE0_INTRL_ENA_MASK (0x1 << I40E_PFINT_RATE0_INTRL_ENA_SHIFT)
+#define I40E_PFINT_RATEN(_INTPF) (0x00035800 + ((_INTPF) * 4)) /* _i=0...511 */
+#define I40E_PFINT_RATEN_MAX_INDEX 511
+#define I40E_PFINT_RATEN_INTERVAL_SHIFT 0
+#define I40E_PFINT_RATEN_INTERVAL_MASK (0x3F << I40E_PFINT_RATEN_INTERVAL_SHIFT)
+#define I40E_PFINT_RATEN_INTRL_ENA_SHIFT 6
+#define I40E_PFINT_RATEN_INTRL_ENA_MASK (0x1 << I40E_PFINT_RATEN_INTRL_ENA_SHIFT)
+#define I40E_PFINT_STAT_CTL0 0x00038400
+#define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2
+#define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK (0x3 << I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT)
+#define I40E_QINT_RQCTL(_Q) (0x0003A000 + ((_Q) * 4)) /* _i=0...1535 */
+#define I40E_QINT_RQCTL_MAX_INDEX 1535
+#define I40E_QINT_RQCTL_MSIX_INDX_SHIFT 0
+#define I40E_QINT_RQCTL_MSIX_INDX_MASK (0xFF << I40E_QINT_RQCTL_MSIX_INDX_SHIFT)
+#define I40E_QINT_RQCTL_ITR_INDX_SHIFT 11
+#define I40E_QINT_RQCTL_ITR_INDX_MASK (0x3 << I40E_QINT_RQCTL_ITR_INDX_SHIFT)
+#define I40E_QINT_RQCTL_MSIX0_INDX_SHIFT 13
+#define I40E_QINT_RQCTL_MSIX0_INDX_MASK (0x7 << I40E_QINT_RQCTL_MSIX0_INDX_SHIFT)
+#define I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT 16
+#define I40E_QINT_RQCTL_NEXTQ_INDX_MASK (0x7FF << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)
+#define I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT 27
+#define I40E_QINT_RQCTL_NEXTQ_TYPE_MASK (0x3 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT)
+#define I40E_QINT_RQCTL_CAUSE_ENA_SHIFT 30
+#define I40E_QINT_RQCTL_CAUSE_ENA_MASK (0x1 << I40E_QINT_RQCTL_CAUSE_ENA_SHIFT)
+#define I40E_QINT_RQCTL_INTEVENT_SHIFT 31
+#define I40E_QINT_RQCTL_INTEVENT_MASK (0x1 << I40E_QINT_RQCTL_INTEVENT_SHIFT)
+#define I40E_QINT_TQCTL(_Q) (0x0003C000 + ((_Q) * 4)) /* _i=0...1535 */
+#define I40E_QINT_TQCTL_MAX_INDEX 1535
+#define I40E_QINT_TQCTL_MSIX_INDX_SHIFT 0
+#define I40E_QINT_TQCTL_MSIX_INDX_MASK (0xFF << I40E_QINT_TQCTL_MSIX_INDX_SHIFT)
+#define I40E_QINT_TQCTL_ITR_INDX_SHIFT 11
+#define I40E_QINT_TQCTL_ITR_INDX_MASK (0x3 << I40E_QINT_TQCTL_ITR_INDX_SHIFT)
+#define I40E_QINT_TQCTL_MSIX0_INDX_SHIFT 13
+#define I40E_QINT_TQCTL_MSIX0_INDX_MASK (0x7 << I40E_QINT_TQCTL_MSIX0_INDX_SHIFT)
+#define I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT 16
+#define I40E_QINT_TQCTL_NEXTQ_INDX_MASK (0x7FF << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT)
+#define I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT 27
+#define I40E_QINT_TQCTL_NEXTQ_TYPE_MASK (0x3 << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT)
+#define I40E_QINT_TQCTL_CAUSE_ENA_SHIFT 30
+#define I40E_QINT_TQCTL_CAUSE_ENA_MASK (0x1 << I40E_QINT_TQCTL_CAUSE_ENA_SHIFT)
+#define I40E_QINT_TQCTL_INTEVENT_SHIFT 31
+#define I40E_QINT_TQCTL_INTEVENT_MASK (0x1 << I40E_QINT_TQCTL_INTEVENT_SHIFT)
+#define I40E_VFINT_DYN_CTL0(_VF) (0x0002A400 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFINT_DYN_CTL0_MAX_INDEX 127
+#define I40E_VFINT_DYN_CTL0_INTENA_SHIFT 0
+#define I40E_VFINT_DYN_CTL0_INTENA_MASK (0x1 << I40E_VFINT_DYN_CTL0_INTENA_SHIFT)
+#define I40E_VFINT_DYN_CTL0_CLEARPBA_SHIFT 1
+#define I40E_VFINT_DYN_CTL0_CLEARPBA_MASK (0x1 << I40E_VFINT_DYN_CTL0_CLEARPBA_SHIFT)
+#define I40E_VFINT_DYN_CTL0_SWINT_TRIG_SHIFT 2
+#define I40E_VFINT_DYN_CTL0_SWINT_TRIG_MASK (0x1 << I40E_VFINT_DYN_CTL0_SWINT_TRIG_SHIFT)
+#define I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT 3
+#define I40E_VFINT_DYN_CTL0_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTL0_INTERVAL_SHIFT 5
+#define I40E_VFINT_DYN_CTL0_INTERVAL_MASK (0xFFF << I40E_VFINT_DYN_CTL0_INTERVAL_SHIFT)
+#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT 24
+#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK (0x1 << I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_SHIFT 25
+#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTL0_SW_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTL0_INTENA_MSK_SHIFT 31
+#define I40E_VFINT_DYN_CTL0_INTENA_MSK_MASK (0x1 << I40E_VFINT_DYN_CTL0_INTENA_MSK_SHIFT)
+#define I40E_VFINT_DYN_CTLN(_INTVF) (0x00024800 + ((_INTVF) * 4)) /* _i=0...511 */
+#define I40E_VFINT_DYN_CTLN_MAX_INDEX 511
+#define I40E_VFINT_DYN_CTLN_INTENA_SHIFT 0
+#define I40E_VFINT_DYN_CTLN_INTENA_MASK (0x1 << I40E_VFINT_DYN_CTLN_INTENA_SHIFT)
+#define I40E_VFINT_DYN_CTLN_CLEARPBA_SHIFT 1
+#define I40E_VFINT_DYN_CTLN_CLEARPBA_MASK (0x1 << I40E_VFINT_DYN_CTLN_CLEARPBA_SHIFT)
+#define I40E_VFINT_DYN_CTLN_SWINT_TRIG_SHIFT 2
+#define I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK (0x1 << I40E_VFINT_DYN_CTLN_SWINT_TRIG_SHIFT)
+#define I40E_VFINT_DYN_CTLN_ITR_INDX_SHIFT 3
+#define I40E_VFINT_DYN_CTLN_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTLN_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTLN_INTERVAL_SHIFT 5
+#define I40E_VFINT_DYN_CTLN_INTERVAL_MASK (0xFFF << I40E_VFINT_DYN_CTLN_INTERVAL_SHIFT)
+#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT 24
+#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK (0x1 << I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_SHIFT 25
+#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTLN_SW_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTLN_INTENA_MSK_SHIFT 31
+#define I40E_VFINT_DYN_CTLN_INTENA_MSK_MASK (0x1 << I40E_VFINT_DYN_CTLN_INTENA_MSK_SHIFT)
+#define I40E_VFINT_ICR0(_VF) (0x0002BC00 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFINT_ICR0_MAX_INDEX 127
+#define I40E_VFINT_ICR0_INTEVENT_SHIFT 0
+#define I40E_VFINT_ICR0_INTEVENT_MASK (0x1 << I40E_VFINT_ICR0_INTEVENT_SHIFT)
+#define I40E_VFINT_ICR0_QUEUE_0_SHIFT 1
+#define I40E_VFINT_ICR0_QUEUE_0_MASK (0x1 << I40E_VFINT_ICR0_QUEUE_0_SHIFT)
+#define I40E_VFINT_ICR0_QUEUE_1_SHIFT 2
+#define I40E_VFINT_ICR0_QUEUE_1_MASK (0x1 << I40E_VFINT_ICR0_QUEUE_1_SHIFT)
+#define I40E_VFINT_ICR0_QUEUE_2_SHIFT 3
+#define I40E_VFINT_ICR0_QUEUE_2_MASK (0x1 << I40E_VFINT_ICR0_QUEUE_2_SHIFT)
+#define I40E_VFINT_ICR0_QUEUE_3_SHIFT 4
+#define I40E_VFINT_ICR0_QUEUE_3_MASK (0x1 << I40E_VFINT_ICR0_QUEUE_3_SHIFT)
+#define I40E_VFINT_ICR0_LINK_STAT_CHANGE_SHIFT 25
+#define I40E_VFINT_ICR0_LINK_STAT_CHANGE_MASK (0x1 << I40E_VFINT_ICR0_LINK_STAT_CHANGE_SHIFT)
+#define I40E_VFINT_ICR0_ADMINQ_SHIFT 30
+#define I40E_VFINT_ICR0_ADMINQ_MASK (0x1 << I40E_VFINT_ICR0_ADMINQ_SHIFT)
+#define I40E_VFINT_ICR0_SWINT_SHIFT 31
+#define I40E_VFINT_ICR0_SWINT_MASK (0x1 << I40E_VFINT_ICR0_SWINT_SHIFT)
+#define I40E_VFINT_ICR0_ENA(_VF) (0x0002C000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFINT_ICR0_ENA_MAX_INDEX 127
+#define I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT 25
+#define I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK (0x1 << I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT)
+#define I40E_VFINT_ICR0_ENA_ADMINQ_SHIFT 30
+#define I40E_VFINT_ICR0_ENA_ADMINQ_MASK (0x1 << I40E_VFINT_ICR0_ENA_ADMINQ_SHIFT)
+#define I40E_VFINT_ICR0_ENA_RSVD_SHIFT 31
+#define I40E_VFINT_ICR0_ENA_RSVD_MASK (0x1 << I40E_VFINT_ICR0_ENA_RSVD_SHIFT)
+#define I40E_VFINT_ITR0(_i, _VF) (0x00028000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...2, _VF=0...127 */
+#define I40E_VFINT_ITR0_MAX_INDEX 2
+#define I40E_VFINT_ITR0_INTERVAL_SHIFT 0
+#define I40E_VFINT_ITR0_INTERVAL_MASK (0xFFF << I40E_VFINT_ITR0_INTERVAL_SHIFT)
+#define I40E_VFINT_ITRN(_i, _INTVF) (0x00020000 + ((_i) * 2048 + (_INTVF) * 4)) /* _i=0...2, _INTVF=0...511 */
+#define I40E_VFINT_ITRN_MAX_INDEX 2
+#define I40E_VFINT_ITRN_INTERVAL_SHIFT 0
+#define I40E_VFINT_ITRN_INTERVAL_MASK (0xFFF << I40E_VFINT_ITRN_INTERVAL_SHIFT)
+#define I40E_VFINT_STAT_CTL0(_VF) (0x0002A000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFINT_STAT_CTL0_MAX_INDEX 127
+#define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2
+#define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_MASK (0x3 << I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT)
+#define I40E_VPINT_AEQCTL(_VF) (0x0002B800 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VPINT_AEQCTL_MAX_INDEX 127
+#define I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT 0
+#define I40E_VPINT_AEQCTL_MSIX_INDX_MASK (0xFF << I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT)
+#define I40E_VPINT_AEQCTL_ITR_INDX_SHIFT 11
+#define I40E_VPINT_AEQCTL_ITR_INDX_MASK (0x3 << I40E_VPINT_AEQCTL_ITR_INDX_SHIFT)
+#define I40E_VPINT_AEQCTL_MSIX0_INDX_SHIFT 13
+#define I40E_VPINT_AEQCTL_MSIX0_INDX_MASK (0x7 << I40E_VPINT_AEQCTL_MSIX0_INDX_SHIFT)
+#define I40E_VPINT_AEQCTL_CAUSE_ENA_SHIFT 30
+#define I40E_VPINT_AEQCTL_CAUSE_ENA_MASK (0x1 << I40E_VPINT_AEQCTL_CAUSE_ENA_SHIFT)
+#define I40E_VPINT_AEQCTL_INTEVENT_SHIFT 31
+#define I40E_VPINT_AEQCTL_INTEVENT_MASK (0x1 << I40E_VPINT_AEQCTL_INTEVENT_SHIFT)
+#define I40E_VPINT_CEQCTL(_INTVF) (0x00026800 + ((_INTVF) * 4)) /* _i=0...511 */
+#define I40E_VPINT_CEQCTL_MAX_INDEX 511
+#define I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT 0
+#define I40E_VPINT_CEQCTL_MSIX_INDX_MASK (0xFF << I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT)
+#define I40E_VPINT_CEQCTL_ITR_INDX_SHIFT 11
+#define I40E_VPINT_CEQCTL_ITR_INDX_MASK (0x3 << I40E_VPINT_CEQCTL_ITR_INDX_SHIFT)
+#define I40E_VPINT_CEQCTL_MSIX0_INDX_SHIFT 13
+#define I40E_VPINT_CEQCTL_MSIX0_INDX_MASK (0x7 << I40E_VPINT_CEQCTL_MSIX0_INDX_SHIFT)
+#define I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT 16
+#define I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK (0x7FF << I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT)
+#define I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT 27
+#define I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK (0x3 << I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT)
+#define I40E_VPINT_CEQCTL_CAUSE_ENA_SHIFT 30
+#define I40E_VPINT_CEQCTL_CAUSE_ENA_MASK (0x1 << I40E_VPINT_CEQCTL_CAUSE_ENA_SHIFT)
+#define I40E_VPINT_CEQCTL_INTEVENT_SHIFT 31
+#define I40E_VPINT_CEQCTL_INTEVENT_MASK (0x1 << I40E_VPINT_CEQCTL_INTEVENT_SHIFT)
+#define I40E_VPINT_LNKLST0(_VF) (0x0002A800 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VPINT_LNKLST0_MAX_INDEX 127
+#define I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT 0
+#define I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK (0x7FF << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT)
+#define I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT 11
+#define I40E_VPINT_LNKLST0_FIRSTQ_TYPE_MASK (0x3 << I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT)
+#define I40E_VPINT_LNKLSTN(_INTVF) (0x00025000 + ((_INTVF) * 4)) /* _i=0...511 */
+#define I40E_VPINT_LNKLSTN_MAX_INDEX 511
+#define I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT 0
+#define I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK (0x7FF << I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT)
+#define I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT 11
+#define I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK (0x3 << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)
+#define I40E_VPINT_RATE0(_VF) (0x0002AC00 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VPINT_RATE0_MAX_INDEX 127
+#define I40E_VPINT_RATE0_INTERVAL_SHIFT 0
+#define I40E_VPINT_RATE0_INTERVAL_MASK (0x3F << I40E_VPINT_RATE0_INTERVAL_SHIFT)
+#define I40E_VPINT_RATE0_INTRL_ENA_SHIFT 6
+#define I40E_VPINT_RATE0_INTRL_ENA_MASK (0x1 << I40E_VPINT_RATE0_INTRL_ENA_SHIFT)
+#define I40E_VPINT_RATEN(_INTVF) (0x00025800 + ((_INTVF) * 4)) /* _i=0...511 */
+#define I40E_VPINT_RATEN_MAX_INDEX 511
+#define I40E_VPINT_RATEN_INTERVAL_SHIFT 0
+#define I40E_VPINT_RATEN_INTERVAL_MASK (0x3F << I40E_VPINT_RATEN_INTERVAL_SHIFT)
+#define I40E_VPINT_RATEN_INTRL_ENA_SHIFT 6
+#define I40E_VPINT_RATEN_INTRL_ENA_MASK (0x1 << I40E_VPINT_RATEN_INTRL_ENA_SHIFT)
+#define I40E_GL_RDPU_CNTRL 0x00051060
+#define I40E_GL_RDPU_CNTRL_RX_PAD_EN_SHIFT 0
+#define I40E_GL_RDPU_CNTRL_RX_PAD_EN_MASK (0x1 << I40E_GL_RDPU_CNTRL_RX_PAD_EN_SHIFT)
+#define I40E_GL_RDPU_CNTRL_ECO_SHIFT 1
+#define I40E_GL_RDPU_CNTRL_ECO_MASK (0x7FFFFFFF << I40E_GL_RDPU_CNTRL_ECO_SHIFT)
+#define I40E_GLLAN_RCTL_0 0x0012A500
+#define I40E_GLLAN_RCTL_0_PXE_MODE_SHIFT 0
+#define I40E_GLLAN_RCTL_0_PXE_MODE_MASK (0x1 << I40E_GLLAN_RCTL_0_PXE_MODE_SHIFT)
+#define I40E_GLLAN_TSOMSK_F 0x000442D8
+#define I40E_GLLAN_TSOMSK_F_TCPMSKF_SHIFT 0
+#define I40E_GLLAN_TSOMSK_F_TCPMSKF_MASK (0xFFF << I40E_GLLAN_TSOMSK_F_TCPMSKF_SHIFT)
+#define I40E_GLLAN_TSOMSK_L 0x000442E0
+#define I40E_GLLAN_TSOMSK_L_TCPMSKL_SHIFT 0
+#define I40E_GLLAN_TSOMSK_L_TCPMSKL_MASK (0xFFF << I40E_GLLAN_TSOMSK_L_TCPMSKL_SHIFT)
+#define I40E_GLLAN_TSOMSK_M 0x000442DC
+#define I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT 0
+#define I40E_GLLAN_TSOMSK_M_TCPMSKM_MASK (0xFFF << I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT)
+#define I40E_PFLAN_QALLOC 0x001C0400
+#define I40E_PFLAN_QALLOC_FIRSTQ_SHIFT 0
+#define I40E_PFLAN_QALLOC_FIRSTQ_MASK (0x7FF << I40E_PFLAN_QALLOC_FIRSTQ_SHIFT)
+#define I40E_PFLAN_QALLOC_LASTQ_SHIFT 16
+#define I40E_PFLAN_QALLOC_LASTQ_MASK (0x7FF << I40E_PFLAN_QALLOC_LASTQ_SHIFT)
+#define I40E_PFLAN_QALLOC_VALID_SHIFT 31
+#define I40E_PFLAN_QALLOC_VALID_MASK (0x1 << I40E_PFLAN_QALLOC_VALID_SHIFT)
+#define I40E_QRX_ENA(_Q) (0x00120000 + ((_Q) * 4)) /* _i=0...1535 */
+#define I40E_QRX_ENA_MAX_INDEX 1535
+#define I40E_QRX_ENA_QENA_REQ_SHIFT 0
+#define I40E_QRX_ENA_QENA_REQ_MASK (0x1 << I40E_QRX_ENA_QENA_REQ_SHIFT)
+#define I40E_QRX_ENA_FAST_QDIS_SHIFT 1
+#define I40E_QRX_ENA_FAST_QDIS_MASK (0x1 << I40E_QRX_ENA_FAST_QDIS_SHIFT)
+#define I40E_QRX_ENA_QENA_STAT_SHIFT 2
+#define I40E_QRX_ENA_QENA_STAT_MASK (0x1 << I40E_QRX_ENA_QENA_STAT_SHIFT)
+#define I40E_QRX_TAIL(_Q) (0x00128000 + ((_Q) * 4)) /* _i=0...1535 */
+#define I40E_QRX_TAIL_MAX_INDEX 1535
+#define I40E_QRX_TAIL_TAIL_SHIFT 0
+#define I40E_QRX_TAIL_TAIL_MASK (0x1FFF << I40E_QRX_TAIL_TAIL_SHIFT)
+#define I40E_QTX_CTL(_Q) (0x00104000 + ((_Q) * 4)) /* _i=0...1535 */
+#define I40E_QTX_CTL_MAX_INDEX 1535
+#define I40E_QTX_CTL_PFVF_Q_SHIFT 0
+#define I40E_QTX_CTL_PFVF_Q_MASK (0x3 << I40E_QTX_CTL_PFVF_Q_SHIFT)
+#define I40E_QTX_CTL_PF_INDX_SHIFT 2
+#define I40E_QTX_CTL_PF_INDX_MASK (0xF << I40E_QTX_CTL_PF_INDX_SHIFT)
+#define I40E_QTX_CTL_VFVM_INDX_SHIFT 7
+#define I40E_QTX_CTL_VFVM_INDX_MASK (0x1FF << I40E_QTX_CTL_VFVM_INDX_SHIFT)
+#define I40E_QTX_ENA(_Q) (0x00100000 + ((_Q) * 4)) /* _i=0...1535 */
+#define I40E_QTX_ENA_MAX_INDEX 1535
+#define I40E_QTX_ENA_QENA_REQ_SHIFT 0
+#define I40E_QTX_ENA_QENA_REQ_MASK (0x1 << I40E_QTX_ENA_QENA_REQ_SHIFT)
+#define I40E_QTX_ENA_FAST_QDIS_SHIFT 1
+#define I40E_QTX_ENA_FAST_QDIS_MASK (0x1 << I40E_QTX_ENA_FAST_QDIS_SHIFT)
+#define I40E_QTX_ENA_QENA_STAT_SHIFT 2
+#define I40E_QTX_ENA_QENA_STAT_MASK (0x1 << I40E_QTX_ENA_QENA_STAT_SHIFT)
+#define I40E_QTX_HEAD(_Q) (0x000E4000 + ((_Q) * 4)) /* _i=0...1535 */
+#define I40E_QTX_HEAD_MAX_INDEX 1535
+#define I40E_QTX_HEAD_HEAD_SHIFT 0
+#define I40E_QTX_HEAD_HEAD_MASK (0x1FFF << I40E_QTX_HEAD_HEAD_SHIFT)
+#define I40E_QTX_HEAD_RS_PENDING_SHIFT 16
+#define I40E_QTX_HEAD_RS_PENDING_MASK (0x1 << I40E_QTX_HEAD_RS_PENDING_SHIFT)
+#define I40E_QTX_TAIL(_Q) (0x00108000 + ((_Q) * 4)) /* _i=0...1535 */
+#define I40E_QTX_TAIL_MAX_INDEX 1535
+#define I40E_QTX_TAIL_TAIL_SHIFT 0
+#define I40E_QTX_TAIL_TAIL_MASK (0x1FFF << I40E_QTX_TAIL_TAIL_SHIFT)
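The QRX_ENA/QTX_ENA registers pair a request bit with a status bit: software sets QENA_REQ and then polls QENA_STAT until hardware reports that the queue actually changed state. A sketch for enabling one Tx queue, assuming the driver's rd32()/wr32() accessors and an arbitrary polling budget:

	/* Sketch: enable Tx queue @q and wait for hardware to acknowledge. */
	static int i40e_enable_tx_queue(struct i40e_hw *hw, u32 q)	/* hypothetical */
	{
		u32 reg = rd32(hw, I40E_QTX_ENA(q));
		int tries;

		wr32(hw, I40E_QTX_ENA(q), reg | I40E_QTX_ENA_QENA_REQ_MASK);

		for (tries = 0; tries < 10; tries++) {
			if (rd32(hw, I40E_QTX_ENA(q)) & I40E_QTX_ENA_QENA_STAT_MASK)
				return 0;	/* queue reported running */
			udelay(10);		/* poll interval: an assumption */
		}
		return -ETIMEDOUT;
	}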
+#define I40E_VPLAN_MAPENA(_VF) (0x00074000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VPLAN_MAPENA_MAX_INDEX 127
+#define I40E_VPLAN_MAPENA_TXRX_ENA_SHIFT 0
+#define I40E_VPLAN_MAPENA_TXRX_ENA_MASK (0x1 << I40E_VPLAN_MAPENA_TXRX_ENA_SHIFT)
+#define I40E_VPLAN_QTABLE(_i, _VF) (0x00070000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...15, _VF=0...127 */
+#define I40E_VPLAN_QTABLE_MAX_INDEX 15
+#define I40E_VPLAN_QTABLE_QINDEX_SHIFT 0
+#define I40E_VPLAN_QTABLE_QINDEX_MASK (0x7FF << I40E_VPLAN_QTABLE_QINDEX_SHIFT)
+#define I40E_VSILAN_QBASE(_VSI) (0x0020C800 + ((_VSI) * 4)) /* _i=0...383 */
+#define I40E_VSILAN_QBASE_MAX_INDEX 383
+#define I40E_VSILAN_QBASE_VSIBASE_SHIFT 0
+#define I40E_VSILAN_QBASE_VSIBASE_MASK (0x7FF << I40E_VSILAN_QBASE_VSIBASE_SHIFT)
+#define I40E_VSILAN_QBASE_VSIQTABLE_ENA_SHIFT 11
+#define I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK (0x1 << I40E_VSILAN_QBASE_VSIQTABLE_ENA_SHIFT)
+#define I40E_VSILAN_QTABLE(_i, _VSI) (0x00200000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...15, _VSI=0...383 */
+#define I40E_VSILAN_QTABLE_MAX_INDEX 15
+#define I40E_VSILAN_QTABLE_QINDEX_0_SHIFT 0
+#define I40E_VSILAN_QTABLE_QINDEX_0_MASK (0x7FF << I40E_VSILAN_QTABLE_QINDEX_0_SHIFT)
+#define I40E_VSILAN_QTABLE_QINDEX_1_SHIFT 16
+#define I40E_VSILAN_QTABLE_QINDEX_1_MASK (0x7FF << I40E_VSILAN_QTABLE_QINDEX_1_SHIFT)
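Each VSILAN_QTABLE word packs two queue indices (QINDEX_0 in the low half, QINDEX_1 in the high half), so entry _i of a VSI's table maps a pair of adjacent queues. Packing one entry, as a sketch (wr32() is assumed, and @vsi_id would come from whatever bookkeeping sits behind I40E_VSILAN_QBASE):

	/* Sketch: write QTABLE entry @i of VSI @vsi_id with queues @q0 and @q1. */
	static void i40e_set_vsi_qpair(struct i40e_hw *hw, u32 i, u32 vsi_id,
				       u32 q0, u32 q1)		/* hypothetical */
	{
		u32 val = ((q0 << I40E_VSILAN_QTABLE_QINDEX_0_SHIFT) &
			   I40E_VSILAN_QTABLE_QINDEX_0_MASK) |
			  ((q1 << I40E_VSILAN_QTABLE_QINDEX_1_SHIFT) &
			   I40E_VSILAN_QTABLE_QINDEX_1_MASK);

		wr32(hw, I40E_VSILAN_QTABLE(i, vsi_id), val);
	}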
+#define I40E_PRTGL_SAH 0x001E2140
+#define I40E_PRTGL_SAH_FC_SAH_SHIFT 0
+#define I40E_PRTGL_SAH_FC_SAH_MASK (0xFFFF << I40E_PRTGL_SAH_FC_SAH_SHIFT)
+#define I40E_PRTGL_SAH_MFS_SHIFT 16
+#define I40E_PRTGL_SAH_MFS_MASK (0xFFFF << I40E_PRTGL_SAH_MFS_SHIFT)
+#define I40E_PRTGL_SAL 0x001E2120
+#define I40E_PRTGL_SAL_FC_SAL_SHIFT 0
+#define I40E_PRTGL_SAL_FC_SAL_MASK (0xFFFFFFFF << I40E_PRTGL_SAL_FC_SAL_SHIFT)
+#define I40E_PRTMAC_HLCTLA 0x001E4760
+#define I40E_PRTMAC_HLCTLA_DROP_US_PKTS_SHIFT 0
+#define I40E_PRTMAC_HLCTLA_DROP_US_PKTS_MASK (0x1 << I40E_PRTMAC_HLCTLA_DROP_US_PKTS_SHIFT)
+#define I40E_PRTMAC_HLCTLA_RX_FWRD_CTRL_SHIFT 1
+#define I40E_PRTMAC_HLCTLA_RX_FWRD_CTRL_MASK (0x1 << I40E_PRTMAC_HLCTLA_RX_FWRD_CTRL_SHIFT)
+#define I40E_PRTMAC_HLCTLA_CHOP_OS_PKT_SHIFT 2
+#define I40E_PRTMAC_HLCTLA_CHOP_OS_PKT_MASK (0x1 << I40E_PRTMAC_HLCTLA_CHOP_OS_PKT_SHIFT)
+#define I40E_PRTMAC_HLCTLA_TX_HYSTERESIS_SHIFT 4
+#define I40E_PRTMAC_HLCTLA_TX_HYSTERESIS_MASK (0x7 << I40E_PRTMAC_HLCTLA_TX_HYSTERESIS_SHIFT)
+#define I40E_PRTMAC_HLCTLA_HYS_FLUSH_PKT_SHIFT 7
+#define I40E_PRTMAC_HLCTLA_HYS_FLUSH_PKT_MASK (0x1 << I40E_PRTMAC_HLCTLA_HYS_FLUSH_PKT_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GCP 0x001E3130
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GCP_HSEC_CTL_RX_CHECK_SA_GCP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GCP_HSEC_CTL_RX_CHECK_SA_GCP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GCP_HSEC_CTL_RX_CHECK_SA_GCP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GPP 0x001E3290
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GPP_HSEC_CTL_RX_CHECK_SA_GPP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GPP_HSEC_CTL_RX_CHECK_SA_GPP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GPP_HSEC_CTL_RX_CHECK_SA_GPP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_PPP 0x001E3310
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_PPP_HSEC_CTL_RX_CHECK_SA_PPP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_PPP_HSEC_CTL_RX_CHECK_SA_PPP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_PPP_HSEC_CTL_RX_CHECK_SA_PPP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GCP 0x001E3100
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GCP_HSEC_CTL_RX_CHECK_UCAST_GCP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GCP_HSEC_CTL_RX_CHECK_UCAST_GCP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GCP_HSEC_CTL_RX_CHECK_UCAST_GCP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GPP 0x001E3280
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GPP_HSEC_CTL_RX_CHECK_UCAST_GPP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GPP_HSEC_CTL_RX_CHECK_UCAST_GPP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GPP_HSEC_CTL_RX_CHECK_UCAST_GPP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_PPP 0x001E3300
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_PPP_HSEC_CTL_RX_CHECK_UCAST_PPP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_PPP_HSEC_CTL_RX_CHECK_UCAST_PPP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_PPP_HSEC_CTL_RX_CHECK_UCAST_PPP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP 0x001E30E0
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP 0x001E3260
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP 0x001E32E0
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL 0x001E3360
+#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1 0x001E3110
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_MASK (0xFFFFFFFF << I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2 0x001E3120
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_MASK (0xFFFF << I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE 0x001E30C0
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_MASK (0x1FF << I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1 0x001E3140
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_MASK (0xFFFFFFFF << I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2 0x001E3150
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_MASK (0xFFFF << I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_ENABLE 0x001E3000
+#define I40E_PRTMAC_HSEC_CTL_TX_ENABLE_HSEC_CTL_TX_ENABLE_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_ENABLE_HSEC_CTL_TX_ENABLE_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_TX_ENABLE_HSEC_CTL_TX_ENABLE_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE 0x001E30D0
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_MASK (0x1FF << I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(_i) (0x001E3370 + ((_i) * 16)) /* _i=0...8 */
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX 8
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_MASK (0xFFFF << I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(_i) (0x001E3400 + ((_i) * 16)) /* _i=0...8 */
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MAX_INDEX 8
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MASK (0xFFFF << I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_SHIFT)
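+/* Registers that take a (_i) argument are register arrays: the macro
+ * folds the index into the address (stride 16 here, valid indices given
+ * by the matching *_MAX_INDEX define).  An illustrative sketch only,
+ * assuming the rd32()/wr32() accessors used elsewhere in this driver:
+ *
+ *	for (i = 0; i <= I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX; i++)
+ *		wr32(hw, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(i), 0xFFFF);
+ */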
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1 0x001E34B0
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_MASK (0xFFFFFFFF << I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2 0x001E34C0
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_MASK (0xFFFF << I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_SHIFT)
+#define I40E_PRTMAC_HSECTL1 0x001E3560
+#define I40E_PRTMAC_HSECTL1_DROP_US_PKTS_SHIFT 0
+#define I40E_PRTMAC_HSECTL1_DROP_US_PKTS_MASK (0x1 << I40E_PRTMAC_HSECTL1_DROP_US_PKTS_SHIFT)
+#define I40E_PRTMAC_HSECTL1_PAD_US_PKT_SHIFT 3
+#define I40E_PRTMAC_HSECTL1_PAD_US_PKT_MASK (0x1 << I40E_PRTMAC_HSECTL1_PAD_US_PKT_SHIFT)
+#define I40E_PRTMAC_HSECTL1_TX_HYSTERESIS_SHIFT 4
+#define I40E_PRTMAC_HSECTL1_TX_HYSTERESIS_MASK (0x7 << I40E_PRTMAC_HSECTL1_TX_HYSTERESIS_SHIFT)
+#define I40E_PRTMAC_HSECTL1_HYS_FLUSH_PKT_SHIFT 7
+#define I40E_PRTMAC_HSECTL1_HYS_FLUSH_PKT_MASK (0x1 << I40E_PRTMAC_HSECTL1_HYS_FLUSH_PKT_SHIFT)
+#define I40E_PRTMAC_HSECTL1_EN_SFD_CHECK_SHIFT 30
+#define I40E_PRTMAC_HSECTL1_EN_SFD_CHECK_MASK (0x1 << I40E_PRTMAC_HSECTL1_EN_SFD_CHECK_SHIFT)
+#define I40E_PRTMAC_HSECTL1_EN_PREAMBLE_CHECK_SHIFT 31
+#define I40E_PRTMAC_HSECTL1_EN_PREAMBLE_CHECK_MASK (0x1 << I40E_PRTMAC_HSECTL1_EN_PREAMBLE_CHECK_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A 0x0008C480
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_SHIFT 0
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_SHIFT 2
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_SHIFT 4
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_SHIFT 6
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_SHIFT 8
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_SHIFT 10
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_SHIFT 12
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_SHIFT 14
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B 0x0008C484
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_SHIFT 0
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_SHIFT 2
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_SHIFT 4
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_SHIFT 6
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_SHIFT 8
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_SHIFT 10
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_SHIFT 12
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_SHIFT 14
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_SHIFT)
+#define I40E_GL_MNG_FWSM 0x000B6134
+#define I40E_GL_MNG_FWSM_FW_MODES_SHIFT 0
+#define I40E_GL_MNG_FWSM_FW_MODES_MASK (0x3FF << I40E_GL_MNG_FWSM_FW_MODES_SHIFT)
+#define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT 10
+#define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_MASK (0x1 << I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT)
+#define I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_SHIFT 11
+#define I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_MASK (0xF << I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_SHIFT)
+#define I40E_GL_MNG_FWSM_FW_STATUS_VALID_SHIFT 15
+#define I40E_GL_MNG_FWSM_FW_STATUS_VALID_MASK (0x1 << I40E_GL_MNG_FWSM_FW_STATUS_VALID_SHIFT)
+#define I40E_GL_MNG_FWSM_EXT_ERR_IND_SHIFT 19
+#define I40E_GL_MNG_FWSM_EXT_ERR_IND_MASK (0x3F << I40E_GL_MNG_FWSM_EXT_ERR_IND_SHIFT)
+#define I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_SHIFT 26
+#define I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_MASK (0x1 << I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_SHIFT)
+#define I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_SHIFT 27
+#define I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_MASK (0x1 << I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_SHIFT)
+#define I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_SHIFT 28
+#define I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_MASK (0x1 << I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_SHIFT)
+#define I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_SHIFT 29
+#define I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_MASK (0x1 << I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_SHIFT)
+#define I40E_GL_MNG_HWARB_CTRL 0x000B6130
+#define I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_SHIFT 0
+#define I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_MASK (0x1 << I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_SHIFT)
+#define I40E_PRT_MNG_FTFT_DATA(_i) (0x000852A0 + ((_i) * 32)) /* _i=0...31 */
+#define I40E_PRT_MNG_FTFT_DATA_MAX_INDEX 31
+#define I40E_PRT_MNG_FTFT_DATA_DWORD_SHIFT 0
+#define I40E_PRT_MNG_FTFT_DATA_DWORD_MASK (0xFFFFFFFF << I40E_PRT_MNG_FTFT_DATA_DWORD_SHIFT)
+#define I40E_PRT_MNG_FTFT_LENGTH 0x00085260
+#define I40E_PRT_MNG_FTFT_LENGTH_LENGTH_SHIFT 0
+#define I40E_PRT_MNG_FTFT_LENGTH_LENGTH_MASK (0xFF << I40E_PRT_MNG_FTFT_LENGTH_LENGTH_SHIFT)
+#define I40E_PRT_MNG_FTFT_MASK(_i) (0x00085160 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRT_MNG_FTFT_MASK_MAX_INDEX 7
+#define I40E_PRT_MNG_FTFT_MASK_MASK_SHIFT 0
+#define I40E_PRT_MNG_FTFT_MASK_MASK_MASK (0xFFFF << I40E_PRT_MNG_FTFT_MASK_MASK_SHIFT)
+#define I40E_PRT_MNG_MANC 0x00256A20
+#define I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_SHIFT 0
+#define I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_MASK (0x1 << I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_SHIFT)
+#define I40E_PRT_MNG_MANC_NCSI_DISCARD_SHIFT 1
+#define I40E_PRT_MNG_MANC_NCSI_DISCARD_MASK (0x1 << I40E_PRT_MNG_MANC_NCSI_DISCARD_SHIFT)
+#define I40E_PRT_MNG_MANC_RCV_TCO_EN_SHIFT 17
+#define I40E_PRT_MNG_MANC_RCV_TCO_EN_MASK (0x1 << I40E_PRT_MNG_MANC_RCV_TCO_EN_SHIFT)
+#define I40E_PRT_MNG_MANC_RCV_ALL_SHIFT 19
+#define I40E_PRT_MNG_MANC_RCV_ALL_MASK (0x1 << I40E_PRT_MNG_MANC_RCV_ALL_SHIFT)
+#define I40E_PRT_MNG_MANC_FIXED_NET_TYPE_SHIFT 25
+#define I40E_PRT_MNG_MANC_FIXED_NET_TYPE_MASK (0x1 << I40E_PRT_MNG_MANC_FIXED_NET_TYPE_SHIFT)
+#define I40E_PRT_MNG_MANC_NET_TYPE_SHIFT 26
+#define I40E_PRT_MNG_MANC_NET_TYPE_MASK (0x1 << I40E_PRT_MNG_MANC_NET_TYPE_SHIFT)
+#define I40E_PRT_MNG_MANC_EN_BMC2OS_SHIFT 28
+#define I40E_PRT_MNG_MANC_EN_BMC2OS_MASK (0x1 << I40E_PRT_MNG_MANC_EN_BMC2OS_SHIFT)
+#define I40E_PRT_MNG_MANC_EN_BMC2NET_SHIFT 29
+#define I40E_PRT_MNG_MANC_EN_BMC2NET_MASK (0x1 << I40E_PRT_MNG_MANC_EN_BMC2NET_SHIFT)
+#define I40E_PRT_MNG_MAVTV(_i) (0x00255900 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRT_MNG_MAVTV_MAX_INDEX 7
+#define I40E_PRT_MNG_MAVTV_VID_SHIFT 0
+#define I40E_PRT_MNG_MAVTV_VID_MASK (0xFFF << I40E_PRT_MNG_MAVTV_VID_SHIFT)
+#define I40E_PRT_MNG_MDEF(_i) (0x00255D00 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRT_MNG_MDEF_MAX_INDEX 7
+#define I40E_PRT_MNG_MDEF_MAC_EXACT_AND_SHIFT 0
+#define I40E_PRT_MNG_MDEF_MAC_EXACT_AND_MASK (0xF << I40E_PRT_MNG_MDEF_MAC_EXACT_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_BROADCAST_AND_SHIFT 4
+#define I40E_PRT_MNG_MDEF_BROADCAST_AND_MASK (0x1 << I40E_PRT_MNG_MDEF_BROADCAST_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_VLAN_AND_SHIFT 5
+#define I40E_PRT_MNG_MDEF_VLAN_AND_MASK (0xFF << I40E_PRT_MNG_MDEF_VLAN_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_SHIFT 13
+#define I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_MASK (0xF << I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_SHIFT 17
+#define I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_MASK (0xF << I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_MAC_EXACT_OR_SHIFT 21
+#define I40E_PRT_MNG_MDEF_MAC_EXACT_OR_MASK (0xF << I40E_PRT_MNG_MDEF_MAC_EXACT_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_BROADCAST_OR_SHIFT 25
+#define I40E_PRT_MNG_MDEF_BROADCAST_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_BROADCAST_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_MULTICAST_AND_SHIFT 26
+#define I40E_PRT_MNG_MDEF_MULTICAST_AND_MASK (0x1 << I40E_PRT_MNG_MDEF_MULTICAST_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_SHIFT 27
+#define I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_SHIFT 28
+#define I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_SHIFT 29
+#define I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_PORT_0X298_OR_SHIFT 30
+#define I40E_PRT_MNG_MDEF_PORT_0X298_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_PORT_0X298_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_PORT_0X26F_OR_SHIFT 31
+#define I40E_PRT_MNG_MDEF_PORT_0X26F_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_PORT_0X26F_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT(_i) (0x00255F00 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRT_MNG_MDEF_EXT_MAX_INDEX 7
+#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_SHIFT 0
+#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_MASK (0xF << I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_SHIFT 4
+#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_MASK (0xF << I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_SHIFT 8
+#define I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_MASK (0xFFFF << I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_SHIFT 24
+#define I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_SHIFT 25
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_SHIFT 26
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_SHIFT 27
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_ICMP_OR_SHIFT 28
+#define I40E_PRT_MNG_MDEF_EXT_ICMP_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_ICMP_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_MLD_SHIFT 29
+#define I40E_PRT_MNG_MDEF_EXT_MLD_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_MLD_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_SHIFT 30
+#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_SHIFT 31
+#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_SHIFT)
+#define I40E_PRT_MNG_MDEFVSI(_i) (0x00256580 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRT_MNG_MDEFVSI_MAX_INDEX 3
+#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_SHIFT 0
+#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_MASK (0xFFFF << I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_SHIFT)
+#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_SHIFT 16
+#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_MASK (0xFFFF << I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_SHIFT)
+#define I40E_PRT_MNG_METF(_i) (0x00256780 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRT_MNG_METF_MAX_INDEX 3
+#define I40E_PRT_MNG_METF_ETYPE_SHIFT 0
+#define I40E_PRT_MNG_METF_ETYPE_MASK (0xFFFF << I40E_PRT_MNG_METF_ETYPE_SHIFT)
+#define I40E_PRT_MNG_METF_POLARITY_SHIFT 30
+#define I40E_PRT_MNG_METF_POLARITY_MASK (0x1 << I40E_PRT_MNG_METF_POLARITY_SHIFT)
+#define I40E_PRT_MNG_MFUTP(_i) (0x00254E00 + ((_i) * 32)) /* _i=0...15 */
+#define I40E_PRT_MNG_MFUTP_MAX_INDEX 15
+#define I40E_PRT_MNG_MFUTP_MFUTP_N_SHIFT 0
+#define I40E_PRT_MNG_MFUTP_MFUTP_N_MASK (0xFFFF << I40E_PRT_MNG_MFUTP_MFUTP_N_SHIFT)
+#define I40E_PRT_MNG_MFUTP_UDP_SHIFT 16
+#define I40E_PRT_MNG_MFUTP_UDP_MASK (0x1 << I40E_PRT_MNG_MFUTP_UDP_SHIFT)
+#define I40E_PRT_MNG_MFUTP_TCP_SHIFT 17
+#define I40E_PRT_MNG_MFUTP_TCP_MASK (0x1 << I40E_PRT_MNG_MFUTP_TCP_SHIFT)
+#define I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_SHIFT 18
+#define I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_MASK (0x1 << I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_SHIFT)
+#define I40E_PRT_MNG_MIPAF4(_i) (0x00256280 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRT_MNG_MIPAF4_MAX_INDEX 3
+#define I40E_PRT_MNG_MIPAF4_MIPAF_SHIFT 0
+#define I40E_PRT_MNG_MIPAF4_MIPAF_MASK (0xFFFFFFFF << I40E_PRT_MNG_MIPAF4_MIPAF_SHIFT)
+#define I40E_PRT_MNG_MIPAF6(_i) (0x00254200 + ((_i) * 32)) /* _i=0...15 */
+#define I40E_PRT_MNG_MIPAF6_MAX_INDEX 15
+#define I40E_PRT_MNG_MIPAF6_MIPAF_SHIFT 0
+#define I40E_PRT_MNG_MIPAF6_MIPAF_MASK (0xFFFFFFFF << I40E_PRT_MNG_MIPAF6_MIPAF_SHIFT)
+#define I40E_PRT_MNG_MMAH(_i) (0x00256380 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRT_MNG_MMAH_MAX_INDEX 3
+#define I40E_PRT_MNG_MMAH_MMAH_SHIFT 0
+#define I40E_PRT_MNG_MMAH_MMAH_MASK (0xFFFF << I40E_PRT_MNG_MMAH_MMAH_SHIFT)
+#define I40E_PRT_MNG_MMAL(_i) (0x00256480 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRT_MNG_MMAL_MAX_INDEX 3
+#define I40E_PRT_MNG_MMAL_MMAL_SHIFT 0
+#define I40E_PRT_MNG_MMAL_MMAL_MASK (0xFFFFFFFF << I40E_PRT_MNG_MMAL_MMAL_SHIFT)
+#define I40E_PRT_MNG_MNGONLY 0x00256A60
+#define I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_SHIFT 0
+#define I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_MASK (0xFF << I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_SHIFT)
+#define I40E_PRT_MNG_MSFM 0x00256AA0
+#define I40E_PRT_MNG_MSFM_PORT_26F_UDP_SHIFT 0
+#define I40E_PRT_MNG_MSFM_PORT_26F_UDP_MASK (0x1 << I40E_PRT_MNG_MSFM_PORT_26F_UDP_SHIFT)
+#define I40E_PRT_MNG_MSFM_PORT_26F_TCP_SHIFT 1
+#define I40E_PRT_MNG_MSFM_PORT_26F_TCP_MASK (0x1 << I40E_PRT_MNG_MSFM_PORT_26F_TCP_SHIFT)
+#define I40E_PRT_MNG_MSFM_PORT_298_UDP_SHIFT 2
+#define I40E_PRT_MNG_MSFM_PORT_298_UDP_MASK (0x1 << I40E_PRT_MNG_MSFM_PORT_298_UDP_SHIFT)
+#define I40E_PRT_MNG_MSFM_PORT_298_TCP_SHIFT 3
+#define I40E_PRT_MNG_MSFM_PORT_298_TCP_MASK (0x1 << I40E_PRT_MNG_MSFM_PORT_298_TCP_SHIFT)
+#define I40E_PRT_MNG_MSFM_IPV6_0_MASK_SHIFT 4
+#define I40E_PRT_MNG_MSFM_IPV6_0_MASK_MASK (0x1 << I40E_PRT_MNG_MSFM_IPV6_0_MASK_SHIFT)
+#define I40E_PRT_MNG_MSFM_IPV6_1_MASK_SHIFT 5
+#define I40E_PRT_MNG_MSFM_IPV6_1_MASK_MASK (0x1 << I40E_PRT_MNG_MSFM_IPV6_1_MASK_SHIFT)
+#define I40E_PRT_MNG_MSFM_IPV6_2_MASK_SHIFT 6
+#define I40E_PRT_MNG_MSFM_IPV6_2_MASK_MASK (0x1 << I40E_PRT_MNG_MSFM_IPV6_2_MASK_SHIFT)
+#define I40E_PRT_MNG_MSFM_IPV6_3_MASK_SHIFT 7
+#define I40E_PRT_MNG_MSFM_IPV6_3_MASK_MASK (0x1 << I40E_PRT_MNG_MSFM_IPV6_3_MASK_SHIFT)
+#define I40E_MSIX_PBA(_i) (0x00004900 + ((_i) * 4)) /* _i=0...5 */
+#define I40E_MSIX_PBA_MAX_INDEX 5
+#define I40E_MSIX_PBA_PENBIT_SHIFT 0
+#define I40E_MSIX_PBA_PENBIT_MASK (0xFFFFFFFF << I40E_MSIX_PBA_PENBIT_SHIFT)
+#define I40E_MSIX_TADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...128 */
+#define I40E_MSIX_TADD_MAX_INDEX 128
+#define I40E_MSIX_TADD_MSIXTADD10_SHIFT 0
+#define I40E_MSIX_TADD_MSIXTADD10_MASK (0x3 << I40E_MSIX_TADD_MSIXTADD10_SHIFT)
+#define I40E_MSIX_TADD_MSIXTADD_SHIFT 2
+#define I40E_MSIX_TADD_MSIXTADD_MASK (0x3FFFFFFF << I40E_MSIX_TADD_MSIXTADD_SHIFT)
+#define I40E_MSIX_TMSG(_i) (0x00000008 + ((_i) * 16)) /* _i=0...128 */
+#define I40E_MSIX_TMSG_MAX_INDEX 128
+#define I40E_MSIX_TMSG_MSIXTMSG_SHIFT 0
+#define I40E_MSIX_TMSG_MSIXTMSG_MASK (0xFFFFFFFF << I40E_MSIX_TMSG_MSIXTMSG_SHIFT)
+#define I40E_MSIX_TUADD(_i) (0x00000004 + ((_i) * 16)) /* _i=0...128 */
+#define I40E_MSIX_TUADD_MAX_INDEX 128
+#define I40E_MSIX_TUADD_MSIXTUADD_SHIFT 0
+#define I40E_MSIX_TUADD_MSIXTUADD_MASK (0xFFFFFFFF << I40E_MSIX_TUADD_MSIXTUADD_SHIFT)
+#define I40E_MSIX_TVCTRL(_i) (0x0000000C + ((_i) * 16)) /* _i=0...128 */
+#define I40E_MSIX_TVCTRL_MAX_INDEX 128
+#define I40E_MSIX_TVCTRL_MASK_SHIFT 0
+#define I40E_MSIX_TVCTRL_MASK_MASK (0x1 << I40E_MSIX_TVCTRL_MASK_SHIFT)
+#define I40E_VFMSIX_PBA1(_i) (0x00004944 + ((_i) * 4)) /* _i=0...19 */
+#define I40E_VFMSIX_PBA1_MAX_INDEX 19
+#define I40E_VFMSIX_PBA1_PENBIT_SHIFT 0
+#define I40E_VFMSIX_PBA1_PENBIT_MASK (0xFFFFFFFF << I40E_VFMSIX_PBA1_PENBIT_SHIFT)
+#define I40E_VFMSIX_TADD1(_i) (0x00002100 + ((_i) * 16)) /* _i=0...639 */
+#define I40E_VFMSIX_TADD1_MAX_INDEX 639
+#define I40E_VFMSIX_TADD1_MSIXTADD10_SHIFT 0
+#define I40E_VFMSIX_TADD1_MSIXTADD10_MASK (0x3 << I40E_VFMSIX_TADD1_MSIXTADD10_SHIFT)
+#define I40E_VFMSIX_TADD1_MSIXTADD_SHIFT 2
+#define I40E_VFMSIX_TADD1_MSIXTADD_MASK (0x3FFFFFFF << I40E_VFMSIX_TADD1_MSIXTADD_SHIFT)
+#define I40E_VFMSIX_TMSG1(_i) (0x00002108 + ((_i) * 16)) /* _i=0...639 */
+#define I40E_VFMSIX_TMSG1_MAX_INDEX 639
+#define I40E_VFMSIX_TMSG1_MSIXTMSG_SHIFT 0
+#define I40E_VFMSIX_TMSG1_MSIXTMSG_MASK (0xFFFFFFFF << I40E_VFMSIX_TMSG1_MSIXTMSG_SHIFT)
+#define I40E_VFMSIX_TUADD1(_i) (0x00002104 + ((_i) * 16)) /* _i=0...639 */
+#define I40E_VFMSIX_TUADD1_MAX_INDEX 639
+#define I40E_VFMSIX_TUADD1_MSIXTUADD_SHIFT 0
+#define I40E_VFMSIX_TUADD1_MSIXTUADD_MASK (0xFFFFFFFF << I40E_VFMSIX_TUADD1_MSIXTUADD_SHIFT)
+#define I40E_VFMSIX_TVCTRL1(_i) (0x0000210C + ((_i) * 16)) /* _i=0...639 */
+#define I40E_VFMSIX_TVCTRL1_MAX_INDEX 639
+#define I40E_VFMSIX_TVCTRL1_MASK_SHIFT 0
+#define I40E_VFMSIX_TVCTRL1_MASK_MASK (0x1 << I40E_VFMSIX_TVCTRL1_MASK_SHIFT)
+#define I40E_GLNVM_FLA 0x000B6108
+#define I40E_GLNVM_FLA_FL_SCK_SHIFT 0
+#define I40E_GLNVM_FLA_FL_SCK_MASK (0x1 << I40E_GLNVM_FLA_FL_SCK_SHIFT)
+#define I40E_GLNVM_FLA_FL_CE_SHIFT 1
+#define I40E_GLNVM_FLA_FL_CE_MASK (0x1 << I40E_GLNVM_FLA_FL_CE_SHIFT)
+#define I40E_GLNVM_FLA_FL_SI_SHIFT 2
+#define I40E_GLNVM_FLA_FL_SI_MASK (0x1 << I40E_GLNVM_FLA_FL_SI_SHIFT)
+#define I40E_GLNVM_FLA_FL_SO_SHIFT 3
+#define I40E_GLNVM_FLA_FL_SO_MASK (0x1 << I40E_GLNVM_FLA_FL_SO_SHIFT)
+#define I40E_GLNVM_FLA_FL_REQ_SHIFT 4
+#define I40E_GLNVM_FLA_FL_REQ_MASK (0x1 << I40E_GLNVM_FLA_FL_REQ_SHIFT)
+#define I40E_GLNVM_FLA_FL_GNT_SHIFT 5
+#define I40E_GLNVM_FLA_FL_GNT_MASK (0x1 << I40E_GLNVM_FLA_FL_GNT_SHIFT)
+#define I40E_GLNVM_FLA_LOCKED_SHIFT 6
+#define I40E_GLNVM_FLA_LOCKED_MASK (0x1 << I40E_GLNVM_FLA_LOCKED_SHIFT)
+#define I40E_GLNVM_FLA_FL_SADDR_SHIFT 18
+#define I40E_GLNVM_FLA_FL_SADDR_MASK (0x7FF << I40E_GLNVM_FLA_FL_SADDR_SHIFT)
+#define I40E_GLNVM_FLA_FL_BUSY_SHIFT 30
+#define I40E_GLNVM_FLA_FL_BUSY_MASK (0x1 << I40E_GLNVM_FLA_FL_BUSY_SHIFT)
+#define I40E_GLNVM_FLA_FL_DER_SHIFT 31
+#define I40E_GLNVM_FLA_FL_DER_MASK (0x1 << I40E_GLNVM_FLA_FL_DER_SHIFT)
+#define I40E_GLNVM_FLASHID 0x000B6104
+#define I40E_GLNVM_FLASHID_FLASHID_SHIFT 0
+#define I40E_GLNVM_FLASHID_FLASHID_MASK (0xFFFFFF << I40E_GLNVM_FLASHID_FLASHID_SHIFT)
+#define I40E_GLNVM_GENS 0x000B6100
+#define I40E_GLNVM_GENS_NVM_PRES_SHIFT 0
+#define I40E_GLNVM_GENS_NVM_PRES_MASK (0x1 << I40E_GLNVM_GENS_NVM_PRES_SHIFT)
+#define I40E_GLNVM_GENS_SR_SIZE_SHIFT 5
+#define I40E_GLNVM_GENS_SR_SIZE_MASK (0x7 << I40E_GLNVM_GENS_SR_SIZE_SHIFT)
+#define I40E_GLNVM_GENS_BANK1VAL_SHIFT 8
+#define I40E_GLNVM_GENS_BANK1VAL_MASK (0x1 << I40E_GLNVM_GENS_BANK1VAL_SHIFT)
+#define I40E_GLNVM_GENS_ALT_PRST_SHIFT 23
+#define I40E_GLNVM_GENS_ALT_PRST_MASK (0x1 << I40E_GLNVM_GENS_ALT_PRST_SHIFT)
+#define I40E_GLNVM_GENS_FL_AUTO_RD_SHIFT 25
+#define I40E_GLNVM_GENS_FL_AUTO_RD_MASK (0x1 << I40E_GLNVM_GENS_FL_AUTO_RD_SHIFT)
+#define I40E_GLNVM_PROTCSR(_i) (0x000B6010 + ((_i) * 4)) /* _i=0...59 */
+#define I40E_GLNVM_PROTCSR_MAX_INDEX 59
+#define I40E_GLNVM_PROTCSR_ADDR_BLOCK_SHIFT 0
+#define I40E_GLNVM_PROTCSR_ADDR_BLOCK_MASK (0xFFFFFF << I40E_GLNVM_PROTCSR_ADDR_BLOCK_SHIFT)
+#define I40E_GLNVM_SRCTL 0x000B6110
+#define I40E_GLNVM_SRCTL_SRBUSY_SHIFT 0
+#define I40E_GLNVM_SRCTL_SRBUSY_MASK (0x1 << I40E_GLNVM_SRCTL_SRBUSY_SHIFT)
+#define I40E_GLNVM_SRCTL_ADDR_SHIFT 14
+#define I40E_GLNVM_SRCTL_ADDR_MASK (0x7FFF << I40E_GLNVM_SRCTL_ADDR_SHIFT)
+#define I40E_GLNVM_SRCTL_WRITE_SHIFT 29
+#define I40E_GLNVM_SRCTL_WRITE_MASK (0x1 << I40E_GLNVM_SRCTL_WRITE_SHIFT)
+#define I40E_GLNVM_SRCTL_START_SHIFT 30
+#define I40E_GLNVM_SRCTL_START_MASK (0x1 << I40E_GLNVM_SRCTL_START_SHIFT)
+#define I40E_GLNVM_SRCTL_DONE_SHIFT 31
+#define I40E_GLNVM_SRCTL_DONE_MASK (0x1 << I40E_GLNVM_SRCTL_DONE_SHIFT)
+#define I40E_GLNVM_SRDATA 0x000B6114
+#define I40E_GLNVM_SRDATA_WRDATA_SHIFT 0
+#define I40E_GLNVM_SRDATA_WRDATA_MASK (0xFFFF << I40E_GLNVM_SRDATA_WRDATA_SHIFT)
+#define I40E_GLNVM_SRDATA_RDDATA_SHIFT 16
+#define I40E_GLNVM_SRDATA_RDDATA_MASK (0xFFFF << I40E_GLNVM_SRDATA_RDDATA_SHIFT)
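+/* Typical use of the SHIFT/MASK pairs above: a shadow-RAM word read
+ * starts a transaction through GLNVM_SRCTL and pulls the result out of
+ * GLNVM_SRDATA.  An illustrative sketch, not the driver's actual helper;
+ * it assumes the usual rd32()/wr32() accessors and elides the polling
+ * loop on the DONE bit:
+ *
+ *	u32 srctl = ((u32)offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
+ *		    I40E_GLNVM_SRCTL_START_MASK;
+ *	wr32(hw, I40E_GLNVM_SRCTL, srctl);
+ *	... wait for I40E_GLNVM_SRCTL_DONE_MASK to be set ...
+ *	word = (rd32(hw, I40E_GLNVM_SRDATA) &
+ *		I40E_GLNVM_SRDATA_RDDATA_MASK) >>
+ *	       I40E_GLNVM_SRDATA_RDDATA_SHIFT;
+ */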
+#define I40E_GLPCI_BYTCTH 0x0009C484
+#define I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_SHIFT 0
+#define I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_MASK (0xFFFFFFFF << I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_SHIFT)
+#define I40E_GLPCI_BYTCTL 0x0009C488
+#define I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_SHIFT 0
+#define I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_MASK (0xFFFFFFFF << I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_SHIFT)
+#define I40E_GLPCI_CAPCTRL 0x000BE4A4
+#define I40E_GLPCI_CAPCTRL_VPD_EN_SHIFT 0
+#define I40E_GLPCI_CAPCTRL_VPD_EN_MASK (0x1 << I40E_GLPCI_CAPCTRL_VPD_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP 0x000BE4A8
+#define I40E_GLPCI_CAPSUP_PCIE_VER_SHIFT 0
+#define I40E_GLPCI_CAPSUP_PCIE_VER_MASK (0x1 << I40E_GLPCI_CAPSUP_PCIE_VER_SHIFT)
+#define I40E_GLPCI_CAPSUP_LTR_EN_SHIFT 2
+#define I40E_GLPCI_CAPSUP_LTR_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_LTR_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_TPH_EN_SHIFT 3
+#define I40E_GLPCI_CAPSUP_TPH_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_TPH_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_ARI_EN_SHIFT 4
+#define I40E_GLPCI_CAPSUP_ARI_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_ARI_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_IOV_EN_SHIFT 5
+#define I40E_GLPCI_CAPSUP_IOV_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_IOV_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_ACS_EN_SHIFT 6
+#define I40E_GLPCI_CAPSUP_ACS_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_ACS_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_SEC_EN_SHIFT 7
+#define I40E_GLPCI_CAPSUP_SEC_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_SEC_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_ECRC_GEN_EN_SHIFT 16
+#define I40E_GLPCI_CAPSUP_ECRC_GEN_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_ECRC_GEN_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_ECRC_CHK_EN_SHIFT 17
+#define I40E_GLPCI_CAPSUP_ECRC_CHK_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_ECRC_CHK_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_IDO_EN_SHIFT 18
+#define I40E_GLPCI_CAPSUP_IDO_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_IDO_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_MSI_MASK_SHIFT 19
+#define I40E_GLPCI_CAPSUP_MSI_MASK_MASK (0x1 << I40E_GLPCI_CAPSUP_MSI_MASK_SHIFT)
+#define I40E_GLPCI_CAPSUP_CSR_CONF_EN_SHIFT 20
+#define I40E_GLPCI_CAPSUP_CSR_CONF_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_CSR_CONF_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_SHIFT 30
+#define I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_MASK (0x1 << I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_SHIFT)
+#define I40E_GLPCI_CAPSUP_LOAD_DEV_ID_SHIFT 31
+#define I40E_GLPCI_CAPSUP_LOAD_DEV_ID_MASK (0x1 << I40E_GLPCI_CAPSUP_LOAD_DEV_ID_SHIFT)
+#define I40E_GLPCI_CNF 0x000BE4C0
+#define I40E_GLPCI_CNF_FLEX10_SHIFT 1
+#define I40E_GLPCI_CNF_FLEX10_MASK (0x1 << I40E_GLPCI_CNF_FLEX10_SHIFT)
+#define I40E_GLPCI_CNF_WAKE_PIN_EN_SHIFT 2
+#define I40E_GLPCI_CNF_WAKE_PIN_EN_MASK (0x1 << I40E_GLPCI_CNF_WAKE_PIN_EN_SHIFT)
+#define I40E_GLPCI_CNF2 0x000BE494
+#define I40E_GLPCI_CNF2_RO_DIS_SHIFT 0
+#define I40E_GLPCI_CNF2_RO_DIS_MASK (0x1 << I40E_GLPCI_CNF2_RO_DIS_SHIFT)
+#define I40E_GLPCI_CNF2_CACHELINE_SIZE_SHIFT 1
+#define I40E_GLPCI_CNF2_CACHELINE_SIZE_MASK (0x1 << I40E_GLPCI_CNF2_CACHELINE_SIZE_SHIFT)
+#define I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT 2
+#define I40E_GLPCI_CNF2_MSI_X_PF_N_MASK (0x7FF << I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT)
+#define I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT 13
+#define I40E_GLPCI_CNF2_MSI_X_VF_N_MASK (0x7FF << I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT)
+#define I40E_GLPCI_DREVID 0x0009C480
+#define I40E_GLPCI_DREVID_DEFAULT_REVID_SHIFT 0
+#define I40E_GLPCI_DREVID_DEFAULT_REVID_MASK (0xFF << I40E_GLPCI_DREVID_DEFAULT_REVID_SHIFT)
+#define I40E_GLPCI_GSCL_1 0x0009C48C
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_SHIFT 0
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_SHIFT 1
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_SHIFT 2
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_SHIFT 3
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_SHIFT)
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_0_SHIFT 4
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_0_MASK (0x1 << I40E_GLPCI_GSCL_1_LBC_ENABLE_0_SHIFT)
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_1_SHIFT 5
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_1_MASK (0x1 << I40E_GLPCI_GSCL_1_LBC_ENABLE_1_SHIFT)
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_2_SHIFT 6
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_2_MASK (0x1 << I40E_GLPCI_GSCL_1_LBC_ENABLE_2_SHIFT)
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_3_SHIFT 7
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_3_MASK (0x1 << I40E_GLPCI_GSCL_1_LBC_ENABLE_3_SHIFT)
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_SHIFT 8
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_MASK (0x1 << I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_SHIFT)
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_SHIFT 9
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_MASK (0x1F << I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_SHIFT)
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_SHIFT 14
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_MASK (0x1 << I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_SHIFT)
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_SHIFT 15
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_MASK (0x1F << I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_SHIFT 28
+#define I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_SHIFT 29
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_SHIFT 30
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_START_SHIFT 31
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_START_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_COUNT_START_SHIFT)
+#define I40E_GLPCI_GSCL_2 0x0009C490
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_SHIFT 0
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_MASK (0xFF << I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_SHIFT)
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_SHIFT 8
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_MASK (0xFF << I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_SHIFT)
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_SHIFT 16
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_MASK (0xFF << I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_SHIFT)
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_SHIFT 24
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_MASK (0xFF << I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_SHIFT)
+#define I40E_GLPCI_GSCL_5_8(_i) (0x0009C494 + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLPCI_GSCL_5_8_MAX_INDEX 3
+#define I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_SHIFT 0
+#define I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_MASK (0xFFFF << I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_SHIFT)
+#define I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_SHIFT 16
+#define I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_MASK (0xFFFF << I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_SHIFT)
+#define I40E_GLPCI_GSCN_0_3(_i) (0x0009C4A4 + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLPCI_GSCN_0_3_MAX_INDEX 3
+#define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT 0
+#define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_MASK (0xFFFFFFFF << I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT)
+#define I40E_GLPCI_LATCT 0x0009C4B4
+#define I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_SHIFT 0
+#define I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_MASK (0xFFFFFFFF << I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_SHIFT)
+#define I40E_GLPCI_LBARCTRL 0x000BE484
+#define I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT 0
+#define I40E_GLPCI_LBARCTRL_PREFBAR_MASK (0x1 << I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT)
+#define I40E_GLPCI_LBARCTRL_BAR32_SHIFT 1
+#define I40E_GLPCI_LBARCTRL_BAR32_MASK (0x1 << I40E_GLPCI_LBARCTRL_BAR32_SHIFT)
+#define I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_SHIFT 3
+#define I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_MASK (0x1 << I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_SHIFT)
+#define I40E_GLPCI_LBARCTRL_PE_DB_SIZE_SHIFT 4
+#define I40E_GLPCI_LBARCTRL_PE_DB_SIZE_MASK (0x3 << I40E_GLPCI_LBARCTRL_PE_DB_SIZE_SHIFT)
+#define I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT 6
+#define I40E_GLPCI_LBARCTRL_FL_SIZE_MASK (0x7 << I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT)
+#define I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_SHIFT 10
+#define I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_MASK (0x1 << I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_SHIFT)
+#define I40E_GLPCI_LBARCTRL_EXROM_SIZE_SHIFT 11
+#define I40E_GLPCI_LBARCTRL_EXROM_SIZE_MASK (0x7 << I40E_GLPCI_LBARCTRL_EXROM_SIZE_SHIFT)
+#define I40E_GLPCI_LINKCAP 0x000BE4AC
+#define I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_SHIFT 0
+#define I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_MASK (0x3F << I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_SHIFT)
+#define I40E_GLPCI_LINKCAP_MAX_PAYLOAD_SHIFT 6
+#define I40E_GLPCI_LINKCAP_MAX_PAYLOAD_MASK (0x7 << I40E_GLPCI_LINKCAP_MAX_PAYLOAD_SHIFT)
+#define I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_SHIFT 9
+#define I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_MASK (0xF << I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_SHIFT)
+#define I40E_GLPCI_PCIERR 0x000BE4FC
+#define I40E_GLPCI_PCIERR_PCIE_ERR_REP_SHIFT 0
+#define I40E_GLPCI_PCIERR_PCIE_ERR_REP_MASK (0xFFFFFFFF << I40E_GLPCI_PCIERR_PCIE_ERR_REP_SHIFT)
+#define I40E_GLPCI_PKTCT 0x0009C4BC
+#define I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_SHIFT 0
+#define I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_MASK (0xFFFFFFFF << I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_SHIFT)
+#define I40E_GLPCI_PMSUP 0x000BE4B0
+#define I40E_GLPCI_PMSUP_ASPM_SUP_SHIFT 0
+#define I40E_GLPCI_PMSUP_ASPM_SUP_MASK (0x3 << I40E_GLPCI_PMSUP_ASPM_SUP_SHIFT)
+#define I40E_GLPCI_PMSUP_L0S_EXIT_LAT_SHIFT 2
+#define I40E_GLPCI_PMSUP_L0S_EXIT_LAT_MASK (0x7 << I40E_GLPCI_PMSUP_L0S_EXIT_LAT_SHIFT)
+#define I40E_GLPCI_PMSUP_L1_EXIT_LAT_SHIFT 5
+#define I40E_GLPCI_PMSUP_L1_EXIT_LAT_MASK (0x7 << I40E_GLPCI_PMSUP_L1_EXIT_LAT_SHIFT)
+#define I40E_GLPCI_PMSUP_L0S_ACC_LAT_SHIFT 8
+#define I40E_GLPCI_PMSUP_L0S_ACC_LAT_MASK (0x7 << I40E_GLPCI_PMSUP_L0S_ACC_LAT_SHIFT)
+#define I40E_GLPCI_PMSUP_L1_ACC_LAT_SHIFT 11
+#define I40E_GLPCI_PMSUP_L1_ACC_LAT_MASK (0x7 << I40E_GLPCI_PMSUP_L1_ACC_LAT_SHIFT)
+#define I40E_GLPCI_PMSUP_SLOT_CLK_SHIFT 14
+#define I40E_GLPCI_PMSUP_SLOT_CLK_MASK (0x1 << I40E_GLPCI_PMSUP_SLOT_CLK_SHIFT)
+#define I40E_GLPCI_PMSUP_OBFF_SUP_SHIFT 15
+#define I40E_GLPCI_PMSUP_OBFF_SUP_MASK (0x3 << I40E_GLPCI_PMSUP_OBFF_SUP_SHIFT)
+#define I40E_GLPCI_PWRDATA 0x000BE490
+#define I40E_GLPCI_PWRDATA_D0_POWER_SHIFT 0
+#define I40E_GLPCI_PWRDATA_D0_POWER_MASK (0xFF << I40E_GLPCI_PWRDATA_D0_POWER_SHIFT)
+#define I40E_GLPCI_PWRDATA_COMM_POWER_SHIFT 8
+#define I40E_GLPCI_PWRDATA_COMM_POWER_MASK (0xFF << I40E_GLPCI_PWRDATA_COMM_POWER_SHIFT)
+#define I40E_GLPCI_PWRDATA_D3_POWER_SHIFT 16
+#define I40E_GLPCI_PWRDATA_D3_POWER_MASK (0xFF << I40E_GLPCI_PWRDATA_D3_POWER_SHIFT)
+#define I40E_GLPCI_PWRDATA_DATA_SCALE_SHIFT 24
+#define I40E_GLPCI_PWRDATA_DATA_SCALE_MASK (0x3 << I40E_GLPCI_PWRDATA_DATA_SCALE_SHIFT)
+#define I40E_GLPCI_REVID 0x000BE4B4
+#define I40E_GLPCI_REVID_NVM_REVID_SHIFT 0
+#define I40E_GLPCI_REVID_NVM_REVID_MASK (0xFF << I40E_GLPCI_REVID_NVM_REVID_SHIFT)
+#define I40E_GLPCI_SERH 0x000BE49C
+#define I40E_GLPCI_SERH_SER_NUM_H_SHIFT 0
+#define I40E_GLPCI_SERH_SER_NUM_H_MASK (0xFFFF << I40E_GLPCI_SERH_SER_NUM_H_SHIFT)
+#define I40E_GLPCI_SERL 0x000BE498
+#define I40E_GLPCI_SERL_SER_NUM_L_SHIFT 0
+#define I40E_GLPCI_SERL_SER_NUM_L_MASK (0xFFFFFFFF << I40E_GLPCI_SERL_SER_NUM_L_SHIFT)
+#define I40E_GLPCI_SUBSYSID 0x000BE48C
+#define I40E_GLPCI_SUBSYSID_SUB_VEN_ID_SHIFT 0
+#define I40E_GLPCI_SUBSYSID_SUB_VEN_ID_MASK (0xFFFF << I40E_GLPCI_SUBSYSID_SUB_VEN_ID_SHIFT)
+#define I40E_GLPCI_SUBSYSID_SUB_ID_SHIFT 16
+#define I40E_GLPCI_SUBSYSID_SUB_ID_MASK (0xFFFF << I40E_GLPCI_SUBSYSID_SUB_ID_SHIFT)
+#define I40E_GLPCI_UPADD 0x000BE4F8
+#define I40E_GLPCI_UPADD_ADDRESS_SHIFT 1
+#define I40E_GLPCI_UPADD_ADDRESS_MASK (0x7FFFFFFF << I40E_GLPCI_UPADD_ADDRESS_SHIFT)
+#define I40E_GLPCI_VFSUP 0x000BE4B8
+#define I40E_GLPCI_VFSUP_VF_PREFETCH_SHIFT 0
+#define I40E_GLPCI_VFSUP_VF_PREFETCH_MASK (0x1 << I40E_GLPCI_VFSUP_VF_PREFETCH_SHIFT)
+#define I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT 1
+#define I40E_GLPCI_VFSUP_VR_BAR_TYPE_MASK (0x1 << I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT)
+#define I40E_PF_FUNC_RID 0x0009C000
+#define I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT 0
+#define I40E_PF_FUNC_RID_FUNCTION_NUMBER_MASK (0x7 << I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT)
+#define I40E_PF_FUNC_RID_DEVICE_NUMBER_SHIFT 3
+#define I40E_PF_FUNC_RID_DEVICE_NUMBER_MASK (0x1F << I40E_PF_FUNC_RID_DEVICE_NUMBER_SHIFT)
+#define I40E_PF_FUNC_RID_BUS_NUMBER_SHIFT 8
+#define I40E_PF_FUNC_RID_BUS_NUMBER_MASK (0xFF << I40E_PF_FUNC_RID_BUS_NUMBER_SHIFT)
+#define I40E_PF_PCI_CIAA 0x0009C080
+#define I40E_PF_PCI_CIAA_ADDRESS_SHIFT 0
+#define I40E_PF_PCI_CIAA_ADDRESS_MASK (0xFFF << I40E_PF_PCI_CIAA_ADDRESS_SHIFT)
+#define I40E_PF_PCI_CIAA_VF_NUM_SHIFT 12
+#define I40E_PF_PCI_CIAA_VF_NUM_MASK (0x7F << I40E_PF_PCI_CIAA_VF_NUM_SHIFT)
+#define I40E_PF_PCI_CIAD 0x0009C100
+#define I40E_PF_PCI_CIAD_DATA_SHIFT 0
+#define I40E_PF_PCI_CIAD_DATA_MASK (0xFFFFFFFF << I40E_PF_PCI_CIAD_DATA_SHIFT)
+#define I40E_PFPCI_CLASS 0x000BE400
+#define I40E_PFPCI_CLASS_STORAGE_CLASS_SHIFT 0
+#define I40E_PFPCI_CLASS_STORAGE_CLASS_MASK (0x1 << I40E_PFPCI_CLASS_STORAGE_CLASS_SHIFT)
+#define I40E_PFPCI_CNF 0x000BE000
+#define I40E_PFPCI_CNF_MSI_EN_SHIFT 2
+#define I40E_PFPCI_CNF_MSI_EN_MASK (0x1 << I40E_PFPCI_CNF_MSI_EN_SHIFT)
+#define I40E_PFPCI_CNF_EXROM_DIS_SHIFT 3
+#define I40E_PFPCI_CNF_EXROM_DIS_MASK (0x1 << I40E_PFPCI_CNF_EXROM_DIS_SHIFT)
+#define I40E_PFPCI_CNF_IO_BAR_SHIFT 4
+#define I40E_PFPCI_CNF_IO_BAR_MASK (0x1 << I40E_PFPCI_CNF_IO_BAR_SHIFT)
+#define I40E_PFPCI_CNF_INT_PIN_SHIFT 5
+#define I40E_PFPCI_CNF_INT_PIN_MASK (0x3 << I40E_PFPCI_CNF_INT_PIN_SHIFT)
+#define I40E_PFPCI_FACTPS 0x0009C180
+#define I40E_PFPCI_FACTPS_FUNC_POWER_STATE_SHIFT 0
+#define I40E_PFPCI_FACTPS_FUNC_POWER_STATE_MASK (0x3 << I40E_PFPCI_FACTPS_FUNC_POWER_STATE_SHIFT)
+#define I40E_PFPCI_FACTPS_FUNC_AUX_EN_SHIFT 3
+#define I40E_PFPCI_FACTPS_FUNC_AUX_EN_MASK (0x1 << I40E_PFPCI_FACTPS_FUNC_AUX_EN_SHIFT)
+#define I40E_PFPCI_FUNC 0x000BE200
+#define I40E_PFPCI_FUNC_FUNC_DIS_SHIFT 0
+#define I40E_PFPCI_FUNC_FUNC_DIS_MASK (0x1 << I40E_PFPCI_FUNC_FUNC_DIS_SHIFT)
+#define I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_SHIFT 1
+#define I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_MASK (0x1 << I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_SHIFT)
+#define I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_SHIFT 2
+#define I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_MASK (0x1 << I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_SHIFT)
+#define I40E_PFPCI_FUNC2 0x000BE180
+#define I40E_PFPCI_FUNC2_EMP_FUNC_DIS_SHIFT 0
+#define I40E_PFPCI_FUNC2_EMP_FUNC_DIS_MASK (0x1 << I40E_PFPCI_FUNC2_EMP_FUNC_DIS_SHIFT)
+#define I40E_PFPCI_ICAUSE 0x0009C200
+#define I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_SHIFT 0
+#define I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_MASK (0xFFFFFFFF << I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_SHIFT)
+#define I40E_PFPCI_IENA 0x0009C280
+#define I40E_PFPCI_IENA_PCIE_ERR_EN_SHIFT 0
+#define I40E_PFPCI_IENA_PCIE_ERR_EN_MASK (0xFFFFFFFF << I40E_PFPCI_IENA_PCIE_ERR_EN_SHIFT)
+#define I40E_PFPCI_PFDEVID 0x000BE080
+#define I40E_PFPCI_PFDEVID_PF_DEV_ID_LAN_SHIFT 0
+#define I40E_PFPCI_PFDEVID_PF_DEV_ID_LAN_MASK (0xFFFF << I40E_PFPCI_PFDEVID_PF_DEV_ID_LAN_SHIFT)
+#define I40E_PFPCI_PFDEVID_PF_DEV_ID_SAN_SHIFT 16
+#define I40E_PFPCI_PFDEVID_PF_DEV_ID_SAN_MASK (0xFFFF << I40E_PFPCI_PFDEVID_PF_DEV_ID_SAN_SHIFT)
+#define I40E_PFPCI_PM 0x000BE300
+#define I40E_PFPCI_PM_PME_EN_SHIFT 0
+#define I40E_PFPCI_PM_PME_EN_MASK (0x1 << I40E_PFPCI_PM_PME_EN_SHIFT)
+#define I40E_PFPCI_STATUS1 0x000BE280
+#define I40E_PFPCI_STATUS1_FUNC_VALID_SHIFT 0
+#define I40E_PFPCI_STATUS1_FUNC_VALID_MASK (0x1 << I40E_PFPCI_STATUS1_FUNC_VALID_SHIFT)
+#define I40E_PFPCI_VFDEVID 0x000BE100
+#define I40E_PFPCI_VFDEVID_VF_DEV_ID_LAN_SHIFT 0
+#define I40E_PFPCI_VFDEVID_VF_DEV_ID_LAN_MASK (0xFFFF << I40E_PFPCI_VFDEVID_VF_DEV_ID_LAN_SHIFT)
+#define I40E_PFPCI_VFDEVID_VF_DEV_ID_SAN_SHIFT 16
+#define I40E_PFPCI_VFDEVID_VF_DEV_ID_SAN_MASK (0xFFFF << I40E_PFPCI_VFDEVID_VF_DEV_ID_SAN_SHIFT)
+#define I40E_PFPCI_VMINDEX 0x0009C300
+#define I40E_PFPCI_VMINDEX_VMINDEX_SHIFT 0
+#define I40E_PFPCI_VMINDEX_VMINDEX_MASK (0x1FF << I40E_PFPCI_VMINDEX_VMINDEX_SHIFT)
+#define I40E_PFPCI_VMPEND 0x0009C380
+#define I40E_PFPCI_VMPEND_PENDING_SHIFT 0
+#define I40E_PFPCI_VMPEND_PENDING_MASK (0x1 << I40E_PFPCI_VMPEND_PENDING_SHIFT)
+#define I40E_GLPE_CPUSTATUS0 0x0000D040
+#define I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_SHIFT 0
+#define I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_MASK (0xFFFFFFFF << I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_SHIFT)
+#define I40E_GLPE_CPUSTATUS1 0x0000D044
+#define I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_SHIFT 0
+#define I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_MASK (0xFFFFFFFF << I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_SHIFT)
+#define I40E_GLPE_CPUSTATUS2 0x0000D048
+#define I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_SHIFT 0
+#define I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_MASK (0xFFFFFFFF << I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_SHIFT)
+#define I40E_GLPE_PFFLMOBJCTRL(_i) (0x0000D480 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLPE_PFFLMOBJCTRL_MAX_INDEX 15
+#define I40E_GLPE_PFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT 0
+#define I40E_GLPE_PFFLMOBJCTRL_XMIT_BLOCKSIZE_MASK (0x7 << I40E_GLPE_PFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT)
+#define I40E_GLPE_PFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT 8
+#define I40E_GLPE_PFFLMOBJCTRL_Q1_BLOCKSIZE_MASK (0x7 << I40E_GLPE_PFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT)
+#define I40E_GLPE_VFFLMOBJCTRL(_i) (0x0000D400 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPE_VFFLMOBJCTRL_MAX_INDEX 31
+#define I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT 0
+#define I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_MASK (0x7 << I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT)
+#define I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT 8
+#define I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_MASK (0x7 << I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT)
+#define I40E_GLPE_VFFLMQ1ALLOCERR(_i) (0x0000C700 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPE_VFFLMQ1ALLOCERR_MAX_INDEX 31
+#define I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_MASK (0xFFFF << I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_GLPE_VFFLMXMITALLOCERR(_i) (0x0000C600 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPE_VFFLMXMITALLOCERR_MAX_INDEX 31
+#define I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_MASK (0xFFFF << I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_GLPE_VFUDACTRL(_i) (0x0000C000 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPE_VFUDACTRL_MAX_INDEX 31
+#define I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_SHIFT 0
+#define I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_MASK (0x1 << I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_SHIFT 1
+#define I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_MASK (0x1 << I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_SHIFT 2
+#define I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_MASK (0x1 << I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_SHIFT 3
+#define I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_MASK (0x1 << I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_SHIFT 4
+#define I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_MASK (0x1 << I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_SHIFT)
+#define I40E_GLPE_VFUDAUCFBQPN(_i) (0x0000C100 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPE_VFUDAUCFBQPN_MAX_INDEX 31
+#define I40E_GLPE_VFUDAUCFBQPN_QPN_SHIFT 0
+#define I40E_GLPE_VFUDAUCFBQPN_QPN_MASK (0x3FFFF << I40E_GLPE_VFUDAUCFBQPN_QPN_SHIFT)
+#define I40E_GLPE_VFUDAUCFBQPN_VALID_SHIFT 31
+#define I40E_GLPE_VFUDAUCFBQPN_VALID_MASK (0x1 << I40E_GLPE_VFUDAUCFBQPN_VALID_SHIFT)
+#define I40E_PFPE_AEQALLOC 0x00131180
+#define I40E_PFPE_AEQALLOC_AECOUNT_SHIFT 0
+#define I40E_PFPE_AEQALLOC_AECOUNT_MASK (0xFFFFFFFF << I40E_PFPE_AEQALLOC_AECOUNT_SHIFT)
+#define I40E_PFPE_CCQPHIGH 0x00008200
+#define I40E_PFPE_CCQPHIGH_PECCQPHIGH_SHIFT 0
+#define I40E_PFPE_CCQPHIGH_PECCQPHIGH_MASK (0xFFFFFFFF << I40E_PFPE_CCQPHIGH_PECCQPHIGH_SHIFT)
+#define I40E_PFPE_CCQPLOW 0x00008180
+#define I40E_PFPE_CCQPLOW_PECCQPLOW_SHIFT 0
+#define I40E_PFPE_CCQPLOW_PECCQPLOW_MASK (0xFFFFFFFF << I40E_PFPE_CCQPLOW_PECCQPLOW_SHIFT)
+#define I40E_PFPE_CCQPSTATUS 0x00008100
+#define I40E_PFPE_CCQPSTATUS_CCQP_DONE_SHIFT 0
+#define I40E_PFPE_CCQPSTATUS_CCQP_DONE_MASK (0x1 << I40E_PFPE_CCQPSTATUS_CCQP_DONE_SHIFT)
+#define I40E_PFPE_CCQPSTATUS_CCQP_ERR_SHIFT 31
+#define I40E_PFPE_CCQPSTATUS_CCQP_ERR_MASK (0x1 << I40E_PFPE_CCQPSTATUS_CCQP_ERR_SHIFT)
+#define I40E_PFPE_CQACK 0x00131100
+#define I40E_PFPE_CQACK_PECQID_SHIFT 0
+#define I40E_PFPE_CQACK_PECQID_MASK (0x1FFFF << I40E_PFPE_CQACK_PECQID_SHIFT)
+#define I40E_PFPE_CQARM 0x00131080
+#define I40E_PFPE_CQARM_PECQID_SHIFT 0
+#define I40E_PFPE_CQARM_PECQID_MASK (0x1FFFF << I40E_PFPE_CQARM_PECQID_SHIFT)
+#define I40E_PFPE_CQPDB 0x00008000
+#define I40E_PFPE_CQPDB_WQHEAD_SHIFT 0
+#define I40E_PFPE_CQPDB_WQHEAD_MASK (0x7FF << I40E_PFPE_CQPDB_WQHEAD_SHIFT)
+#define I40E_PFPE_CQPERRCODES 0x00008880
+#define I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT 0
+#define I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_MASK (0xFFFF << I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT)
+#define I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT 16
+#define I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_MASK (0xFFFF << I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT)
+#define I40E_PFPE_CQPTAIL 0x00008080
+#define I40E_PFPE_CQPTAIL_WQTAIL_SHIFT 0
+#define I40E_PFPE_CQPTAIL_WQTAIL_MASK (0x7FF << I40E_PFPE_CQPTAIL_WQTAIL_SHIFT)
+#define I40E_PFPE_CQPTAIL_CQP_OP_ERR_SHIFT 31
+#define I40E_PFPE_CQPTAIL_CQP_OP_ERR_MASK (0x1 << I40E_PFPE_CQPTAIL_CQP_OP_ERR_SHIFT)
+#define I40E_PFPE_FLMQ1ALLOCERR 0x00008980
+#define I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_MASK (0xFFFF << I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_PFPE_FLMXMITALLOCERR 0x00008900
+#define I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_MASK (0xFFFF << I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_PFPE_IPCONFIG0 0x00008280
+#define I40E_PFPE_IPCONFIG0_PEIPID_SHIFT 0
+#define I40E_PFPE_IPCONFIG0_PEIPID_MASK (0xFFFF << I40E_PFPE_IPCONFIG0_PEIPID_SHIFT)
+#define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16
+#define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_MASK (0x1 << I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT)
+#define I40E_PFPE_IPCONFIG0_USEUPPERIDRANGE_SHIFT 17
+#define I40E_PFPE_IPCONFIG0_USEUPPERIDRANGE_MASK (0x1 << I40E_PFPE_IPCONFIG0_USEUPPERIDRANGE_SHIFT)
+#define I40E_PFPE_MRTEIDXMASK 0x00008600
+#define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0
+#define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_MASK (0x1F << I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT)
+#define I40E_PFPE_RCVUNEXPECTEDERROR 0x00008680
+#define I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT 0
+#define I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_MASK (0xFFFFFF << I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT)
+#define I40E_PFPE_TCPNOWTIMER 0x00008580
+#define I40E_PFPE_TCPNOWTIMER_TCP_NOW_SHIFT 0
+#define I40E_PFPE_TCPNOWTIMER_TCP_NOW_MASK (0xFFFFFFFF << I40E_PFPE_TCPNOWTIMER_TCP_NOW_SHIFT)
+#define I40E_PFPE_UDACTRL 0x00008700
+#define I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_SHIFT 0
+#define I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_MASK (0x1 << I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_SHIFT)
+#define I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_SHIFT 1
+#define I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_MASK (0x1 << I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_SHIFT)
+#define I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_SHIFT 2
+#define I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_MASK (0x1 << I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_SHIFT)
+#define I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_SHIFT 3
+#define I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_MASK (0x1 << I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_SHIFT)
+#define I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_SHIFT 4
+#define I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_MASK (0x1 << I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_SHIFT)
+#define I40E_PFPE_UDAUCFBQPN 0x00008780
+#define I40E_PFPE_UDAUCFBQPN_QPN_SHIFT 0
+#define I40E_PFPE_UDAUCFBQPN_QPN_MASK (0x3FFFF << I40E_PFPE_UDAUCFBQPN_QPN_SHIFT)
+#define I40E_PFPE_UDAUCFBQPN_VALID_SHIFT 31
+#define I40E_PFPE_UDAUCFBQPN_VALID_MASK (0x1 << I40E_PFPE_UDAUCFBQPN_VALID_SHIFT)
+#define I40E_PFPE_WQEALLOC 0x00138C00
+#define I40E_PFPE_WQEALLOC_PEQPID_SHIFT 0
+#define I40E_PFPE_WQEALLOC_PEQPID_MASK (0x3FFFF << I40E_PFPE_WQEALLOC_PEQPID_SHIFT)
+#define I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT 20
+#define I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_MASK (0xFFF << I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT)
+#define I40E_VFPE_AEQALLOC(_VF) (0x00130C00 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_AEQALLOC_MAX_INDEX 127
+#define I40E_VFPE_AEQALLOC_AECOUNT_SHIFT 0
+#define I40E_VFPE_AEQALLOC_AECOUNT_MASK (0xFFFFFFFF << I40E_VFPE_AEQALLOC_AECOUNT_SHIFT)
+#define I40E_VFPE_CCQPHIGH(_VF) (0x00001000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_CCQPHIGH_MAX_INDEX 127
+#define I40E_VFPE_CCQPHIGH_PECCQPHIGH_SHIFT 0
+#define I40E_VFPE_CCQPHIGH_PECCQPHIGH_MASK (0xFFFFFFFF << I40E_VFPE_CCQPHIGH_PECCQPHIGH_SHIFT)
+#define I40E_VFPE_CCQPLOW(_VF) (0x00000C00 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_CCQPLOW_MAX_INDEX 127
+#define I40E_VFPE_CCQPLOW_PECCQPLOW_SHIFT 0
+#define I40E_VFPE_CCQPLOW_PECCQPLOW_MASK (0xFFFFFFFF << I40E_VFPE_CCQPLOW_PECCQPLOW_SHIFT)
+#define I40E_VFPE_CCQPSTATUS(_VF) (0x00000800 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_CCQPSTATUS_MAX_INDEX 127
+#define I40E_VFPE_CCQPSTATUS_CCQP_DONE_SHIFT 0
+#define I40E_VFPE_CCQPSTATUS_CCQP_DONE_MASK (0x1 << I40E_VFPE_CCQPSTATUS_CCQP_DONE_SHIFT)
+#define I40E_VFPE_CCQPSTATUS_CCQP_ERR_SHIFT 31
+#define I40E_VFPE_CCQPSTATUS_CCQP_ERR_MASK (0x1 << I40E_VFPE_CCQPSTATUS_CCQP_ERR_SHIFT)
+#define I40E_VFPE_CQACK(_VF) (0x00130800 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_CQACK_MAX_INDEX 127
+#define I40E_VFPE_CQACK_PECQID_SHIFT 0
+#define I40E_VFPE_CQACK_PECQID_MASK (0x1FFFF << I40E_VFPE_CQACK_PECQID_SHIFT)
+#define I40E_VFPE_CQARM(_VF) (0x00130400 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_CQARM_MAX_INDEX 127
+#define I40E_VFPE_CQARM_PECQID_SHIFT 0
+#define I40E_VFPE_CQARM_PECQID_MASK (0x1FFFF << I40E_VFPE_CQARM_PECQID_SHIFT)
+#define I40E_VFPE_CQPDB(_VF) (0x00000000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_CQPDB_MAX_INDEX 127
+#define I40E_VFPE_CQPDB_WQHEAD_SHIFT 0
+#define I40E_VFPE_CQPDB_WQHEAD_MASK (0x7FF << I40E_VFPE_CQPDB_WQHEAD_SHIFT)
+#define I40E_VFPE_CQPERRCODES(_VF) (0x00001800 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_CQPERRCODES_MAX_INDEX 127
+#define I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT 0
+#define I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_MASK (0xFFFF << I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT)
+#define I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT 16
+#define I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_MASK (0xFFFF << I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT)
+#define I40E_VFPE_CQPTAIL(_VF) (0x00000400 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_CQPTAIL_MAX_INDEX 127
+#define I40E_VFPE_CQPTAIL_WQTAIL_SHIFT 0
+#define I40E_VFPE_CQPTAIL_WQTAIL_MASK (0x7FF << I40E_VFPE_CQPTAIL_WQTAIL_SHIFT)
+#define I40E_VFPE_CQPTAIL_CQP_OP_ERR_SHIFT 31
+#define I40E_VFPE_CQPTAIL_CQP_OP_ERR_MASK (0x1 << I40E_VFPE_CQPTAIL_CQP_OP_ERR_SHIFT)
+#define I40E_VFPE_IPCONFIG0(_VF) (0x00001400 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_IPCONFIG0_MAX_INDEX 127
+#define I40E_VFPE_IPCONFIG0_PEIPID_SHIFT 0
+#define I40E_VFPE_IPCONFIG0_PEIPID_MASK (0xFFFF << I40E_VFPE_IPCONFIG0_PEIPID_SHIFT)
+#define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16
+#define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_MASK (0x1 << I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT)
+#define I40E_VFPE_IPCONFIG0_USEUPPERIDRANGE_SHIFT 17
+#define I40E_VFPE_IPCONFIG0_USEUPPERIDRANGE_MASK (0x1 << I40E_VFPE_IPCONFIG0_USEUPPERIDRANGE_SHIFT)
+#define I40E_VFPE_MRTEIDXMASK(_VF) (0x00003000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_MRTEIDXMASK_MAX_INDEX 127
+#define I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0
+#define I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_MASK (0x1F << I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT)
+#define I40E_VFPE_RCVUNEXPECTEDERROR(_VF) (0x00003400 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_RCVUNEXPECTEDERROR_MAX_INDEX 127
+#define I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT 0
+#define I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_MASK (0xFFFFFF << I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT)
+#define I40E_VFPE_TCPNOWTIMER(_VF) (0x00002C00 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_TCPNOWTIMER_MAX_INDEX 127
+#define I40E_VFPE_TCPNOWTIMER_TCP_NOW_SHIFT 0
+#define I40E_VFPE_TCPNOWTIMER_TCP_NOW_MASK (0xFFFFFFFF << I40E_VFPE_TCPNOWTIMER_TCP_NOW_SHIFT)
+#define I40E_VFPE_WQEALLOC(_VF) (0x00138000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_WQEALLOC_MAX_INDEX 127
+#define I40E_VFPE_WQEALLOC_PEQPID_SHIFT 0
+#define I40E_VFPE_WQEALLOC_PEQPID_MASK (0x3FFFF << I40E_VFPE_WQEALLOC_PEQPID_SHIFT)
+#define I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT 20
+#define I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_MASK (0xFFF << I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT)
+#define I40E_GLPES_PFIP4RXDISCARD(_i) (0x00010600 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4RXDISCARD_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_SHIFT 0
+#define I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_SHIFT)
+#define I40E_GLPES_PFIP4RXFRAGSHI(_i) (0x00010804 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4RXFRAGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_MASK (0xFFFF << I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXFRAGSLO(_i) (0x00010800 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4RXFRAGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXMCOCTSHI(_i) (0x00010A04 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4RXMCOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXMCOCTSLO(_i) (0x00010A00 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4RXMCOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXMCPKTSHI(_i) (0x00010C04 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4RXMCPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXMCPKTSLO(_i) (0x00010C00 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4RXMCPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXOCTSHI(_i) (0x00010204 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4RXOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXOCTSLO(_i) (0x00010200 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4RXOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXPKTSHI(_i) (0x00010404 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4RXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXPKTSLO(_i) (0x00010400 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4RXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXTRUNC(_i) (0x00010700 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4RXTRUNC_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_SHIFT 0
+#define I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_SHIFT)
+#define I40E_GLPES_PFIP4TXFRAGSHI(_i) (0x00011E04 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4TXFRAGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_MASK (0xFFFF << I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXFRAGSLO(_i) (0x00011E00 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4TXFRAGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXMCOCTSHI(_i) (0x00012004 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4TXMCOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXMCOCTSLO(_i) (0x00012000 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4TXMCOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXMCPKTSHI(_i) (0x00012204 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4TXMCPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXMCPKTSLO(_i) (0x00012200 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4TXMCPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXNOROUTE(_i) (0x00012E00 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4TXNOROUTE_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT 0
+#define I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_MASK (0xFFFFFF << I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT)
+#define I40E_GLPES_PFIP4TXOCTSHI(_i) (0x00011A04 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4TXOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXOCTSLO(_i) (0x00011A00 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4TXOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXPKTSHI(_i) (0x00011C04 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4TXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXPKTSLO(_i) (0x00011C00 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4TXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXDISCARD(_i) (0x00011200 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6RXDISCARD_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_SHIFT 0
+#define I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_SHIFT)
+#define I40E_GLPES_PFIP6RXFRAGSHI(_i) (0x00011404 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6RXFRAGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_MASK (0xFFFF << I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXFRAGSLO(_i) (0x00011400 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6RXFRAGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXMCOCTSHI(_i) (0x00011604 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6RXMCOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXMCOCTSLO(_i) (0x00011600 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6RXMCOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXMCPKTSHI(_i) (0x00011804 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6RXMCPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXMCPKTSLO(_i) (0x00011800 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6RXMCPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXOCTSHI(_i) (0x00010E04 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6RXOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXOCTSLO(_i) (0x00010E00 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6RXOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXPKTSHI(_i) (0x00011004 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6RXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXPKTSLO(_i) (0x00011000 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6RXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXTRUNC(_i) (0x00011300 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6RXTRUNC_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_SHIFT 0
+#define I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_SHIFT)
+#define I40E_GLPES_PFIP6TXFRAGSHI(_i) (0x00012804 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6TXFRAGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_MASK (0xFFFF << I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXFRAGSLO(_i) (0x00012800 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6TXFRAGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXMCOCTSHI(_i) (0x00012A04 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6TXMCOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXMCOCTSLO(_i) (0x00012A00 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6TXMCOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXMCPKTSHI(_i) (0x00012C04 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6TXMCPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXMCPKTSLO(_i) (0x00012C00 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6TXMCPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXNOROUTE(_i) (0x00012F00 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6TXNOROUTE_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT 0
+#define I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_MASK (0xFFFFFF << I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT)
+#define I40E_GLPES_PFIP6TXOCTSHI(_i) (0x00012404 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6TXOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXOCTSLO(_i) (0x00012400 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6TXOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXPKTSHI(_i) (0x00012604 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6TXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXPKTSLO(_i) (0x00012600 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6TXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT)
+#define I40E_GLPES_PFRDMARXRDSHI(_i) (0x00013E04 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMARXRDSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_MASK (0xFFFF << I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_PFRDMARXRDSLO(_i) (0x00013E00 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMARXRDSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_PFRDMARXSNDSHI(_i) (0x00014004 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMARXSNDSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_MASK (0xFFFF << I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_PFRDMARXSNDSLO(_i) (0x00014000 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMARXSNDSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_PFRDMARXWRSHI(_i) (0x00013C04 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMARXWRSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_MASK (0xFFFF << I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_PFRDMARXWRSLO(_i) (0x00013C00 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMARXWRSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_PFRDMATXRDSHI(_i) (0x00014404 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMATXRDSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_MASK (0xFFFF << I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_PFRDMATXRDSLO(_i) (0x00014400 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMATXRDSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_PFRDMATXSNDSHI(_i) (0x00014604 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMATXSNDSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_MASK (0xFFFF << I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_PFRDMATXSNDSLO(_i) (0x00014600 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMATXSNDSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_PFRDMATXWRSHI(_i) (0x00014204 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMATXWRSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_MASK (0xFFFF << I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_PFRDMATXWRSLO(_i) (0x00014200 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMATXWRSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_PFRDMAVBNDHI(_i) (0x00014804 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMAVBNDHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_SHIFT 0
+#define I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_SHIFT)
+#define I40E_GLPES_PFRDMAVBNDLO(_i) (0x00014800 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMAVBNDLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_SHIFT 0
+#define I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_SHIFT)
+#define I40E_GLPES_PFRDMAVINVHI(_i) (0x00014A04 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMAVINVHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_SHIFT 0
+#define I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_SHIFT)
+#define I40E_GLPES_PFRDMAVINVLO(_i) (0x00014A00 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMAVINVLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_SHIFT 0
+#define I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_SHIFT)
+#define I40E_GLPES_PFRXVLANERR(_i) (0x00010000 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLPES_PFRXVLANERR_MAX_INDEX 15
+#define I40E_GLPES_PFRXVLANERR_RXVLANERR_SHIFT 0
+#define I40E_GLPES_PFRXVLANERR_RXVLANERR_MASK (0xFFFFFF << I40E_GLPES_PFRXVLANERR_RXVLANERR_SHIFT)
+#define I40E_GLPES_PFTCPRTXSEG(_i) (0x00013600 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLPES_PFTCPRTXSEG_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_SHIFT 0
+#define I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_MASK (0xFFFFFFFF << I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_SHIFT)
+#define I40E_GLPES_PFTCPRXOPTERR(_i) (0x00013200 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLPES_PFTCPRXOPTERR_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_SHIFT 0
+#define I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_MASK (0xFFFFFF << I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_SHIFT)
+#define I40E_GLPES_PFTCPRXPROTOERR(_i) (0x00013300 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLPES_PFTCPRXPROTOERR_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT 0
+#define I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_MASK (0xFFFFFF << I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT)
+#define I40E_GLPES_PFTCPRXSEGSHI(_i) (0x00013004 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFTCPRXSEGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT 0
+#define I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_MASK (0xFFFF << I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT)
+#define I40E_GLPES_PFTCPRXSEGSLO(_i) (0x00013000 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFTCPRXSEGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT 0
+#define I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT)
+#define I40E_GLPES_PFTCPTXSEGHI(_i) (0x00013404 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFTCPTXSEGHI_MAX_INDEX 15
+#define I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_SHIFT 0
+#define I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_MASK (0xFFFF << I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_SHIFT)
+#define I40E_GLPES_PFTCPTXSEGLO(_i) (0x00013400 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFTCPTXSEGLO_MAX_INDEX 15
+#define I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_SHIFT 0
+#define I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_MASK (0xFFFFFFFF << I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_SHIFT)
+#define I40E_GLPES_PFUDPRXPKTSHI(_i) (0x00013804 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFUDPRXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT)
+#define I40E_GLPES_PFUDPRXPKTSLO(_i) (0x00013800 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFUDPRXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT)
+#define I40E_GLPES_PFUDPTXPKTSHI(_i) (0x00013A04 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFUDPTXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT)
+#define I40E_GLPES_PFUDPTXPKTSLO(_i) (0x00013A00 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFUDPTXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT)
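+
+/*
+ * Usage sketch (illustrative; i40e_read_pf_pes_stat is a hypothetical
+ * helper): the per-PF GLPES packet/octet statistics above are split
+ * across a _LO/_HI register pair, so a full read combines both halves,
+ * e.g. i40e_read_pf_pes_stat(hw, I40E_GLPES_PFUDPTXPKTSLO(pf),
+ * I40E_GLPES_PFUDPTXPKTSHI(pf)). Assumes the driver's rd32() helper.
+ */
+static inline u64 i40e_read_pf_pes_stat(struct i40e_hw *hw, u32 loreg,
+					 u32 hireg)
+{
+	u64 lo = rd32(hw, loreg);
+	u64 hi = rd32(hw, hireg) & 0xFFFF;	/* HI half carries 16 valid bits */
+
+	return (hi << 32) | lo;
+}
+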
+#define I40E_GLPES_RDMARXMULTFPDUSHI 0x0001E014
+#define I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_SHIFT 0
+#define I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_MASK (0xFFFFFF << I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_SHIFT)
+#define I40E_GLPES_RDMARXMULTFPDUSLO 0x0001E010
+#define I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_SHIFT 0
+#define I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_MASK (0xFFFFFFFF << I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_SHIFT)
+#define I40E_GLPES_RDMARXOOODDPHI 0x0001E01C
+#define I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_SHIFT 0
+#define I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_MASK (0xFFFFFF << I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_SHIFT)
+#define I40E_GLPES_RDMARXOOODDPLO 0x0001E018
+#define I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_SHIFT 0
+#define I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_MASK (0xFFFFFFFF << I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_SHIFT)
+#define I40E_GLPES_RDMARXOOONOMARK 0x0001E004
+#define I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_SHIFT 0
+#define I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_MASK (0xFFFFFFFF << I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_SHIFT)
+#define I40E_GLPES_RDMARXUNALIGN 0x0001E000
+#define I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_SHIFT 0
+#define I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_MASK (0xFFFFFFFF << I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_SHIFT)
+#define I40E_GLPES_TCPRXFOURHOLEHI 0x0001E044
+#define I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_MASK (0xFFFFFF << I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXFOURHOLELO 0x0001E040
+#define I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_MASK (0xFFFFFFFF << I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_SHIFT)
+#define I40E_GLPES_TCPRXONEHOLEHI 0x0001E02C
+#define I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_MASK (0xFFFFFF << I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXONEHOLELO 0x0001E028
+#define I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_MASK (0xFFFFFFFF << I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_SHIFT)
+#define I40E_GLPES_TCPRXPUREACKHI 0x0001E024
+#define I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_SHIFT 0
+#define I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_MASK (0xFFFFFF << I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_SHIFT)
+#define I40E_GLPES_TCPRXPUREACKSLO 0x0001E020
+#define I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_SHIFT 0
+#define I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_MASK (0xFFFFFFFF << I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_SHIFT)
+#define I40E_GLPES_TCPRXTHREEHOLEHI 0x0001E03C
+#define I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_MASK (0xFFFFFF << I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXTHREEHOLELO 0x0001E038
+#define I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_MASK (0xFFFFFFFF << I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_SHIFT)
+#define I40E_GLPES_TCPRXTWOHOLEHI 0x0001E034
+#define I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_MASK (0xFFFFFF << I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXTWOHOLELO 0x0001E030
+#define I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_MASK (0xFFFFFFFF << I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_SHIFT)
+#define I40E_GLPES_TCPRXUNEXPERR 0x0001E008
+#define I40E_GLPES_TCPRXUNEXPERR_TCPRXUNEXPERR_SHIFT 0
+#define I40E_GLPES_TCPRXUNEXPERR_TCPRXUNEXPERR_MASK (0xFFFFFF << I40E_GLPES_TCPRXUNEXPERR_TCPRXUNEXPERR_SHIFT)
+#define I40E_GLPES_TCPTXRETRANSFASTHI 0x0001E04C
+#define I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_SHIFT 0
+#define I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_MASK (0xFFFFFF << I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_SHIFT)
+#define I40E_GLPES_TCPTXRETRANSFASTLO 0x0001E048
+#define I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_SHIFT 0
+#define I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_MASK (0xFFFFFFFF << I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSFASTHI 0x0001E054
+#define I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_MASK (0xFFFFFF << I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSFASTLO 0x0001E050
+#define I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_MASK (0xFFFFFFFF << I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSHI 0x0001E05C
+#define I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_MASK (0xFFFFFF << I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSLO 0x0001E058
+#define I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_MASK (0xFFFFFFFF << I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXDISCARD(_i) (0x00018600 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4RXDISCARD_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_SHIFT 0
+#define I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_SHIFT)
+#define I40E_GLPES_VFIP4RXFRAGSHI(_i) (0x00018804 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4RXFRAGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_MASK (0xFFFF << I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXFRAGSLO(_i) (0x00018800 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4RXFRAGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXMCOCTSHI(_i) (0x00018A04 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4RXMCOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXMCOCTSLO(_i) (0x00018A00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4RXMCOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXMCPKTSHI(_i) (0x00018C04 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4RXMCPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXMCPKTSLO(_i) (0x00018C00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4RXMCPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXOCTSHI(_i) (0x00018204 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4RXOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXOCTSLO(_i) (0x00018200 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4RXOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXPKTSHI(_i) (0x00018404 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4RXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXPKTSLO(_i) (0x00018400 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4RXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXTRUNC(_i) (0x00018700 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4RXTRUNC_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_SHIFT 0
+#define I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_SHIFT)
+#define I40E_GLPES_VFIP4TXFRAGSHI(_i) (0x00019E04 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4TXFRAGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_MASK (0xFFFF << I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXFRAGSLO(_i) (0x00019E00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4TXFRAGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXMCOCTSHI(_i) (0x0001A004 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4TXMCOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXMCOCTSLO(_i) (0x0001A000 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4TXMCOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXMCPKTSHI(_i) (0x0001A204 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4TXMCPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXMCPKTSLO(_i) (0x0001A200 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4TXMCPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXNOROUTE(_i) (0x0001AE00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4TXNOROUTE_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT 0
+#define I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_MASK (0xFFFFFF << I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT)
+#define I40E_GLPES_VFIP4TXOCTSHI(_i) (0x00019A04 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4TXOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXOCTSLO(_i) (0x00019A00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4TXOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXPKTSHI(_i) (0x00019C04 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4TXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXPKTSLO(_i) (0x00019C00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4TXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXDISCARD(_i) (0x00019200 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6RXDISCARD_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_SHIFT 0
+#define I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_SHIFT)
+#define I40E_GLPES_VFIP6RXFRAGSHI(_i) (0x00019404 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6RXFRAGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_MASK (0xFFFF << I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXFRAGSLO(_i) (0x00019400 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6RXFRAGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXMCOCTSHI(_i) (0x00019604 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6RXMCOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXMCOCTSLO(_i) (0x00019600 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6RXMCOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXMCPKTSHI(_i) (0x00019804 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6RXMCPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXMCPKTSLO(_i) (0x00019800 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6RXMCPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXOCTSHI(_i) (0x00018E04 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6RXOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXOCTSLO(_i) (0x00018E00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6RXOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXPKTSHI(_i) (0x00019004 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6RXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXPKTSLO(_i) (0x00019000 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6RXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXTRUNC(_i) (0x00019300 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6RXTRUNC_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_SHIFT 0
+#define I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_SHIFT)
+#define I40E_GLPES_VFIP6TXFRAGSHI(_i) (0x0001A804 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6TXFRAGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_MASK (0xFFFF << I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXFRAGSLO(_i) (0x0001A800 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6TXFRAGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXMCOCTSHI(_i) (0x0001AA04 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6TXMCOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXMCOCTSLO(_i) (0x0001AA00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6TXMCOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXMCPKTSHI(_i) (0x0001AC04 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6TXMCPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXMCPKTSLO(_i) (0x0001AC00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6TXMCPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXNOROUTE(_i) (0x0001AF00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6TXNOROUTE_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT 0
+#define I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_MASK (0xFFFFFF << I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT)
+#define I40E_GLPES_VFIP6TXOCTSHI(_i) (0x0001A404 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6TXOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXOCTSLO(_i) (0x0001A400 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6TXOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXPKTSHI(_i) (0x0001A604 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6TXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXPKTSLO(_i) (0x0001A600 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6TXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT)
+#define I40E_GLPES_VFRDMARXRDSHI(_i) (0x0001BE04 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMARXRDSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_MASK (0xFFFF << I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_VFRDMARXRDSLO(_i) (0x0001BE00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMARXRDSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_VFRDMARXSNDSHI(_i) (0x0001C004 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMARXSNDSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_MASK (0xFFFF << I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_VFRDMARXSNDSLO(_i) (0x0001C000 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMARXSNDSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_VFRDMARXWRSHI(_i) (0x0001BC04 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMARXWRSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_MASK (0xFFFF << I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_VFRDMARXWRSLO(_i) (0x0001BC00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMARXWRSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_VFRDMATXRDSHI(_i) (0x0001C404 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMATXRDSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_MASK (0xFFFF << I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_VFRDMATXRDSLO(_i) (0x0001C400 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMATXRDSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_VFRDMATXSNDSHI(_i) (0x0001C604 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMATXSNDSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_MASK (0xFFFF << I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_VFRDMATXSNDSLO(_i) (0x0001C600 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMATXSNDSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_VFRDMATXWRSHI(_i) (0x0001C204 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMATXWRSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_MASK (0xFFFF << I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_VFRDMATXWRSLO(_i) (0x0001C200 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMATXWRSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_VFRDMAVBNDHI(_i) (0x0001C804 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMAVBNDHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_SHIFT 0
+#define I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_SHIFT)
+#define I40E_GLPES_VFRDMAVBNDLO(_i) (0x0001C800 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMAVBNDLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_SHIFT 0
+#define I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_SHIFT)
+#define I40E_GLPES_VFRDMAVINVHI(_i) (0x0001CA04 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMAVINVHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_SHIFT 0
+#define I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_SHIFT)
+#define I40E_GLPES_VFRDMAVINVLO(_i) (0x0001CA00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMAVINVLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_SHIFT 0
+#define I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_SHIFT)
+#define I40E_GLPES_VFRXVLANERR(_i) (0x00018000 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRXVLANERR_MAX_INDEX 31
+#define I40E_GLPES_VFRXVLANERR_RXVLANERR_SHIFT 0
+#define I40E_GLPES_VFRXVLANERR_RXVLANERR_MASK (0xFFFFFF << I40E_GLPES_VFRXVLANERR_RXVLANERR_SHIFT)
+#define I40E_GLPES_VFTCPRTXSEG(_i) (0x0001B600 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFTCPRTXSEG_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_SHIFT 0
+#define I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_MASK (0xFFFFFFFF << I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_SHIFT)
+#define I40E_GLPES_VFTCPRXOPTERR(_i) (0x0001B200 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFTCPRXOPTERR_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_SHIFT 0
+#define I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_MASK (0xFFFFFF << I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_SHIFT)
+#define I40E_GLPES_VFTCPRXPROTOERR(_i) (0x0001B300 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFTCPRXPROTOERR_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT 0
+#define I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_MASK (0xFFFFFF << I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT)
+#define I40E_GLPES_VFTCPRXSEGSHI(_i) (0x0001B004 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFTCPRXSEGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT 0
+#define I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_MASK (0xFFFF << I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT)
+#define I40E_GLPES_VFTCPRXSEGSLO(_i) (0x0001B000 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFTCPRXSEGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT 0
+#define I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT)
+#define I40E_GLPES_VFTCPTXSEGHI(_i) (0x0001B404 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFTCPTXSEGHI_MAX_INDEX 31
+#define I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_SHIFT 0
+#define I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_MASK (0xFFFF << I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_SHIFT)
+#define I40E_GLPES_VFTCPTXSEGLO(_i) (0x0001B400 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFTCPTXSEGLO_MAX_INDEX 31
+#define I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_SHIFT 0
+#define I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_MASK (0xFFFFFFFF << I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_SHIFT)
+#define I40E_GLPES_VFUDPRXPKTSHI(_i) (0x0001B804 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFUDPRXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT)
+#define I40E_GLPES_VFUDPRXPKTSLO(_i) (0x0001B800 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFUDPRXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT)
+#define I40E_GLPES_VFUDPTXPKTSHI(_i) (0x0001BA04 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFUDPTXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT)
+#define I40E_GLPES_VFUDPTXPKTSLO(_i) (0x0001BA00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFUDPTXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT)
+#define I40E_GLPM_DMACR 0x000881F4
+#define I40E_GLPM_DMACR_DMACWT_SHIFT 0
+#define I40E_GLPM_DMACR_DMACWT_MASK (0xFFFF << I40E_GLPM_DMACR_DMACWT_SHIFT)
+#define I40E_GLPM_DMACR_EXIT_DC_SHIFT 29
+#define I40E_GLPM_DMACR_EXIT_DC_MASK (0x1 << I40E_GLPM_DMACR_EXIT_DC_SHIFT)
+#define I40E_GLPM_DMACR_LX_COALESCING_INDICATION_SHIFT 30
+#define I40E_GLPM_DMACR_LX_COALESCING_INDICATION_MASK (0x1 << I40E_GLPM_DMACR_LX_COALESCING_INDICATION_SHIFT)
+#define I40E_GLPM_DMACR_DMAC_EN_SHIFT 31
+#define I40E_GLPM_DMACR_DMAC_EN_MASK (0x1 << I40E_GLPM_DMACR_DMAC_EN_SHIFT)
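+
+/*
+ * Usage sketch (illustrative; i40e_enable_dma_coalescing is a
+ * hypothetical helper): enabling DMA coalescing is a read-modify-write
+ * of GLPM_DMACR with the field macros above, assuming the driver's
+ * rd32()/wr32() helpers.
+ */
+static inline void i40e_enable_dma_coalescing(struct i40e_hw *hw, u16 watchdog)
+{
+	u32 reg = rd32(hw, I40E_GLPM_DMACR);
+
+	reg &= ~I40E_GLPM_DMACR_DMACWT_MASK;
+	reg |= (watchdog << I40E_GLPM_DMACR_DMACWT_SHIFT) &
+	       I40E_GLPM_DMACR_DMACWT_MASK;
+	reg |= I40E_GLPM_DMACR_DMAC_EN_MASK;
+	wr32(hw, I40E_GLPM_DMACR, reg);
+}
+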
+#define I40E_GLPM_LTRC 0x000BE500
+#define I40E_GLPM_LTRC_SLTRV_SHIFT 0
+#define I40E_GLPM_LTRC_SLTRV_MASK (0x3FF << I40E_GLPM_LTRC_SLTRV_SHIFT)
+#define I40E_GLPM_LTRC_SSCALE_SHIFT 10
+#define I40E_GLPM_LTRC_SSCALE_MASK (0x7 << I40E_GLPM_LTRC_SSCALE_SHIFT)
+#define I40E_GLPM_LTRC_LTRS_REQUIREMENT_SHIFT 15
+#define I40E_GLPM_LTRC_LTRS_REQUIREMENT_MASK (0x1 << I40E_GLPM_LTRC_LTRS_REQUIREMENT_SHIFT)
+#define I40E_GLPM_LTRC_NSLTRV_SHIFT 16
+#define I40E_GLPM_LTRC_NSLTRV_MASK (0x3FF << I40E_GLPM_LTRC_NSLTRV_SHIFT)
+#define I40E_GLPM_LTRC_NSSCALE_SHIFT 26
+#define I40E_GLPM_LTRC_NSSCALE_MASK (0x7 << I40E_GLPM_LTRC_NSSCALE_SHIFT)
+#define I40E_GLPM_LTRC_LTR_SEND_SHIFT 30
+#define I40E_GLPM_LTRC_LTR_SEND_MASK (0x1 << I40E_GLPM_LTRC_LTR_SEND_SHIFT)
+#define I40E_GLPM_LTRC_LTRNS_REQUIREMENT_SHIFT 31
+#define I40E_GLPM_LTRC_LTRNS_REQUIREMENT_MASK (0x1 << I40E_GLPM_LTRC_LTRNS_REQUIREMENT_SHIFT)
+#define I40E_PRTPM_EEE_STAT 0x001E4320
+#define I40E_PRTPM_EEE_STAT_EEE_NEG_SHIFT 29
+#define I40E_PRTPM_EEE_STAT_EEE_NEG_MASK (0x1 << I40E_PRTPM_EEE_STAT_EEE_NEG_SHIFT)
+#define I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT 30
+#define I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK (0x1 << I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT)
+#define I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT 31
+#define I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK (0x1 << I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT)
+#define I40E_PRTPM_EEEC 0x001E4380
+#define I40E_PRTPM_EEEC_TW_WAKE_MIN_SHIFT 16
+#define I40E_PRTPM_EEEC_TW_WAKE_MIN_MASK (0x3F << I40E_PRTPM_EEEC_TW_WAKE_MIN_SHIFT)
+#define I40E_PRTPM_EEEC_TX_LU_LPI_DLY_SHIFT 24
+#define I40E_PRTPM_EEEC_TX_LU_LPI_DLY_MASK (0x3 << I40E_PRTPM_EEEC_TX_LU_LPI_DLY_SHIFT)
+#define I40E_PRTPM_EEEC_TEEE_DLY_SHIFT 26
+#define I40E_PRTPM_EEEC_TEEE_DLY_MASK (0x3F << I40E_PRTPM_EEEC_TEEE_DLY_SHIFT)
+#define I40E_PRTPM_EEEFWD 0x001E4400
+#define I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_SHIFT 31
+#define I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_MASK (0x1 << I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_SHIFT)
+#define I40E_PRTPM_EEER 0x001E4360
+#define I40E_PRTPM_EEER_TW_SYSTEM_SHIFT 0
+#define I40E_PRTPM_EEER_TW_SYSTEM_MASK (0xFFFF << I40E_PRTPM_EEER_TW_SYSTEM_SHIFT)
+#define I40E_PRTPM_EEER_TX_LPI_EN_SHIFT 16
+#define I40E_PRTPM_EEER_TX_LPI_EN_MASK (0x1 << I40E_PRTPM_EEER_TX_LPI_EN_SHIFT)
+#define I40E_PRTPM_EEETXC 0x001E43E0
+#define I40E_PRTPM_EEETXC_TW_PHY_SHIFT 0
+#define I40E_PRTPM_EEETXC_TW_PHY_MASK (0xFFFF << I40E_PRTPM_EEETXC_TW_PHY_SHIFT)
+#define I40E_PRTPM_GC 0x000B8140
+#define I40E_PRTPM_GC_EMP_LINK_ON_SHIFT 0
+#define I40E_PRTPM_GC_EMP_LINK_ON_MASK (0x1 << I40E_PRTPM_GC_EMP_LINK_ON_SHIFT)
+#define I40E_PRTPM_GC_MNG_VETO_SHIFT 1
+#define I40E_PRTPM_GC_MNG_VETO_MASK (0x1 << I40E_PRTPM_GC_MNG_VETO_SHIFT)
+#define I40E_PRTPM_GC_RATD_SHIFT 2
+#define I40E_PRTPM_GC_RATD_MASK (0x1 << I40E_PRTPM_GC_RATD_SHIFT)
+#define I40E_PRTPM_GC_LCDMP_SHIFT 3
+#define I40E_PRTPM_GC_LCDMP_MASK (0x1 << I40E_PRTPM_GC_LCDMP_SHIFT)
+#define I40E_PRTPM_GC_LPLU_ASSERTED_SHIFT 31
+#define I40E_PRTPM_GC_LPLU_ASSERTED_MASK (0x1 << I40E_PRTPM_GC_LPLU_ASSERTED_SHIFT)
+#define I40E_PRTPM_HPTC 0x000AC800
+#define I40E_PRTPM_HPTC_HIGH_PRI_TC_SHIFT 0
+#define I40E_PRTPM_HPTC_HIGH_PRI_TC_MASK (0xFF << I40E_PRTPM_HPTC_HIGH_PRI_TC_SHIFT)
+#define I40E_PRTPM_RLPIC 0x001E43A0
+#define I40E_PRTPM_RLPIC_ERLPIC_SHIFT 0
+#define I40E_PRTPM_RLPIC_ERLPIC_MASK (0xFFFFFFFF << I40E_PRTPM_RLPIC_ERLPIC_SHIFT)
+#define I40E_PRTPM_TLPIC 0x001E43C0
+#define I40E_PRTPM_TLPIC_ETLPIC_SHIFT 0
+#define I40E_PRTPM_TLPIC_ETLPIC_MASK (0xFFFFFFFF << I40E_PRTPM_TLPIC_ETLPIC_SHIFT)
+#define I40E_GLRPB_DPSS 0x000AC828
+#define I40E_GLRPB_DPSS_DPS_TCN_SHIFT 0
+#define I40E_GLRPB_DPSS_DPS_TCN_MASK (0xFFFFF << I40E_GLRPB_DPSS_DPS_TCN_SHIFT)
+#define I40E_GLRPB_GHW 0x000AC830
+#define I40E_GLRPB_GHW_GHW_SHIFT 0
+#define I40E_GLRPB_GHW_GHW_MASK (0xFFFFF << I40E_GLRPB_GHW_GHW_SHIFT)
+#define I40E_GLRPB_GLW 0x000AC834
+#define I40E_GLRPB_GLW_GLW_SHIFT 0
+#define I40E_GLRPB_GLW_GLW_MASK (0xFFFFF << I40E_GLRPB_GLW_GLW_SHIFT)
+#define I40E_GLRPB_PHW 0x000AC844
+#define I40E_GLRPB_PHW_PHW_SHIFT 0
+#define I40E_GLRPB_PHW_PHW_MASK (0xFFFFF << I40E_GLRPB_PHW_PHW_SHIFT)
+#define I40E_GLRPB_PLW 0x000AC848
+#define I40E_GLRPB_PLW_PLW_SHIFT 0
+#define I40E_GLRPB_PLW_PLW_MASK (0xFFFFF << I40E_GLRPB_PLW_PLW_SHIFT)
+#define I40E_PRTRPB_DHW(_i) (0x000AC100 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTRPB_DHW_MAX_INDEX 7
+#define I40E_PRTRPB_DHW_DHW_TCN_SHIFT 0
+#define I40E_PRTRPB_DHW_DHW_TCN_MASK (0xFFFFF << I40E_PRTRPB_DHW_DHW_TCN_SHIFT)
+#define I40E_PRTRPB_DLW(_i) (0x000AC220 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTRPB_DLW_MAX_INDEX 7
+#define I40E_PRTRPB_DLW_DLW_TCN_SHIFT 0
+#define I40E_PRTRPB_DLW_DLW_TCN_MASK (0xFFFFF << I40E_PRTRPB_DLW_DLW_TCN_SHIFT)
+#define I40E_PRTRPB_DPS(_i) (0x000AC320 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTRPB_DPS_MAX_INDEX 7
+#define I40E_PRTRPB_DPS_DPS_TCN_SHIFT 0
+#define I40E_PRTRPB_DPS_DPS_TCN_MASK (0xFFFFF << I40E_PRTRPB_DPS_DPS_TCN_SHIFT)
+#define I40E_PRTRPB_SHT(_i) (0x000AC480 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTRPB_SHT_MAX_INDEX 7
+#define I40E_PRTRPB_SHT_SHT_TCN_SHIFT 0
+#define I40E_PRTRPB_SHT_SHT_TCN_MASK (0xFFFFF << I40E_PRTRPB_SHT_SHT_TCN_SHIFT)
+#define I40E_PRTRPB_SHW 0x000AC580
+#define I40E_PRTRPB_SHW_SHW_SHIFT 0
+#define I40E_PRTRPB_SHW_SHW_MASK (0xFFFFF << I40E_PRTRPB_SHW_SHW_SHIFT)
+#define I40E_PRTRPB_SLT(_i) (0x000AC5A0 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTRPB_SLT_MAX_INDEX 7
+#define I40E_PRTRPB_SLT_SLT_TCN_SHIFT 0
+#define I40E_PRTRPB_SLT_SLT_TCN_MASK (0xFFFFF << I40E_PRTRPB_SLT_SLT_TCN_SHIFT)
+#define I40E_PRTRPB_SLW 0x000AC6A0
+#define I40E_PRTRPB_SLW_SLW_SHIFT 0
+#define I40E_PRTRPB_SLW_SLW_MASK (0xFFFFF << I40E_PRTRPB_SLW_SLW_SHIFT)
+#define I40E_PRTRPB_SPS 0x000AC7C0
+#define I40E_PRTRPB_SPS_SPS_SHIFT 0
+#define I40E_PRTRPB_SPS_SPS_MASK (0xFFFFF << I40E_PRTRPB_SPS_SPS_SHIFT)
+#define I40E_GLQF_APBVT(_i) (0x00260000 + ((_i) * 4)) /* _i=0...2047 */
+#define I40E_GLQF_APBVT_MAX_INDEX 2047
+#define I40E_GLQF_APBVT_APBVT_SHIFT 0
+#define I40E_GLQF_APBVT_APBVT_MASK (0xFFFFFFFF << I40E_GLQF_APBVT_APBVT_SHIFT)
+#define I40E_GLQF_CTL 0x00269BA4
+#define I40E_GLQF_CTL_HTOEP_SHIFT 1
+#define I40E_GLQF_CTL_HTOEP_MASK (0x1 << I40E_GLQF_CTL_HTOEP_SHIFT)
+#define I40E_GLQF_CTL_HTOEP_FCOE_SHIFT 2
+#define I40E_GLQF_CTL_HTOEP_FCOE_MASK (0x1 << I40E_GLQF_CTL_HTOEP_FCOE_SHIFT)
+#define I40E_GLQF_CTL_PCNT_ALLOC_SHIFT 3
+#define I40E_GLQF_CTL_PCNT_ALLOC_MASK (0x7 << I40E_GLQF_CTL_PCNT_ALLOC_SHIFT)
+#define I40E_GLQF_CTL_DDPLPEN_SHIFT 7
+#define I40E_GLQF_CTL_DDPLPEN_MASK (0x1 << I40E_GLQF_CTL_DDPLPEN_SHIFT)
+#define I40E_GLQF_CTL_MAXPEBLEN_SHIFT 8
+#define I40E_GLQF_CTL_MAXPEBLEN_MASK (0x7 << I40E_GLQF_CTL_MAXPEBLEN_SHIFT)
+#define I40E_GLQF_CTL_MAXFCBLEN_SHIFT 11
+#define I40E_GLQF_CTL_MAXFCBLEN_MASK (0x7 << I40E_GLQF_CTL_MAXFCBLEN_SHIFT)
+#define I40E_GLQF_CTL_MAXFDBLEN_SHIFT 14
+#define I40E_GLQF_CTL_MAXFDBLEN_MASK (0x7 << I40E_GLQF_CTL_MAXFDBLEN_SHIFT)
+#define I40E_GLQF_CTL_FDBEST_SHIFT 17
+#define I40E_GLQF_CTL_FDBEST_MASK (0xFF << I40E_GLQF_CTL_FDBEST_SHIFT)
+#define I40E_GLQF_CTL_PROGPRIO_SHIFT 25
+#define I40E_GLQF_CTL_PROGPRIO_MASK (0x1 << I40E_GLQF_CTL_PROGPRIO_SHIFT)
+#define I40E_GLQF_CTL_INVALPRIO_SHIFT 26
+#define I40E_GLQF_CTL_INVALPRIO_MASK (0x1 << I40E_GLQF_CTL_INVALPRIO_SHIFT)
+#define I40E_GLQF_CTL_IGNORE_IP_SHIFT 27
+#define I40E_GLQF_CTL_IGNORE_IP_MASK (0x1 << I40E_GLQF_CTL_IGNORE_IP_SHIFT)
+#define I40E_GLQF_FDCNT_0 0x00269BAC
+#define I40E_GLQF_FDCNT_0_GUARANT_CNT_SHIFT 0
+#define I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK (0x1FFF << I40E_GLQF_FDCNT_0_GUARANT_CNT_SHIFT)
+#define I40E_GLQF_FDCNT_0_BESTCNT_SHIFT 13
+#define I40E_GLQF_FDCNT_0_BESTCNT_MASK (0x1FFF << I40E_GLQF_FDCNT_0_BESTCNT_SHIFT)
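+
+/*
+ * Usage sketch (illustrative; i40e_get_global_fd_counts is a
+ * hypothetical helper): decoding the two 13-bit Flow Director filter
+ * counts packed into GLQF_FDCNT_0, assuming the driver's rd32() helper.
+ */
+static inline void i40e_get_global_fd_counts(struct i40e_hw *hw,
+					      u32 *guaranteed, u32 *best_effort)
+{
+	u32 reg = rd32(hw, I40E_GLQF_FDCNT_0);
+
+	*guaranteed = (reg & I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK) >>
+		      I40E_GLQF_FDCNT_0_GUARANT_CNT_SHIFT;
+	*best_effort = (reg & I40E_GLQF_FDCNT_0_BESTCNT_MASK) >>
+		       I40E_GLQF_FDCNT_0_BESTCNT_SHIFT;
+}
+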
+#define I40E_GLQF_HSYM(_i) (0x00269D00 + ((_i) * 4)) /* _i=0...63 */
+#define I40E_GLQF_HSYM_MAX_INDEX 63
+#define I40E_GLQF_HSYM_SYMH_ENA_SHIFT 0
+#define I40E_GLQF_HSYM_SYMH_ENA_MASK (0x1 << I40E_GLQF_HSYM_SYMH_ENA_SHIFT)
+#define I40E_GLQF_PCNT(_i) (0x00266800 + ((_i) * 4)) /* _i=0...511 */
+#define I40E_GLQF_PCNT_MAX_INDEX 511
+#define I40E_GLQF_PCNT_PCNT_SHIFT 0
+#define I40E_GLQF_PCNT_PCNT_MASK (0xFFFFFFFF << I40E_GLQF_PCNT_PCNT_SHIFT)
+#define I40E_GLQF_SWAP(_i, _j) (0x00267E00 + ((_i) * 4 + (_j) * 8)) /* _i=0...1, _j=0...63 */
+#define I40E_GLQF_SWAP_MAX_INDEX 1
+#define I40E_GLQF_SWAP_OFF0_SRC0_SHIFT 0
+#define I40E_GLQF_SWAP_OFF0_SRC0_MASK (0x3F << I40E_GLQF_SWAP_OFF0_SRC0_SHIFT)
+#define I40E_GLQF_SWAP_OFF0_SRC1_SHIFT 6
+#define I40E_GLQF_SWAP_OFF0_SRC1_MASK (0x3F << I40E_GLQF_SWAP_OFF0_SRC1_SHIFT)
+#define I40E_GLQF_SWAP_FLEN0_SHIFT 12
+#define I40E_GLQF_SWAP_FLEN0_MASK (0xF << I40E_GLQF_SWAP_FLEN0_SHIFT)
+#define I40E_GLQF_SWAP_OFF1_SRC0_SHIFT 16
+#define I40E_GLQF_SWAP_OFF1_SRC0_MASK (0x3F << I40E_GLQF_SWAP_OFF1_SRC0_SHIFT)
+#define I40E_GLQF_SWAP_OFF1_SRC1_SHIFT 22
+#define I40E_GLQF_SWAP_OFF1_SRC1_MASK (0x3F << I40E_GLQF_SWAP_OFF1_SRC1_SHIFT)
+#define I40E_GLQF_SWAP_FLEN1_SHIFT 28
+#define I40E_GLQF_SWAP_FLEN1_MASK (0xF << I40E_GLQF_SWAP_FLEN1_SHIFT)
+#define I40E_PFQF_CTL_0 0x001C0AC0
+#define I40E_PFQF_CTL_0_PEHSIZE_SHIFT 0
+#define I40E_PFQF_CTL_0_PEHSIZE_MASK (0x1F << I40E_PFQF_CTL_0_PEHSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_PEDSIZE_SHIFT 5
+#define I40E_PFQF_CTL_0_PEDSIZE_MASK (0x1F << I40E_PFQF_CTL_0_PEDSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT 10
+#define I40E_PFQF_CTL_0_PFFCHSIZE_MASK (0xF << I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT 14
+#define I40E_PFQF_CTL_0_PFFCDSIZE_MASK (0x3 << I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT 16
+#define I40E_PFQF_CTL_0_HASHLUTSIZE_MASK (0x1 << I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_FD_ENA_SHIFT 17
+#define I40E_PFQF_CTL_0_FD_ENA_MASK (0x1 << I40E_PFQF_CTL_0_FD_ENA_SHIFT)
+#define I40E_PFQF_CTL_0_ETYPE_ENA_SHIFT 18
+#define I40E_PFQF_CTL_0_ETYPE_ENA_MASK (0x1 << I40E_PFQF_CTL_0_ETYPE_ENA_SHIFT)
+#define I40E_PFQF_CTL_0_MACVLAN_ENA_SHIFT 19
+#define I40E_PFQF_CTL_0_MACVLAN_ENA_MASK (0x1 << I40E_PFQF_CTL_0_MACVLAN_ENA_SHIFT)
+#define I40E_PFQF_CTL_0_VFFCHSIZE_SHIFT 20
+#define I40E_PFQF_CTL_0_VFFCHSIZE_MASK (0xF << I40E_PFQF_CTL_0_VFFCHSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_VFFCDSIZE_SHIFT 24
+#define I40E_PFQF_CTL_0_VFFCDSIZE_MASK (0x3 << I40E_PFQF_CTL_0_VFFCDSIZE_SHIFT)
+#define I40E_PFQF_CTL_1 0x00245D80
+#define I40E_PFQF_CTL_1_CLEARFDTABLE_SHIFT 0
+#define I40E_PFQF_CTL_1_CLEARFDTABLE_MASK (0x1 << I40E_PFQF_CTL_1_CLEARFDTABLE_SHIFT)
+#define I40E_PFQF_FDALLOC 0x00246280
+#define I40E_PFQF_FDALLOC_FDALLOC_SHIFT 0
+#define I40E_PFQF_FDALLOC_FDALLOC_MASK (0xFF << I40E_PFQF_FDALLOC_FDALLOC_SHIFT)
+#define I40E_PFQF_FDALLOC_FDBEST_SHIFT 8
+#define I40E_PFQF_FDALLOC_FDBEST_MASK (0xFF << I40E_PFQF_FDALLOC_FDBEST_SHIFT)
+#define I40E_PFQF_FDSTAT 0x00246380
+#define I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT 0
+#define I40E_PFQF_FDSTAT_GUARANT_CNT_MASK (0x1FFF << I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT)
+#define I40E_PFQF_FDSTAT_BEST_CNT_SHIFT 16
+#define I40E_PFQF_FDSTAT_BEST_CNT_MASK (0x1FFF << I40E_PFQF_FDSTAT_BEST_CNT_SHIFT)
+#define I40E_PFQF_HENA(_i) (0x00245900 + ((_i) * 128)) /* _i=0...1 */
+#define I40E_PFQF_HENA_MAX_INDEX 1
+#define I40E_PFQF_HENA_PTYPE_ENA_SHIFT 0
+#define I40E_PFQF_HENA_PTYPE_ENA_MASK (0xFFFFFFFF << I40E_PFQF_HENA_PTYPE_ENA_SHIFT)
+#define I40E_PFQF_HKEY(_i) (0x00244800 + ((_i) * 128)) /* _i=0...12 */
+#define I40E_PFQF_HKEY_MAX_INDEX 12
+#define I40E_PFQF_HKEY_KEY_0_SHIFT 0
+#define I40E_PFQF_HKEY_KEY_0_MASK (0xFF << I40E_PFQF_HKEY_KEY_0_SHIFT)
+#define I40E_PFQF_HKEY_KEY_1_SHIFT 8
+#define I40E_PFQF_HKEY_KEY_1_MASK (0xFF << I40E_PFQF_HKEY_KEY_1_SHIFT)
+#define I40E_PFQF_HKEY_KEY_2_SHIFT 16
+#define I40E_PFQF_HKEY_KEY_2_MASK (0xFF << I40E_PFQF_HKEY_KEY_2_SHIFT)
+#define I40E_PFQF_HKEY_KEY_3_SHIFT 24
+#define I40E_PFQF_HKEY_KEY_3_MASK (0xFF << I40E_PFQF_HKEY_KEY_3_SHIFT)
+#define I40E_PFQF_HLUT(_i) (0x00240000 + ((_i) * 128)) /* _i=0...127 */
+#define I40E_PFQF_HLUT_MAX_INDEX 127
+#define I40E_PFQF_HLUT_LUT0_SHIFT 0
+#define I40E_PFQF_HLUT_LUT0_MASK (0x3F << I40E_PFQF_HLUT_LUT0_SHIFT)
+#define I40E_PFQF_HLUT_LUT1_SHIFT 8
+#define I40E_PFQF_HLUT_LUT1_MASK (0x3F << I40E_PFQF_HLUT_LUT1_SHIFT)
+#define I40E_PFQF_HLUT_LUT2_SHIFT 16
+#define I40E_PFQF_HLUT_LUT2_MASK (0x3F << I40E_PFQF_HLUT_LUT2_SHIFT)
+#define I40E_PFQF_HLUT_LUT3_SHIFT 24
+#define I40E_PFQF_HLUT_LUT3_MASK (0x3F << I40E_PFQF_HLUT_LUT3_SHIFT)
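
/*
 * Editor's sketch, not part of this patch: filling one PF RSS
 * lookup-table register from four queue indices using the PFQF_HLUT
 * defines above.  Assumes <linux/io.h> and <linux/types.h>; "hw_addr"
 * (the mapped BAR0) and the function name are placeholders.
 */
static void example_write_hlut(void __iomem *hw_addr, u32 idx,
			       u8 q0, u8 q1, u8 q2, u8 q3)
{
	u32 lut;

	if (idx > I40E_PFQF_HLUT_MAX_INDEX)	/* 128 regs x 4 entries */
		return;

	lut = (((u32)q0 << I40E_PFQF_HLUT_LUT0_SHIFT) & I40E_PFQF_HLUT_LUT0_MASK) |
	      (((u32)q1 << I40E_PFQF_HLUT_LUT1_SHIFT) & I40E_PFQF_HLUT_LUT1_MASK) |
	      (((u32)q2 << I40E_PFQF_HLUT_LUT2_SHIFT) & I40E_PFQF_HLUT_LUT2_MASK) |
	      (((u32)q3 << I40E_PFQF_HLUT_LUT3_SHIFT) & I40E_PFQF_HLUT_LUT3_MASK);

	writel(lut, hw_addr + I40E_PFQF_HLUT(idx));
}
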
+#define I40E_PFQF_HREGION(_i) (0x00245400 + ((_i) * 128)) /* _i=0...7 */
+#define I40E_PFQF_HREGION_MAX_INDEX 7
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_0_SHIFT 0
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_0_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_0_SHIFT)
+#define I40E_PFQF_HREGION_REGION_0_SHIFT 1
+#define I40E_PFQF_HREGION_REGION_0_MASK (0x7 << I40E_PFQF_HREGION_REGION_0_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_1_SHIFT 4
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_1_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_1_SHIFT)
+#define I40E_PFQF_HREGION_REGION_1_SHIFT 5
+#define I40E_PFQF_HREGION_REGION_1_MASK (0x7 << I40E_PFQF_HREGION_REGION_1_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_2_SHIFT 8
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_2_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_2_SHIFT)
+#define I40E_PFQF_HREGION_REGION_2_SHIFT 9
+#define I40E_PFQF_HREGION_REGION_2_MASK (0x7 << I40E_PFQF_HREGION_REGION_2_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_3_SHIFT 12
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_3_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_3_SHIFT)
+#define I40E_PFQF_HREGION_REGION_3_SHIFT 13
+#define I40E_PFQF_HREGION_REGION_3_MASK (0x7 << I40E_PFQF_HREGION_REGION_3_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_4_SHIFT 16
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_4_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_4_SHIFT)
+#define I40E_PFQF_HREGION_REGION_4_SHIFT 17
+#define I40E_PFQF_HREGION_REGION_4_MASK (0x7 << I40E_PFQF_HREGION_REGION_4_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_5_SHIFT 20
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_5_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_5_SHIFT)
+#define I40E_PFQF_HREGION_REGION_5_SHIFT 21
+#define I40E_PFQF_HREGION_REGION_5_MASK (0x7 << I40E_PFQF_HREGION_REGION_5_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_6_SHIFT 24
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_6_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_6_SHIFT)
+#define I40E_PFQF_HREGION_REGION_6_SHIFT 25
+#define I40E_PFQF_HREGION_REGION_6_MASK (0x7 << I40E_PFQF_HREGION_REGION_6_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_7_SHIFT 28
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_7_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_7_SHIFT)
+#define I40E_PFQF_HREGION_REGION_7_SHIFT 29
+#define I40E_PFQF_HREGION_REGION_7_MASK (0x7 << I40E_PFQF_HREGION_REGION_7_SHIFT)
+#define I40E_PRTQF_CTL_0 0x00256E60
+#define I40E_PRTQF_CTL_0_HSYM_ENA_SHIFT 0
+#define I40E_PRTQF_CTL_0_HSYM_ENA_MASK (0x1 << I40E_PRTQF_CTL_0_HSYM_ENA_SHIFT)
+#define I40E_PRTQF_FD_FLXINSET(_i) (0x00253800 + ((_i) * 32)) /* _i=0...63 */
+#define I40E_PRTQF_FD_FLXINSET_MAX_INDEX 63
+#define I40E_PRTQF_FD_FLXINSET_INSET_SHIFT 0
+#define I40E_PRTQF_FD_FLXINSET_INSET_MASK (0xFF << I40E_PRTQF_FD_FLXINSET_INSET_SHIFT)
+#define I40E_PRTQF_FD_MSK(_i, _j) (0x00252000 + ((_i) * 64 + (_j) * 32)) /* _i=0...63, _j=0...1 */
+#define I40E_PRTQF_FD_MSK_MAX_INDEX 63
+#define I40E_PRTQF_FD_MSK_MASK_SHIFT 0
+#define I40E_PRTQF_FD_MSK_MASK_MASK (0xFFFF << I40E_PRTQF_FD_MSK_MASK_SHIFT)
+#define I40E_PRTQF_FD_MSK_OFFSET_SHIFT 16
+#define I40E_PRTQF_FD_MSK_OFFSET_MASK (0x3F << I40E_PRTQF_FD_MSK_OFFSET_SHIFT)
+#define I40E_PRTQF_FLX_PIT(_i) (0x00255200 + ((_i) * 32)) /* _i=0...8 */
+#define I40E_PRTQF_FLX_PIT_MAX_INDEX 8
+#define I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT 0
+#define I40E_PRTQF_FLX_PIT_SOURCE_OFF_MASK (0x3F << I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT)
+#define I40E_PRTQF_FLX_PIT_FSIZE_SHIFT 6
+#define I40E_PRTQF_FLX_PIT_FSIZE_MASK (0xF << I40E_PRTQF_FLX_PIT_FSIZE_SHIFT)
+#define I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT 10
+#define I40E_PRTQF_FLX_PIT_DEST_OFF_MASK (0x3F << I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT)
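
/*
 * Editor's sketch, not part of this patch: packing one flex-payload
 * extraction rule from the PRTQF_FLX_PIT fields above -- take "fsize"
 * units starting at source offset "src" and place them at destination
 * offset "dst" in the field vector.  The offset/size units themselves
 * are not specified by this patch; only the bit packing is shown.
 */
static u32 example_flx_pit(u32 src, u32 fsize, u32 dst)
{
	return ((src << I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT) &
		I40E_PRTQF_FLX_PIT_SOURCE_OFF_MASK) |
	       ((fsize << I40E_PRTQF_FLX_PIT_FSIZE_SHIFT) &
		I40E_PRTQF_FLX_PIT_FSIZE_MASK) |
	       ((dst << I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT) &
		I40E_PRTQF_FLX_PIT_DEST_OFF_MASK);
}
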
+#define I40E_VFQF_HENA1(_i, _VF) (0x00230800 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...1, _VF=0...127 */
+#define I40E_VFQF_HENA1_MAX_INDEX 1
+#define I40E_VFQF_HENA1_PTYPE_ENA_SHIFT 0
+#define I40E_VFQF_HENA1_PTYPE_ENA_MASK (0xFFFFFFFF << I40E_VFQF_HENA1_PTYPE_ENA_SHIFT)
+#define I40E_VFQF_HKEY1(_i, _VF) (0x00228000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...12, _VF=0...127 */
+#define I40E_VFQF_HKEY1_MAX_INDEX 12
+#define I40E_VFQF_HKEY1_KEY_0_SHIFT 0
+#define I40E_VFQF_HKEY1_KEY_0_MASK (0xFF << I40E_VFQF_HKEY1_KEY_0_SHIFT)
+#define I40E_VFQF_HKEY1_KEY_1_SHIFT 8
+#define I40E_VFQF_HKEY1_KEY_1_MASK (0xFF << I40E_VFQF_HKEY1_KEY_1_SHIFT)
+#define I40E_VFQF_HKEY1_KEY_2_SHIFT 16
+#define I40E_VFQF_HKEY1_KEY_2_MASK (0xFF << I40E_VFQF_HKEY1_KEY_2_SHIFT)
+#define I40E_VFQF_HKEY1_KEY_3_SHIFT 24
+#define I40E_VFQF_HKEY1_KEY_3_MASK (0xFF << I40E_VFQF_HKEY1_KEY_3_SHIFT)
+#define I40E_VFQF_HLUT1(_i, _VF) (0x00220000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...15, _VF=0...127 */
+#define I40E_VFQF_HLUT1_MAX_INDEX 15
+#define I40E_VFQF_HLUT1_LUT0_SHIFT 0
+#define I40E_VFQF_HLUT1_LUT0_MASK (0xF << I40E_VFQF_HLUT1_LUT0_SHIFT)
+#define I40E_VFQF_HLUT1_LUT1_SHIFT 8
+#define I40E_VFQF_HLUT1_LUT1_MASK (0xF << I40E_VFQF_HLUT1_LUT1_SHIFT)
+#define I40E_VFQF_HLUT1_LUT2_SHIFT 16
+#define I40E_VFQF_HLUT1_LUT2_MASK (0xF << I40E_VFQF_HLUT1_LUT2_SHIFT)
+#define I40E_VFQF_HLUT1_LUT3_SHIFT 24
+#define I40E_VFQF_HLUT1_LUT3_MASK (0xF << I40E_VFQF_HLUT1_LUT3_SHIFT)
+#define I40E_VFQF_HREGION1(_i, _VF) (0x0022E000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...7, _VF=0...127 */
+#define I40E_VFQF_HREGION1_MAX_INDEX 7
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_0_SHIFT 0
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_0_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_0_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_0_SHIFT 1
+#define I40E_VFQF_HREGION1_REGION_0_MASK (0x7 << I40E_VFQF_HREGION1_REGION_0_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_1_SHIFT 4
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_1_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_1_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_1_SHIFT 5
+#define I40E_VFQF_HREGION1_REGION_1_MASK (0x7 << I40E_VFQF_HREGION1_REGION_1_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_2_SHIFT 8
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_2_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_2_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_2_SHIFT 9
+#define I40E_VFQF_HREGION1_REGION_2_MASK (0x7 << I40E_VFQF_HREGION1_REGION_2_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_3_SHIFT 12
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_3_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_3_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_3_SHIFT 13
+#define I40E_VFQF_HREGION1_REGION_3_MASK (0x7 << I40E_VFQF_HREGION1_REGION_3_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_4_SHIFT 16
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_4_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_4_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_4_SHIFT 17
+#define I40E_VFQF_HREGION1_REGION_4_MASK (0x7 << I40E_VFQF_HREGION1_REGION_4_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_5_SHIFT 20
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_5_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_5_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_5_SHIFT 21
+#define I40E_VFQF_HREGION1_REGION_5_MASK (0x7 << I40E_VFQF_HREGION1_REGION_5_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_6_SHIFT 24
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_6_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_6_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_6_SHIFT 25
+#define I40E_VFQF_HREGION1_REGION_6_MASK (0x7 << I40E_VFQF_HREGION1_REGION_6_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_7_SHIFT 28
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_7_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_7_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_7_SHIFT 29
+#define I40E_VFQF_HREGION1_REGION_7_MASK (0x7 << I40E_VFQF_HREGION1_REGION_7_SHIFT)
+#define I40E_VPQF_CTL(_VF) (0x001C0000 + ((_VF) * 4)) /* _VF=0...127 */
+#define I40E_VPQF_CTL_MAX_INDEX 127
+#define I40E_VPQF_CTL_PEHSIZE_SHIFT 0
+#define I40E_VPQF_CTL_PEHSIZE_MASK (0x1F << I40E_VPQF_CTL_PEHSIZE_SHIFT)
+#define I40E_VPQF_CTL_PEDSIZE_SHIFT 5
+#define I40E_VPQF_CTL_PEDSIZE_MASK (0x1F << I40E_VPQF_CTL_PEDSIZE_SHIFT)
+#define I40E_VPQF_CTL_FCHSIZE_SHIFT 10
+#define I40E_VPQF_CTL_FCHSIZE_MASK (0xF << I40E_VPQF_CTL_FCHSIZE_SHIFT)
+#define I40E_VPQF_CTL_FCDSIZE_SHIFT 14
+#define I40E_VPQF_CTL_FCDSIZE_MASK (0x3 << I40E_VPQF_CTL_FCDSIZE_SHIFT)
+#define I40E_VSIQF_CTL(_VSI) (0x0020D800 + ((_VSI) * 4)) /* _VSI=0...383 */
+#define I40E_VSIQF_CTL_MAX_INDEX 383
+#define I40E_VSIQF_CTL_FCOE_ENA_SHIFT 0
+#define I40E_VSIQF_CTL_FCOE_ENA_MASK (0x1 << I40E_VSIQF_CTL_FCOE_ENA_SHIFT)
+#define I40E_VSIQF_CTL_PETCP_ENA_SHIFT 1
+#define I40E_VSIQF_CTL_PETCP_ENA_MASK (0x1 << I40E_VSIQF_CTL_PETCP_ENA_SHIFT)
+#define I40E_VSIQF_CTL_PEUUDP_ENA_SHIFT 2
+#define I40E_VSIQF_CTL_PEUUDP_ENA_MASK (0x1 << I40E_VSIQF_CTL_PEUUDP_ENA_SHIFT)
+#define I40E_VSIQF_CTL_PEMUDP_ENA_SHIFT 3
+#define I40E_VSIQF_CTL_PEMUDP_ENA_MASK (0x1 << I40E_VSIQF_CTL_PEMUDP_ENA_SHIFT)
+#define I40E_VSIQF_CTL_PEUFRAG_ENA_SHIFT 4
+#define I40E_VSIQF_CTL_PEUFRAG_ENA_MASK (0x1 << I40E_VSIQF_CTL_PEUFRAG_ENA_SHIFT)
+#define I40E_VSIQF_CTL_PEMFRAG_ENA_SHIFT 5
+#define I40E_VSIQF_CTL_PEMFRAG_ENA_MASK (0x1 << I40E_VSIQF_CTL_PEMFRAG_ENA_SHIFT)
+#define I40E_VSIQF_TCREGION(_i, _VSI) (0x00206000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...7, _VSI=0...383 */
+#define I40E_VSIQF_TCREGION_MAX_INDEX 7
+#define I40E_VSIQF_TCREGION_TC_OFFSET_SHIFT 0
+#define I40E_VSIQF_TCREGION_TC_OFFSET_MASK (0x1FF << I40E_VSIQF_TCREGION_TC_OFFSET_SHIFT)
+#define I40E_VSIQF_TCREGION_TC_SIZE_SHIFT 9
+#define I40E_VSIQF_TCREGION_TC_SIZE_MASK (0x7 << I40E_VSIQF_TCREGION_TC_SIZE_SHIFT)
+#define I40E_VSIQF_TCREGION_TC_OFFSET2_SHIFT 16
+#define I40E_VSIQF_TCREGION_TC_OFFSET2_MASK (0x1FF << I40E_VSIQF_TCREGION_TC_OFFSET2_SHIFT)
+#define I40E_VSIQF_TCREGION_TC_SIZE2_SHIFT 25
+#define I40E_VSIQF_TCREGION_TC_SIZE2_MASK (0x7 << I40E_VSIQF_TCREGION_TC_SIZE2_SHIFT)
+#define I40E_GL_FCOECRC(_i) (0x00314d80 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOECRC_MAX_INDEX 143
+#define I40E_GL_FCOECRC_FCOECRC_SHIFT 0
+#define I40E_GL_FCOECRC_FCOECRC_MASK (0xFFFFFFFF << I40E_GL_FCOECRC_FCOECRC_SHIFT)
+#define I40E_GL_FCOEDDPC(_i) (0x00314480 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDDPC_MAX_INDEX 143
+#define I40E_GL_FCOEDDPC_FCOEDDPC_SHIFT 0
+#define I40E_GL_FCOEDDPC_FCOEDDPC_MASK (0xFFFFFFFF << I40E_GL_FCOEDDPC_FCOEDDPC_SHIFT)
+#define I40E_GL_FCOEDDPEC(_i) (0x00314900 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDDPEC_MAX_INDEX 143
+#define I40E_GL_FCOEDDPEC_CFOEDDPEC_SHIFT 0
+#define I40E_GL_FCOEDDPEC_CFOEDDPEC_MASK (0xFFFFFFFF << I40E_GL_FCOEDDPEC_CFOEDDPEC_SHIFT)
+#define I40E_GL_FCOEDIFEC(_i) (0x00318480 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDIFEC_MAX_INDEX 143
+#define I40E_GL_FCOEDIFEC_FCOEDIFRC_SHIFT 0
+#define I40E_GL_FCOEDIFEC_FCOEDIFRC_MASK (0xFFFFFFFF << I40E_GL_FCOEDIFEC_FCOEDIFRC_SHIFT)
+#define I40E_GL_FCOEDIFRC(_i) (0x00318000 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDIFRC_MAX_INDEX 143
+#define I40E_GL_FCOEDIFRC_FCOEDIFRC_SHIFT 0
+#define I40E_GL_FCOEDIFRC_FCOEDIFRC_MASK (0xFFFFFFFF << I40E_GL_FCOEDIFRC_FCOEDIFRC_SHIFT)
+#define I40E_GL_FCOEDIFTCL(_i) (0x00354000 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDIFTCL_MAX_INDEX 143
+#define I40E_GL_FCOEDIFTCL_FCOEDIFTC_SHIFT 0
+#define I40E_GL_FCOEDIFTCL_FCOEDIFTC_MASK (0xFFFFFFFF << I40E_GL_FCOEDIFTCL_FCOEDIFTC_SHIFT)
+#define I40E_GL_FCOEDIXAC(_i) (0x0031c000 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDIXAC_MAX_INDEX 143
+#define I40E_GL_FCOEDIXAC_FCOEDIXAC_SHIFT 0
+#define I40E_GL_FCOEDIXAC_FCOEDIXAC_MASK (0xFFFFFFFF << I40E_GL_FCOEDIXAC_FCOEDIXAC_SHIFT)
+#define I40E_GL_FCOEDIXEC(_i) (0x0034c000 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDIXEC_MAX_INDEX 143
+#define I40E_GL_FCOEDIXEC_FCOEDIXEC_SHIFT 0
+#define I40E_GL_FCOEDIXEC_FCOEDIXEC_MASK (0xFFFFFFFF << I40E_GL_FCOEDIXEC_FCOEDIXEC_SHIFT)
+#define I40E_GL_FCOEDIXVC(_i) (0x00350000 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDIXVC_MAX_INDEX 143
+#define I40E_GL_FCOEDIXVC_FCOEDIXVC_SHIFT 0
+#define I40E_GL_FCOEDIXVC_FCOEDIXVC_MASK (0xFFFFFFFF << I40E_GL_FCOEDIXVC_FCOEDIXVC_SHIFT)
+#define I40E_GL_FCOEDWRCH(_i) (0x00320004 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDWRCH_MAX_INDEX 143
+#define I40E_GL_FCOEDWRCH_FCOEDWRCH_SHIFT 0
+#define I40E_GL_FCOEDWRCH_FCOEDWRCH_MASK (0xFFFF << I40E_GL_FCOEDWRCH_FCOEDWRCH_SHIFT)
+#define I40E_GL_FCOEDWRCL(_i) (0x00320000 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDWRCL_MAX_INDEX 143
+#define I40E_GL_FCOEDWRCL_FCOEDWRCL_SHIFT 0
+#define I40E_GL_FCOEDWRCL_FCOEDWRCL_MASK (0xFFFFFFFF << I40E_GL_FCOEDWRCL_FCOEDWRCL_SHIFT)
+#define I40E_GL_FCOEDWTCH(_i) (0x00348084 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDWTCH_MAX_INDEX 143
+#define I40E_GL_FCOEDWTCH_FCOEDWTCH_SHIFT 0
+#define I40E_GL_FCOEDWTCH_FCOEDWTCH_MASK (0xFFFF << I40E_GL_FCOEDWTCH_FCOEDWTCH_SHIFT)
+#define I40E_GL_FCOEDWTCL(_i) (0x00348080 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDWTCL_MAX_INDEX 143
+#define I40E_GL_FCOEDWTCL_FCOEDWTCL_SHIFT 0
+#define I40E_GL_FCOEDWTCL_FCOEDWTCL_MASK (0xFFFFFFFF << I40E_GL_FCOEDWTCL_FCOEDWTCL_SHIFT)
+#define I40E_GL_FCOELAST(_i) (0x00314000 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOELAST_MAX_INDEX 143
+#define I40E_GL_FCOELAST_FCOELAST_SHIFT 0
+#define I40E_GL_FCOELAST_FCOELAST_MASK (0xFFFFFFFF << I40E_GL_FCOELAST_FCOELAST_SHIFT)
+#define I40E_GL_FCOEPRC(_i) (0x00315200 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEPRC_MAX_INDEX 143
+#define I40E_GL_FCOEPRC_FCOEPRC_SHIFT 0
+#define I40E_GL_FCOEPRC_FCOEPRC_MASK (0xFFFFFFFF << I40E_GL_FCOEPRC_FCOEPRC_SHIFT)
+#define I40E_GL_FCOEPTC(_i) (0x00344C00 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEPTC_MAX_INDEX 143
+#define I40E_GL_FCOEPTC_FCOEPTC_SHIFT 0
+#define I40E_GL_FCOEPTC_FCOEPTC_MASK (0xFFFFFFFF << I40E_GL_FCOEPTC_FCOEPTC_SHIFT)
+#define I40E_GL_FCOERPDC(_i) (0x00324000 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOERPDC_MAX_INDEX 143
+#define I40E_GL_FCOERPDC_FCOERPDC_SHIFT 0
+#define I40E_GL_FCOERPDC_FCOERPDC_MASK (0xFFFFFFFF << I40E_GL_FCOERPDC_FCOERPDC_SHIFT)
+#define I40E_GLPRT_BPRCH(_i) (0x003005E4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_BPRCH_MAX_INDEX 3
+#define I40E_GLPRT_BPRCH_UPRCH_SHIFT 0
+#define I40E_GLPRT_BPRCH_UPRCH_MASK (0xFFFF << I40E_GLPRT_BPRCH_UPRCH_SHIFT)
+#define I40E_GLPRT_BPRCL(_i) (0x003005E0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_BPRCL_MAX_INDEX 3
+#define I40E_GLPRT_BPRCL_UPRCH_SHIFT 0
+#define I40E_GLPRT_BPRCL_UPRCH_MASK (0xFFFFFFFF << I40E_GLPRT_BPRCL_UPRCH_SHIFT)
+#define I40E_GLPRT_BPTCH(_i) (0x00300A04 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_BPTCH_MAX_INDEX 3
+#define I40E_GLPRT_BPTCH_UPRCH_SHIFT 0
+#define I40E_GLPRT_BPTCH_UPRCH_MASK (0xFFFF << I40E_GLPRT_BPTCH_UPRCH_SHIFT)
+#define I40E_GLPRT_BPTCL(_i) (0x00300A00 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_BPTCL_MAX_INDEX 3
+#define I40E_GLPRT_BPTCL_UPRCH_SHIFT 0
+#define I40E_GLPRT_BPTCL_UPRCH_MASK (0xFFFFFFFF << I40E_GLPRT_BPTCL_UPRCH_SHIFT)
+#define I40E_GLPRT_CRCERRS(_i) (0x00300080 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_CRCERRS_MAX_INDEX 3
+#define I40E_GLPRT_CRCERRS_CRCERRS_SHIFT 0
+#define I40E_GLPRT_CRCERRS_CRCERRS_MASK (0xFFFFFFFF << I40E_GLPRT_CRCERRS_CRCERRS_SHIFT)
+#define I40E_GLPRT_GORCH(_i) (0x00300004 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_GORCH_MAX_INDEX 3
+#define I40E_GLPRT_GORCH_GORCH_SHIFT 0
+#define I40E_GLPRT_GORCH_GORCH_MASK (0xFFFF << I40E_GLPRT_GORCH_GORCH_SHIFT)
+#define I40E_GLPRT_GORCL(_i) (0x00300000 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_GORCL_MAX_INDEX 3
+#define I40E_GLPRT_GORCL_GORCL_SHIFT 0
+#define I40E_GLPRT_GORCL_GORCL_MASK (0xFFFFFFFF << I40E_GLPRT_GORCL_GORCL_SHIFT)
+#define I40E_GLPRT_GOTCH(_i) (0x00300684 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_GOTCH_MAX_INDEX 3
+#define I40E_GLPRT_GOTCH_GOTCH_SHIFT 0
+#define I40E_GLPRT_GOTCH_GOTCH_MASK (0xFFFF << I40E_GLPRT_GOTCH_GOTCH_SHIFT)
+#define I40E_GLPRT_GOTCL(_i) (0x00300680 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_GOTCL_MAX_INDEX 3
+#define I40E_GLPRT_GOTCL_GOTCL_SHIFT 0
+#define I40E_GLPRT_GOTCL_GOTCL_MASK (0xFFFFFFFF << I40E_GLPRT_GOTCL_GOTCL_SHIFT)
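
/*
 * Editor's sketch, not part of this patch: the GLPRT_*L/*H statistic
 * pairs above look like 48-bit counters -- 32 valid bits in the L
 * register and 16 in the H register (note the 0xFFFF H masks).  One
 * way to read such a pair coherently is to bracket the L read with two
 * H reads and retry if a carry slipped in between; "hw_addr" and the
 * retry convention are assumptions, not taken from this patch.
 */
static u64 example_read_gorc(void __iomem *hw_addr, u32 port)
{
	u32 hi, lo, hi2;

	do {
		hi  = readl(hw_addr + I40E_GLPRT_GORCH(port)) &
		      I40E_GLPRT_GORCH_GORCH_MASK;
		lo  = readl(hw_addr + I40E_GLPRT_GORCL(port));
		hi2 = readl(hw_addr + I40E_GLPRT_GORCH(port)) &
		      I40E_GLPRT_GORCH_GORCH_MASK;
	} while (hi != hi2);		/* low word wrapped mid-read; retry */

	return ((u64)hi << 32) | lo;
}
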
+#define I40E_GLPRT_ILLERRC(_i) (0x003000E0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_ILLERRC_MAX_INDEX 3
+#define I40E_GLPRT_ILLERRC_ILLERRC_SHIFT 0
+#define I40E_GLPRT_ILLERRC_ILLERRC_MASK (0xFFFFFFFF << I40E_GLPRT_ILLERRC_ILLERRC_SHIFT)
+#define I40E_GLPRT_LDPC(_i) (0x00300620 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_LDPC_MAX_INDEX 3
+#define I40E_GLPRT_LDPC_LDPC_SHIFT 0
+#define I40E_GLPRT_LDPC_LDPC_MASK (0xFFFFFFFF << I40E_GLPRT_LDPC_LDPC_SHIFT)
+#define I40E_GLPRT_LXOFFRXC(_i) (0x00300160 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_LXOFFRXC_MAX_INDEX 3
+#define I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_SHIFT 0
+#define I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_MASK (0xFFFFFFFF << I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_SHIFT)
+#define I40E_GLPRT_LXOFFTXC(_i) (0x003009A0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_LXOFFTXC_MAX_INDEX 3
+#define I40E_GLPRT_LXOFFTXC_LXOFFTXC_SHIFT 0
+#define I40E_GLPRT_LXOFFTXC_LXOFFTXC_MASK (0xFFFFFFFF << I40E_GLPRT_LXOFFTXC_LXOFFTXC_SHIFT)
+#define I40E_GLPRT_LXONRXC(_i) (0x00300140 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_LXONRXC_MAX_INDEX 3
+#define I40E_GLPRT_LXONRXC_LXONRXCNT_SHIFT 0
+#define I40E_GLPRT_LXONRXC_LXONRXCNT_MASK (0xFFFFFFFF << I40E_GLPRT_LXONRXC_LXONRXCNT_SHIFT)
+#define I40E_GLPRT_LXONTXC(_i) (0x00300980 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_LXONTXC_MAX_INDEX 3
+#define I40E_GLPRT_LXONTXC_LXONTXC_SHIFT 0
+#define I40E_GLPRT_LXONTXC_LXONTXC_MASK (0xFFFFFFFF << I40E_GLPRT_LXONTXC_LXONTXC_SHIFT)
+#define I40E_GLPRT_MLFC(_i) (0x00300020 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_MLFC_MAX_INDEX 3
+#define I40E_GLPRT_MLFC_MLFC_SHIFT 0
+#define I40E_GLPRT_MLFC_MLFC_MASK (0xFFFFFFFF << I40E_GLPRT_MLFC_MLFC_SHIFT)
+#define I40E_GLPRT_MPRCH(_i) (0x003005C4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_MPRCH_MAX_INDEX 3
+#define I40E_GLPRT_MPRCH_MPRCH_SHIFT 0
+#define I40E_GLPRT_MPRCH_MPRCH_MASK (0xFFFF << I40E_GLPRT_MPRCH_MPRCH_SHIFT)
+#define I40E_GLPRT_MPRCL(_i) (0x003005C0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_MPRCL_MAX_INDEX 3
+#define I40E_GLPRT_MPRCL_MPRCL_SHIFT 0
+#define I40E_GLPRT_MPRCL_MPRCL_MASK (0xFFFFFFFF << I40E_GLPRT_MPRCL_MPRCL_SHIFT)
+#define I40E_GLPRT_MPTCH(_i) (0x003009E4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_MPTCH_MAX_INDEX 3
+#define I40E_GLPRT_MPTCH_MPTCH_SHIFT 0
+#define I40E_GLPRT_MPTCH_MPTCH_MASK (0xFFFF << I40E_GLPRT_MPTCH_MPTCH_SHIFT)
+#define I40E_GLPRT_MPTCL(_i) (0x003009E0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_MPTCL_MAX_INDEX 3
+#define I40E_GLPRT_MPTCL_MPTCL_SHIFT 0
+#define I40E_GLPRT_MPTCL_MPTCL_MASK (0xFFFFFFFF << I40E_GLPRT_MPTCL_MPTCL_SHIFT)
+#define I40E_GLPRT_MRFC(_i) (0x00300040 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_MRFC_MAX_INDEX 3
+#define I40E_GLPRT_MRFC_MRFC_SHIFT 0
+#define I40E_GLPRT_MRFC_MRFC_MASK (0xFFFFFFFF << I40E_GLPRT_MRFC_MRFC_SHIFT)
+#define I40E_GLPRT_PRC1023H(_i) (0x00300504 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC1023H_MAX_INDEX 3
+#define I40E_GLPRT_PRC1023H_PRC1023H_SHIFT 0
+#define I40E_GLPRT_PRC1023H_PRC1023H_MASK (0xFFFF << I40E_GLPRT_PRC1023H_PRC1023H_SHIFT)
+#define I40E_GLPRT_PRC1023L(_i) (0x00300500 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC1023L_MAX_INDEX 3
+#define I40E_GLPRT_PRC1023L_PRC1023L_SHIFT 0
+#define I40E_GLPRT_PRC1023L_PRC1023L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC1023L_PRC1023L_SHIFT)
+#define I40E_GLPRT_PRC127H(_i) (0x003004A4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC127H_MAX_INDEX 3
+#define I40E_GLPRT_PRC127H_PRC127H_SHIFT 0
+#define I40E_GLPRT_PRC127H_PRC127H_MASK (0xFFFF << I40E_GLPRT_PRC127H_PRC127H_SHIFT)
+#define I40E_GLPRT_PRC127L(_i) (0x003004A0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC127L_MAX_INDEX 3
+#define I40E_GLPRT_PRC127L_PRC127L_SHIFT 0
+#define I40E_GLPRT_PRC127L_PRC127L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC127L_PRC127L_SHIFT)
+#define I40E_GLPRT_PRC1522H(_i) (0x00300524 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC1522H_MAX_INDEX 3
+#define I40E_GLPRT_PRC1522H_PRC1522H_SHIFT 0
+#define I40E_GLPRT_PRC1522H_PRC1522H_MASK (0xFFFF << I40E_GLPRT_PRC1522H_PRC1522H_SHIFT)
+#define I40E_GLPRT_PRC1522L(_i) (0x00300520 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC1522L_MAX_INDEX 3
+#define I40E_GLPRT_PRC1522L_PRC1522L_SHIFT 0
+#define I40E_GLPRT_PRC1522L_PRC1522L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC1522L_PRC1522L_SHIFT)
+#define I40E_GLPRT_PRC255H(_i) (0x003004C4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC255H_MAX_INDEX 3
+#define I40E_GLPRT_PRC255H_PRTPRC255H_SHIFT 0
+#define I40E_GLPRT_PRC255H_PRTPRC255H_MASK (0xFFFF << I40E_GLPRT_PRC255H_PRTPRC255H_SHIFT)
+#define I40E_GLPRT_PRC255L(_i) (0x003004C0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC255L_MAX_INDEX 3
+#define I40E_GLPRT_PRC255L_PRC255L_SHIFT 0
+#define I40E_GLPRT_PRC255L_PRC255L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC255L_PRC255L_SHIFT)
+#define I40E_GLPRT_PRC511H(_i) (0x003004E4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC511H_MAX_INDEX 3
+#define I40E_GLPRT_PRC511H_PRC511H_SHIFT 0
+#define I40E_GLPRT_PRC511H_PRC511H_MASK (0xFFFF << I40E_GLPRT_PRC511H_PRC511H_SHIFT)
+#define I40E_GLPRT_PRC511L(_i) (0x003004E0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC511L_MAX_INDEX 3
+#define I40E_GLPRT_PRC511L_PRC511L_SHIFT 0
+#define I40E_GLPRT_PRC511L_PRC511L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC511L_PRC511L_SHIFT)
+#define I40E_GLPRT_PRC64H(_i) (0x00300484 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC64H_MAX_INDEX 3
+#define I40E_GLPRT_PRC64H_PRC64H_SHIFT 0
+#define I40E_GLPRT_PRC64H_PRC64H_MASK (0xFFFF << I40E_GLPRT_PRC64H_PRC64H_SHIFT)
+#define I40E_GLPRT_PRC64L(_i) (0x00300480 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC64L_MAX_INDEX 3
+#define I40E_GLPRT_PRC64L_PRC64L_SHIFT 0
+#define I40E_GLPRT_PRC64L_PRC64L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC64L_PRC64L_SHIFT)
+#define I40E_GLPRT_PRC9522H(_i) (0x00300544 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC9522H_MAX_INDEX 3
+#define I40E_GLPRT_PRC9522H_PRC1522H_SHIFT 0
+#define I40E_GLPRT_PRC9522H_PRC1522H_MASK (0xFFFF << I40E_GLPRT_PRC9522H_PRC1522H_SHIFT)
+#define I40E_GLPRT_PRC9522L(_i) (0x00300540 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC9522L_MAX_INDEX 3
+#define I40E_GLPRT_PRC9522L_PRC1522L_SHIFT 0
+#define I40E_GLPRT_PRC9522L_PRC1522L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC9522L_PRC1522L_SHIFT)
+#define I40E_GLPRT_PTC1023H(_i) (0x00300724 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC1023H_MAX_INDEX 3
+#define I40E_GLPRT_PTC1023H_PTC1023H_SHIFT 0
+#define I40E_GLPRT_PTC1023H_PTC1023H_MASK (0xFFFF << I40E_GLPRT_PTC1023H_PTC1023H_SHIFT)
+#define I40E_GLPRT_PTC1023L(_i) (0x00300720 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC1023L_MAX_INDEX 3
+#define I40E_GLPRT_PTC1023L_PTC1023L_SHIFT 0
+#define I40E_GLPRT_PTC1023L_PTC1023L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC1023L_PTC1023L_SHIFT)
+#define I40E_GLPRT_PTC127H(_i) (0x003006C4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC127H_MAX_INDEX 3
+#define I40E_GLPRT_PTC127H_PTC127H_SHIFT 0
+#define I40E_GLPRT_PTC127H_PTC127H_MASK (0xFFFF << I40E_GLPRT_PTC127H_PTC127H_SHIFT)
+#define I40E_GLPRT_PTC127L(_i) (0x003006C0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC127L_MAX_INDEX 3
+#define I40E_GLPRT_PTC127L_PTC127L_SHIFT 0
+#define I40E_GLPRT_PTC127L_PTC127L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC127L_PTC127L_SHIFT)
+#define I40E_GLPRT_PTC1522H(_i) (0x00300744 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC1522H_MAX_INDEX 3
+#define I40E_GLPRT_PTC1522H_PTC1522H_SHIFT 0
+#define I40E_GLPRT_PTC1522H_PTC1522H_MASK (0xFFFF << I40E_GLPRT_PTC1522H_PTC1522H_SHIFT)
+#define I40E_GLPRT_PTC1522L(_i) (0x00300740 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC1522L_MAX_INDEX 3
+#define I40E_GLPRT_PTC1522L_PTC1522L_SHIFT 0
+#define I40E_GLPRT_PTC1522L_PTC1522L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC1522L_PTC1522L_SHIFT)
+#define I40E_GLPRT_PTC255H(_i) (0x003006E4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC255H_MAX_INDEX 3
+#define I40E_GLPRT_PTC255H_PTC255H_SHIFT 0
+#define I40E_GLPRT_PTC255H_PTC255H_MASK (0xFFFF << I40E_GLPRT_PTC255H_PTC255H_SHIFT)
+#define I40E_GLPRT_PTC255L(_i) (0x003006E0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC255L_MAX_INDEX 3
+#define I40E_GLPRT_PTC255L_PTC255L_SHIFT 0
+#define I40E_GLPRT_PTC255L_PTC255L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC255L_PTC255L_SHIFT)
+#define I40E_GLPRT_PTC511H(_i) (0x00300704 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC511H_MAX_INDEX 3
+#define I40E_GLPRT_PTC511H_PTC511H_SHIFT 0
+#define I40E_GLPRT_PTC511H_PTC511H_MASK (0xFFFF << I40E_GLPRT_PTC511H_PTC511H_SHIFT)
+#define I40E_GLPRT_PTC511L(_i) (0x00300700 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC511L_MAX_INDEX 3
+#define I40E_GLPRT_PTC511L_PTC511L_SHIFT 0
+#define I40E_GLPRT_PTC511L_PTC511L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC511L_PTC511L_SHIFT)
+#define I40E_GLPRT_PTC64H(_i) (0x003006A4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC64H_MAX_INDEX 3
+#define I40E_GLPRT_PTC64H_PTC64H_SHIFT 0
+#define I40E_GLPRT_PTC64H_PTC64H_MASK (0xFFFF << I40E_GLPRT_PTC64H_PTC64H_SHIFT)
+#define I40E_GLPRT_PTC64L(_i) (0x003006A0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC64L_MAX_INDEX 3
+#define I40E_GLPRT_PTC64L_PTC64L_SHIFT 0
+#define I40E_GLPRT_PTC64L_PTC64L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC64L_PTC64L_SHIFT)
+#define I40E_GLPRT_PTC9522H(_i) (0x00300764 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC9522H_MAX_INDEX 3
+#define I40E_GLPRT_PTC9522H_PTC9522H_SHIFT 0
+#define I40E_GLPRT_PTC9522H_PTC9522H_MASK (0xFFFF << I40E_GLPRT_PTC9522H_PTC9522H_SHIFT)
+#define I40E_GLPRT_PTC9522L(_i) (0x00300760 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC9522L_MAX_INDEX 3
+#define I40E_GLPRT_PTC9522L_PTC9522L_SHIFT 0
+#define I40E_GLPRT_PTC9522L_PTC9522L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC9522L_PTC9522L_SHIFT)
+#define I40E_GLPRT_PXOFFRXC(_i, _j) (0x00300280 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */
+#define I40E_GLPRT_PXOFFRXC_MAX_INDEX 3
+#define I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_SHIFT 0
+#define I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_MASK (0xFFFFFFFF << I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_SHIFT)
+#define I40E_GLPRT_PXOFFTXC(_i, _j) (0x00300880 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */
+#define I40E_GLPRT_PXOFFTXC_MAX_INDEX 3
+#define I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_SHIFT 0
+#define I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_MASK (0xFFFFFFFF << I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_SHIFT)
+#define I40E_GLPRT_PXONRXC(_i, _j) (0x00300180 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */
+#define I40E_GLPRT_PXONRXC_MAX_INDEX 3
+#define I40E_GLPRT_PXONRXC_PRPXONRXCNT_SHIFT 0
+#define I40E_GLPRT_PXONRXC_PRPXONRXCNT_MASK (0xFFFFFFFF << I40E_GLPRT_PXONRXC_PRPXONRXCNT_SHIFT)
+#define I40E_GLPRT_PXONTXC(_i, _j) (0x00300780 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */
+#define I40E_GLPRT_PXONTXC_MAX_INDEX 3
+#define I40E_GLPRT_PXONTXC_PRPXONTXC_SHIFT 0
+#define I40E_GLPRT_PXONTXC_PRPXONTXC_MASK (0xFFFFFFFF << I40E_GLPRT_PXONTXC_PRPXONTXC_SHIFT)
+#define I40E_GLPRT_RDPC(_i) (0x00300600 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_RDPC_MAX_INDEX 3
+#define I40E_GLPRT_RDPC_RDPC_SHIFT 0
+#define I40E_GLPRT_RDPC_RDPC_MASK (0xFFFFFFFF << I40E_GLPRT_RDPC_RDPC_SHIFT)
+#define I40E_GLPRT_RFC(_i) (0x00300560 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_RFC_MAX_INDEX 3
+#define I40E_GLPRT_RFC_RFC_SHIFT 0
+#define I40E_GLPRT_RFC_RFC_MASK (0xFFFFFFFF << I40E_GLPRT_RFC_RFC_SHIFT)
+#define I40E_GLPRT_RJC(_i) (0x00300580 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_RJC_MAX_INDEX 3
+#define I40E_GLPRT_RJC_RJC_SHIFT 0
+#define I40E_GLPRT_RJC_RJC_MASK (0xFFFFFFFF << I40E_GLPRT_RJC_RJC_SHIFT)
+#define I40E_GLPRT_RLEC(_i) (0x003000A0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_RLEC_MAX_INDEX 3
+#define I40E_GLPRT_RLEC_RLEC_SHIFT 0
+#define I40E_GLPRT_RLEC_RLEC_MASK (0xFFFFFFFF << I40E_GLPRT_RLEC_RLEC_SHIFT)
+#define I40E_GLPRT_ROC(_i) (0x00300120 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_ROC_MAX_INDEX 3
+#define I40E_GLPRT_ROC_ROC_SHIFT 0
+#define I40E_GLPRT_ROC_ROC_MASK (0xFFFFFFFF << I40E_GLPRT_ROC_ROC_SHIFT)
+#define I40E_GLPRT_RUC(_i) (0x00300100 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_RUC_MAX_INDEX 3
+#define I40E_GLPRT_RUC_RUC_SHIFT 0
+#define I40E_GLPRT_RUC_RUC_MASK (0xFFFFFFFF << I40E_GLPRT_RUC_RUC_SHIFT)
+#define I40E_GLPRT_RUPP(_i) (0x00300660 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_RUPP_MAX_INDEX 3
+#define I40E_GLPRT_RUPP_RUPP_SHIFT 0
+#define I40E_GLPRT_RUPP_RUPP_MASK (0xFFFFFFFF << I40E_GLPRT_RUPP_RUPP_SHIFT)
+#define I40E_GLPRT_RXON2OFFCNT(_i, _j) (0x00300380 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */
+#define I40E_GLPRT_RXON2OFFCNT_MAX_INDEX 3
+#define I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_SHIFT 0
+#define I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_MASK (0xFFFFFFFF << I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_SHIFT)
+#define I40E_GLPRT_STDC(_i) (0x00300640 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_STDC_MAX_INDEX 3
+#define I40E_GLPRT_STDC_STDC_SHIFT 0
+#define I40E_GLPRT_STDC_STDC_MASK (0xFFFFFFFF << I40E_GLPRT_STDC_STDC_SHIFT)
+#define I40E_GLPRT_TDOLD(_i) (0x00300A20 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_TDOLD_MAX_INDEX 3
+#define I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT 0
+#define I40E_GLPRT_TDOLD_GLPRT_TDOLD_MASK (0xFFFFFFFF << I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT)
+#define I40E_GLPRT_TDPC(_i) (0x00375400 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_TDPC_MAX_INDEX 3
+#define I40E_GLPRT_TDPC_TDPC_SHIFT 0
+#define I40E_GLPRT_TDPC_TDPC_MASK (0xFFFFFFFF << I40E_GLPRT_TDPC_TDPC_SHIFT)
+#define I40E_GLPRT_UPRCH(_i) (0x003005A4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_UPRCH_MAX_INDEX 3
+#define I40E_GLPRT_UPRCH_UPRCH_SHIFT 0
+#define I40E_GLPRT_UPRCH_UPRCH_MASK (0xFFFF << I40E_GLPRT_UPRCH_UPRCH_SHIFT)
+#define I40E_GLPRT_UPRCL(_i) (0x003005A0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_UPRCL_MAX_INDEX 3
+#define I40E_GLPRT_UPRCL_UPRCL_SHIFT 0
+#define I40E_GLPRT_UPRCL_UPRCL_MASK (0xFFFFFFFF << I40E_GLPRT_UPRCL_UPRCL_SHIFT)
+#define I40E_GLPRT_UPTCH(_i) (0x003009C4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_UPTCH_MAX_INDEX 3
+#define I40E_GLPRT_UPTCH_UPTCH_SHIFT 0
+#define I40E_GLPRT_UPTCH_UPTCH_MASK (0xFFFF << I40E_GLPRT_UPTCH_UPTCH_SHIFT)
+#define I40E_GLPRT_UPTCL(_i) (0x003009C0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_UPTCL_MAX_INDEX 3
+#define I40E_GLPRT_UPTCL_VUPTCH_SHIFT 0
+#define I40E_GLPRT_UPTCL_VUPTCH_MASK (0xFFFFFFFF << I40E_GLPRT_UPTCL_VUPTCH_SHIFT)
+#define I40E_GLSW_BPRCH(_i) (0x00370104 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_BPRCH_MAX_INDEX 15
+#define I40E_GLSW_BPRCH_BPRCH_SHIFT 0
+#define I40E_GLSW_BPRCH_BPRCH_MASK (0xFFFF << I40E_GLSW_BPRCH_BPRCH_SHIFT)
+#define I40E_GLSW_BPRCL(_i) (0x00370100 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_BPRCL_MAX_INDEX 15
+#define I40E_GLSW_BPRCL_BPRCL_SHIFT 0
+#define I40E_GLSW_BPRCL_BPRCL_MASK (0xFFFFFFFF << I40E_GLSW_BPRCL_BPRCL_SHIFT)
+#define I40E_GLSW_BPTCH(_i) (0x00340104 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_BPTCH_MAX_INDEX 15
+#define I40E_GLSW_BPTCH_BPTCH_SHIFT 0
+#define I40E_GLSW_BPTCH_BPTCH_MASK (0xFFFF << I40E_GLSW_BPTCH_BPTCH_SHIFT)
+#define I40E_GLSW_BPTCL(_i) (0x00340100 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_BPTCL_MAX_INDEX 15
+#define I40E_GLSW_BPTCL_BPTCL_SHIFT 0
+#define I40E_GLSW_BPTCL_BPTCL_MASK (0xFFFFFFFF << I40E_GLSW_BPTCL_BPTCL_SHIFT)
+#define I40E_GLSW_GORCH(_i) (0x0035C004 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_GORCH_MAX_INDEX 15
+#define I40E_GLSW_GORCH_GORCH_SHIFT 0
+#define I40E_GLSW_GORCH_GORCH_MASK (0xFFFF << I40E_GLSW_GORCH_GORCH_SHIFT)
+#define I40E_GLSW_GORCL(_i) (0x0035c000 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_GORCL_MAX_INDEX 15
+#define I40E_GLSW_GORCL_GORCL_SHIFT 0
+#define I40E_GLSW_GORCL_GORCL_MASK (0xFFFFFFFF << I40E_GLSW_GORCL_GORCL_SHIFT)
+#define I40E_GLSW_GOTCH(_i) (0x0032C004 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_GOTCH_MAX_INDEX 15
+#define I40E_GLSW_GOTCH_GOTCH_SHIFT 0
+#define I40E_GLSW_GOTCH_GOTCH_MASK (0xFFFF << I40E_GLSW_GOTCH_GOTCH_SHIFT)
+#define I40E_GLSW_GOTCL(_i) (0x0032c000 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_GOTCL_MAX_INDEX 15
+#define I40E_GLSW_GOTCL_GOTCL_SHIFT 0
+#define I40E_GLSW_GOTCL_GOTCL_MASK (0xFFFFFFFF << I40E_GLSW_GOTCL_GOTCL_SHIFT)
+#define I40E_GLSW_MPRCH(_i) (0x00370084 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_MPRCH_MAX_INDEX 15
+#define I40E_GLSW_MPRCH_MPRCH_SHIFT 0
+#define I40E_GLSW_MPRCH_MPRCH_MASK (0xFFFF << I40E_GLSW_MPRCH_MPRCH_SHIFT)
+#define I40E_GLSW_MPRCL(_i) (0x00370080 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_MPRCL_MAX_INDEX 15
+#define I40E_GLSW_MPRCL_MPRCL_SHIFT 0
+#define I40E_GLSW_MPRCL_MPRCL_MASK (0xFFFFFFFF << I40E_GLSW_MPRCL_MPRCL_SHIFT)
+#define I40E_GLSW_MPTCH(_i) (0x00340084 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_MPTCH_MAX_INDEX 15
+#define I40E_GLSW_MPTCH_MPTCH_SHIFT 0
+#define I40E_GLSW_MPTCH_MPTCH_MASK (0xFFFF << I40E_GLSW_MPTCH_MPTCH_SHIFT)
+#define I40E_GLSW_MPTCL(_i) (0x00340080 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_MPTCL_MAX_INDEX 15
+#define I40E_GLSW_MPTCL_MPTCL_SHIFT 0
+#define I40E_GLSW_MPTCL_MPTCL_MASK (0xFFFFFFFF << I40E_GLSW_MPTCL_MPTCL_SHIFT)
+#define I40E_GLSW_RUPP(_i) (0x00370180 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_RUPP_MAX_INDEX 15
+#define I40E_GLSW_RUPP_RUPP_SHIFT 0
+#define I40E_GLSW_RUPP_RUPP_MASK (0xFFFFFFFF << I40E_GLSW_RUPP_RUPP_SHIFT)
+#define I40E_GLSW_TDPC(_i) (0x00348000 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_TDPC_MAX_INDEX 15
+#define I40E_GLSW_TDPC_TDPC_SHIFT 0
+#define I40E_GLSW_TDPC_TDPC_MASK (0xFFFFFFFF << I40E_GLSW_TDPC_TDPC_SHIFT)
+#define I40E_GLSW_UPRCH(_i) (0x00370004 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_UPRCH_MAX_INDEX 15
+#define I40E_GLSW_UPRCH_UPRCH_SHIFT 0
+#define I40E_GLSW_UPRCH_UPRCH_MASK (0xFFFF << I40E_GLSW_UPRCH_UPRCH_SHIFT)
+#define I40E_GLSW_UPRCL(_i) (0x00370000 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_UPRCL_MAX_INDEX 15
+#define I40E_GLSW_UPRCL_UPRCL_SHIFT 0
+#define I40E_GLSW_UPRCL_UPRCL_MASK (0xFFFFFFFF << I40E_GLSW_UPRCL_UPRCL_SHIFT)
+#define I40E_GLSW_UPTCH(_i) (0x00340004 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_UPTCH_MAX_INDEX 15
+#define I40E_GLSW_UPTCH_UPTCH_SHIFT 0
+#define I40E_GLSW_UPTCH_UPTCH_MASK (0xFFFF << I40E_GLSW_UPTCH_UPTCH_SHIFT)
+#define I40E_GLSW_UPTCL(_i) (0x00340000 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_UPTCL_MAX_INDEX 15
+#define I40E_GLSW_UPTCL_UPTCL_SHIFT 0
+#define I40E_GLSW_UPTCL_UPTCL_MASK (0xFFFFFFFF << I40E_GLSW_UPTCL_UPTCL_SHIFT)
+#define I40E_GLV_BPRCH(_i) (0x0036D804 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_BPRCH_MAX_INDEX 383
+#define I40E_GLV_BPRCH_BPRCH_SHIFT 0
+#define I40E_GLV_BPRCH_BPRCH_MASK (0xFFFF << I40E_GLV_BPRCH_BPRCH_SHIFT)
+#define I40E_GLV_BPRCL(_i) (0x0036d800 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_BPRCL_MAX_INDEX 383
+#define I40E_GLV_BPRCL_BPRCL_SHIFT 0
+#define I40E_GLV_BPRCL_BPRCL_MASK (0xFFFFFFFF << I40E_GLV_BPRCL_BPRCL_SHIFT)
+#define I40E_GLV_BPTCH(_i) (0x0033D804 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_BPTCH_MAX_INDEX 383
+#define I40E_GLV_BPTCH_BPTCH_SHIFT 0
+#define I40E_GLV_BPTCH_BPTCH_MASK (0xFFFF << I40E_GLV_BPTCH_BPTCH_SHIFT)
+#define I40E_GLV_BPTCL(_i) (0x0033d800 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_BPTCL_MAX_INDEX 383
+#define I40E_GLV_BPTCL_BPTCL_SHIFT 0
+#define I40E_GLV_BPTCL_BPTCL_MASK (0xFFFFFFFF << I40E_GLV_BPTCL_BPTCL_SHIFT)
+#define I40E_GLV_GORCH(_i) (0x00358004 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_GORCH_MAX_INDEX 383
+#define I40E_GLV_GORCH_GORCH_SHIFT 0
+#define I40E_GLV_GORCH_GORCH_MASK (0xFFFF << I40E_GLV_GORCH_GORCH_SHIFT)
+#define I40E_GLV_GORCL(_i) (0x00358000 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_GORCL_MAX_INDEX 383
+#define I40E_GLV_GORCL_GORCL_SHIFT 0
+#define I40E_GLV_GORCL_GORCL_MASK (0xFFFFFFFF << I40E_GLV_GORCL_GORCL_SHIFT)
+#define I40E_GLV_GOTCH(_i) (0x00328004 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_GOTCH_MAX_INDEX 383
+#define I40E_GLV_GOTCH_GOTCH_SHIFT 0
+#define I40E_GLV_GOTCH_GOTCH_MASK (0xFFFF << I40E_GLV_GOTCH_GOTCH_SHIFT)
+#define I40E_GLV_GOTCL(_i) (0x00328000 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_GOTCL_MAX_INDEX 383
+#define I40E_GLV_GOTCL_GOTCL_SHIFT 0
+#define I40E_GLV_GOTCL_GOTCL_MASK (0xFFFFFFFF << I40E_GLV_GOTCL_GOTCL_SHIFT)
+#define I40E_GLV_MPRCH(_i) (0x0036CC04 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_MPRCH_MAX_INDEX 383
+#define I40E_GLV_MPRCH_MPRCH_SHIFT 0
+#define I40E_GLV_MPRCH_MPRCH_MASK (0xFFFF << I40E_GLV_MPRCH_MPRCH_SHIFT)
+#define I40E_GLV_MPRCL(_i) (0x0036cc00 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_MPRCL_MAX_INDEX 383
+#define I40E_GLV_MPRCL_MPRCL_SHIFT 0
+#define I40E_GLV_MPRCL_MPRCL_MASK (0xFFFFFFFF << I40E_GLV_MPRCL_MPRCL_SHIFT)
+#define I40E_GLV_MPTCH(_i) (0x0033CC04 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_MPTCH_MAX_INDEX 383
+#define I40E_GLV_MPTCH_MPTCH_SHIFT 0
+#define I40E_GLV_MPTCH_MPTCH_MASK (0xFFFF << I40E_GLV_MPTCH_MPTCH_SHIFT)
+#define I40E_GLV_MPTCL(_i) (0x0033cc00 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_MPTCL_MAX_INDEX 383
+#define I40E_GLV_MPTCL_MPTCL_SHIFT 0
+#define I40E_GLV_MPTCL_MPTCL_MASK (0xFFFFFFFF << I40E_GLV_MPTCL_MPTCL_SHIFT)
+#define I40E_GLV_RDPC(_i) (0x00310000 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_RDPC_MAX_INDEX 383
+#define I40E_GLV_RDPC_RDPC_SHIFT 0
+#define I40E_GLV_RDPC_RDPC_MASK (0xFFFFFFFF << I40E_GLV_RDPC_RDPC_SHIFT)
+#define I40E_GLV_RUPP(_i) (0x0036E400 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_RUPP_MAX_INDEX 383
+#define I40E_GLV_RUPP_RUPP_SHIFT 0
+#define I40E_GLV_RUPP_RUPP_MASK (0xFFFFFFFF << I40E_GLV_RUPP_RUPP_SHIFT)
+#define I40E_GLV_TEPC(_VSI) (0x00344000 + ((_VSI) * 8)) /* _VSI=0...383 */
+#define I40E_GLV_TEPC_MAX_INDEX 383
+#define I40E_GLV_TEPC_TEPC_SHIFT 0
+#define I40E_GLV_TEPC_TEPC_MASK (0xFFFFFFFF << I40E_GLV_TEPC_TEPC_SHIFT)
+#define I40E_GLV_UPRCH(_i) (0x0036C004 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_UPRCH_MAX_INDEX 383
+#define I40E_GLV_UPRCH_UPRCH_SHIFT 0
+#define I40E_GLV_UPRCH_UPRCH_MASK (0xFFFF << I40E_GLV_UPRCH_UPRCH_SHIFT)
+#define I40E_GLV_UPRCL(_i) (0x0036c000 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_UPRCL_MAX_INDEX 383
+#define I40E_GLV_UPRCL_UPRCL_SHIFT 0
+#define I40E_GLV_UPRCL_UPRCL_MASK (0xFFFFFFFF << I40E_GLV_UPRCL_UPRCL_SHIFT)
+#define I40E_GLV_UPTCH(_i) (0x0033C004 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_UPTCH_MAX_INDEX 383
+#define I40E_GLV_UPTCH_GLVUPTCH_SHIFT 0
+#define I40E_GLV_UPTCH_GLVUPTCH_MASK (0xFFFF << I40E_GLV_UPTCH_GLVUPTCH_SHIFT)
+#define I40E_GLV_UPTCL(_i) (0x0033c000 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_UPTCL_MAX_INDEX 383
+#define I40E_GLV_UPTCL_UPTCL_SHIFT 0
+#define I40E_GLV_UPTCL_UPTCL_MASK (0xFFFFFFFF << I40E_GLV_UPTCL_UPTCL_SHIFT)
+#define I40E_GLVEBTC_RBCH(_i, _j) (0x00364004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLVEBTC_RBCH_MAX_INDEX 7
+#define I40E_GLVEBTC_RBCH_TCBCH_SHIFT 0
+#define I40E_GLVEBTC_RBCH_TCBCH_MASK (0xFFFF << I40E_GLVEBTC_RBCH_TCBCH_SHIFT)
+#define I40E_GLVEBTC_RBCL(_i, _j) (0x00364000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLVEBTC_RBCL_MAX_INDEX 7
+#define I40E_GLVEBTC_RBCL_TCBCL_SHIFT 0
+#define I40E_GLVEBTC_RBCL_TCBCL_MASK (0xFFFFFFFF << I40E_GLVEBTC_RBCL_TCBCL_SHIFT)
+#define I40E_GLVEBTC_RPCH(_i, _j) (0x00368004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLVEBTC_RPCH_MAX_INDEX 7
+#define I40E_GLVEBTC_RPCH_TCPCH_SHIFT 0
+#define I40E_GLVEBTC_RPCH_TCPCH_MASK (0xFFFF << I40E_GLVEBTC_RPCH_TCPCH_SHIFT)
+#define I40E_GLVEBTC_RPCL(_i, _j) (0x00368000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLVEBTC_RPCL_MAX_INDEX 7
+#define I40E_GLVEBTC_RPCL_TCPCL_SHIFT 0
+#define I40E_GLVEBTC_RPCL_TCPCL_MASK (0xFFFFFFFF << I40E_GLVEBTC_RPCL_TCPCL_SHIFT)
+#define I40E_GLVEBTC_TBCH(_i, _j) (0x00334004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLVEBTC_TBCH_MAX_INDEX 7
+#define I40E_GLVEBTC_TBCH_TCBCH_SHIFT 0
+#define I40E_GLVEBTC_TBCH_TCBCH_MASK (0xFFFF << I40E_GLVEBTC_TBCH_TCBCH_SHIFT)
+#define I40E_GLVEBTC_TBCL(_i, _j) (0x00334000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLVEBTC_TBCL_MAX_INDEX 7
+#define I40E_GLVEBTC_TBCL_TCBCL_SHIFT 0
+#define I40E_GLVEBTC_TBCL_TCBCL_MASK (0xFFFFFFFF << I40E_GLVEBTC_TBCL_TCBCL_SHIFT)
+#define I40E_GLVEBTC_TPCH(_i, _j) (0x00338004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLVEBTC_TPCH_MAX_INDEX 7
+#define I40E_GLVEBTC_TPCH_TCPCH_SHIFT 0
+#define I40E_GLVEBTC_TPCH_TCPCH_MASK (0xFFFF << I40E_GLVEBTC_TPCH_TCPCH_SHIFT)
+#define I40E_GLVEBTC_TPCL(_i, _j) (0x00338000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLVEBTC_TPCL_MAX_INDEX 7
+#define I40E_GLVEBTC_TPCL_TCPCL_SHIFT 0
+#define I40E_GLVEBTC_TPCL_TCPCL_MASK (0xFFFFFFFF << I40E_GLVEBTC_TPCL_TCPCL_SHIFT)
+#define I40E_GLVEBVL_BPCH(_i) (0x00374804 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_BPCH_MAX_INDEX 127
+#define I40E_GLVEBVL_BPCH_VLBPCH_SHIFT 0
+#define I40E_GLVEBVL_BPCH_VLBPCH_MASK (0xFFFF << I40E_GLVEBVL_BPCH_VLBPCH_SHIFT)
+#define I40E_GLVEBVL_BPCL(_i) (0x00374800 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_BPCL_MAX_INDEX 127
+#define I40E_GLVEBVL_BPCL_VLBPCL_SHIFT 0
+#define I40E_GLVEBVL_BPCL_VLBPCL_MASK (0xFFFFFFFF << I40E_GLVEBVL_BPCL_VLBPCL_SHIFT)
+#define I40E_GLVEBVL_GORCH(_i) (0x00360004 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_GORCH_MAX_INDEX 127
+#define I40E_GLVEBVL_GORCH_VLBCH_SHIFT 0
+#define I40E_GLVEBVL_GORCH_VLBCH_MASK (0xFFFF << I40E_GLVEBVL_GORCH_VLBCH_SHIFT)
+#define I40E_GLVEBVL_GORCL(_i) (0x00360000 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_GORCL_MAX_INDEX 127
+#define I40E_GLVEBVL_GORCL_VLBCL_SHIFT 0
+#define I40E_GLVEBVL_GORCL_VLBCL_MASK (0xFFFFFFFF << I40E_GLVEBVL_GORCL_VLBCL_SHIFT)
+#define I40E_GLVEBVL_GOTCH(_i) (0x00330004 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_GOTCH_MAX_INDEX 127
+#define I40E_GLVEBVL_GOTCH_VLBCH_SHIFT 0
+#define I40E_GLVEBVL_GOTCH_VLBCH_MASK (0xFFFF << I40E_GLVEBVL_GOTCH_VLBCH_SHIFT)
+#define I40E_GLVEBVL_GOTCL(_i) (0x00330000 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_GOTCL_MAX_INDEX 127
+#define I40E_GLVEBVL_GOTCL_VLBCL_SHIFT 0
+#define I40E_GLVEBVL_GOTCL_VLBCL_MASK (0xFFFFFFFF << I40E_GLVEBVL_GOTCL_VLBCL_SHIFT)
+#define I40E_GLVEBVL_MPCH(_i) (0x00374404 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_MPCH_MAX_INDEX 127
+#define I40E_GLVEBVL_MPCH_VLMPCH_SHIFT 0
+#define I40E_GLVEBVL_MPCH_VLMPCH_MASK (0xFFFF << I40E_GLVEBVL_MPCH_VLMPCH_SHIFT)
+#define I40E_GLVEBVL_MPCL(_i) (0x00374400 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_MPCL_MAX_INDEX 127
+#define I40E_GLVEBVL_MPCL_VLMPCL_SHIFT 0
+#define I40E_GLVEBVL_MPCL_VLMPCL_MASK (0xFFFFFFFF << I40E_GLVEBVL_MPCL_VLMPCL_SHIFT)
+#define I40E_GLVEBVL_UPCH(_i) (0x00374004 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_UPCH_MAX_INDEX 127
+#define I40E_GLVEBVL_UPCH_VLUPCH_SHIFT 0
+#define I40E_GLVEBVL_UPCH_VLUPCH_MASK (0xFFFF << I40E_GLVEBVL_UPCH_VLUPCH_SHIFT)
+#define I40E_GLVEBVL_UPCL(_i) (0x00374000 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_UPCL_MAX_INDEX 127
+#define I40E_GLVEBVL_UPCL_VLUPCL_SHIFT 0
+#define I40E_GLVEBVL_UPCL_VLUPCL_MASK (0xFFFFFFFF << I40E_GLVEBVL_UPCL_VLUPCL_SHIFT)
+#define I40E_GL_MTG_FLU_MSK_H 0x00269F4C
+#define I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_SHIFT 0
+#define I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_MASK (0xFFFF << I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_SHIFT)
+#define I40E_GL_MTG_FLU_MSK_L 0x00269F44
+#define I40E_GL_MTG_FLU_MSK_L_MASK_LOW_SHIFT 0
+#define I40E_GL_MTG_FLU_MSK_L_MASK_LOW_MASK (0xFFFFFFFF << I40E_GL_MTG_FLU_MSK_L_MASK_LOW_SHIFT)
+#define I40E_GL_SWR_DEF_ACT(_i) (0x0026CF00 + ((_i) * 4)) /* _i=0...25 */
+#define I40E_GL_SWR_DEF_ACT_MAX_INDEX 25
+#define I40E_GL_SWR_DEF_ACT_DEF_ACTION_SHIFT 0
+#define I40E_GL_SWR_DEF_ACT_DEF_ACTION_MASK (0xFFFFFFFF << I40E_GL_SWR_DEF_ACT_DEF_ACTION_SHIFT)
+#define I40E_GL_SWR_DEF_ACT_EN 0x0026CF84
+#define I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_SHIFT 0
+#define I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_MASK (0xFFFFFFFF << I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_SHIFT)
+#define I40E_PRT_MSCCNT 0x00256BA0
+#define I40E_PRT_MSCCNT_CCOUNT_SHIFT 0
+#define I40E_PRT_MSCCNT_CCOUNT_MASK (0x1FFFFFF << I40E_PRT_MSCCNT_CCOUNT_SHIFT)
+#define I40E_PRT_SCSTS 0x00256C20
+#define I40E_PRT_SCSTS_BSCA_SHIFT 0
+#define I40E_PRT_SCSTS_BSCA_MASK (0x1 << I40E_PRT_SCSTS_BSCA_SHIFT)
+#define I40E_PRT_SCSTS_BSCAP_SHIFT 1
+#define I40E_PRT_SCSTS_BSCAP_MASK (0x1 << I40E_PRT_SCSTS_BSCAP_SHIFT)
+#define I40E_PRT_SCSTS_MSCA_SHIFT 2
+#define I40E_PRT_SCSTS_MSCA_MASK (0x1 << I40E_PRT_SCSTS_MSCA_SHIFT)
+#define I40E_PRT_SCSTS_MSCAP_SHIFT 3
+#define I40E_PRT_SCSTS_MSCAP_MASK (0x1 << I40E_PRT_SCSTS_MSCAP_SHIFT)
+#define I40E_PRT_SWT_BSCCNT 0x00256C60
+#define I40E_PRT_SWT_BSCCNT_CCOUNT_SHIFT 0
+#define I40E_PRT_SWT_BSCCNT_CCOUNT_MASK (0x1FFFFFF << I40E_PRT_SWT_BSCCNT_CCOUNT_SHIFT)
+#define I40E_PRTTSYN_ADJ 0x001E4280
+#define I40E_PRTTSYN_ADJ_TSYNADJ_SHIFT 0
+#define I40E_PRTTSYN_ADJ_TSYNADJ_MASK (0x7FFFFFFF << I40E_PRTTSYN_ADJ_TSYNADJ_SHIFT)
+#define I40E_PRTTSYN_ADJ_SIGN_SHIFT 31
+#define I40E_PRTTSYN_ADJ_SIGN_MASK (0x1 << I40E_PRTTSYN_ADJ_SIGN_SHIFT)
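
/*
 * Editor's sketch, not part of this patch: PRTTSYN_ADJ encodes a
 * 31-bit magnitude (TSYNADJ) plus a SIGN bit.  Assuming SIGN set
 * means a negative adjustment -- an assumption the defines alone do
 * not settle -- a signed delta would be packed like this:
 */
static void example_write_tsyn_adj(void __iomem *hw_addr, s32 delta)
{
	u32 adj = (((delta < 0) ? (u32)-delta : (u32)delta)
		   << I40E_PRTTSYN_ADJ_TSYNADJ_SHIFT) &
		  I40E_PRTTSYN_ADJ_TSYNADJ_MASK;

	if (delta < 0)
		adj |= I40E_PRTTSYN_ADJ_SIGN_MASK;

	writel(adj, hw_addr + I40E_PRTTSYN_ADJ);
}
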
+#define I40E_PRTTSYN_AUX_0(_i) (0x001E42A0 + ((_i) * 32)) /* _i=0...1 */
+#define I40E_PRTTSYN_AUX_0_MAX_INDEX 1
+#define I40E_PRTTSYN_AUX_0_OUT_ENA_SHIFT 0
+#define I40E_PRTTSYN_AUX_0_OUT_ENA_MASK (0x1 << I40E_PRTTSYN_AUX_0_OUT_ENA_SHIFT)
+#define I40E_PRTTSYN_AUX_0_OUTMOD_SHIFT 1
+#define I40E_PRTTSYN_AUX_0_OUTMOD_MASK (0x3 << I40E_PRTTSYN_AUX_0_OUTMOD_SHIFT)
+#define I40E_PRTTSYN_AUX_0_OUTLVL_SHIFT 3
+#define I40E_PRTTSYN_AUX_0_OUTLVL_MASK (0x1 << I40E_PRTTSYN_AUX_0_OUTLVL_SHIFT)
+#define I40E_PRTTSYN_AUX_0_PULSEW_SHIFT 8
+#define I40E_PRTTSYN_AUX_0_PULSEW_MASK (0xF << I40E_PRTTSYN_AUX_0_PULSEW_SHIFT)
+#define I40E_PRTTSYN_AUX_0_EVNTLVL_SHIFT 16
+#define I40E_PRTTSYN_AUX_0_EVNTLVL_MASK (0x3 << I40E_PRTTSYN_AUX_0_EVNTLVL_SHIFT)
+#define I40E_PRTTSYN_AUX_1(_i) (0x001E42E0 + ((_i) * 32)) /* _i=0...1 */
+#define I40E_PRTTSYN_AUX_1_MAX_INDEX 1
+#define I40E_PRTTSYN_AUX_1_INSTNT_SHIFT 0
+#define I40E_PRTTSYN_AUX_1_INSTNT_MASK (0x1 << I40E_PRTTSYN_AUX_1_INSTNT_SHIFT)
+#define I40E_PRTTSYN_AUX_1_SAMPLE_TIME_SHIFT 1
+#define I40E_PRTTSYN_AUX_1_SAMPLE_TIME_MASK (0x1 << I40E_PRTTSYN_AUX_1_SAMPLE_TIME_SHIFT)
+#define I40E_PRTTSYN_CLKO(_i) (0x001E4240 + ((_i) * 32)) /* _i=0...1 */
+#define I40E_PRTTSYN_CLKO_MAX_INDEX 1
+#define I40E_PRTTSYN_CLKO_TSYNCLKO_SHIFT 0
+#define I40E_PRTTSYN_CLKO_TSYNCLKO_MASK (0xFFFFFFFF << I40E_PRTTSYN_CLKO_TSYNCLKO_SHIFT)
+#define I40E_PRTTSYN_CTL0 0x001E4200
+#define I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_SHIFT 0
+#define I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_MASK (0x1 << I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_SHIFT)
+#define I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_SHIFT 1
+#define I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_MASK (0x1 << I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_SHIFT)
+#define I40E_PRTTSYN_CTL0_EVENT_INT_ENA_SHIFT 2
+#define I40E_PRTTSYN_CTL0_EVENT_INT_ENA_MASK (0x1 << I40E_PRTTSYN_CTL0_EVENT_INT_ENA_SHIFT)
+#define I40E_PRTTSYN_CTL0_TGT_INT_ENA_SHIFT 3
+#define I40E_PRTTSYN_CTL0_TGT_INT_ENA_MASK (0x1 << I40E_PRTTSYN_CTL0_TGT_INT_ENA_SHIFT)
+#define I40E_PRTTSYN_CTL0_PF_ID_SHIFT 8
+#define I40E_PRTTSYN_CTL0_PF_ID_MASK (0xF << I40E_PRTTSYN_CTL0_PF_ID_SHIFT)
+#define I40E_PRTTSYN_CTL0_TSYNACT_SHIFT 12
+#define I40E_PRTTSYN_CTL0_TSYNACT_MASK (0x3 << I40E_PRTTSYN_CTL0_TSYNACT_SHIFT)
+#define I40E_PRTTSYN_CTL0_TSYNENA_SHIFT 31
+#define I40E_PRTTSYN_CTL0_TSYNENA_MASK (0x1 << I40E_PRTTSYN_CTL0_TSYNENA_SHIFT)
+#define I40E_PRTTSYN_CTL1 0x00085020
+#define I40E_PRTTSYN_CTL1_V1MESSTYPE0_SHIFT 0
+#define I40E_PRTTSYN_CTL1_V1MESSTYPE0_MASK (0xFF << I40E_PRTTSYN_CTL1_V1MESSTYPE0_SHIFT)
+#define I40E_PRTTSYN_CTL1_V1MESSTYPE1_SHIFT 8
+#define I40E_PRTTSYN_CTL1_V1MESSTYPE1_MASK (0xFF << I40E_PRTTSYN_CTL1_V1MESSTYPE1_SHIFT)
+#define I40E_PRTTSYN_CTL1_V2MESSTYPE0_SHIFT 16
+#define I40E_PRTTSYN_CTL1_V2MESSTYPE0_MASK (0xF << I40E_PRTTSYN_CTL1_V2MESSTYPE0_SHIFT)
+#define I40E_PRTTSYN_CTL1_V2MESSTYPE1_SHIFT 20
+#define I40E_PRTTSYN_CTL1_V2MESSTYPE1_MASK (0xF << I40E_PRTTSYN_CTL1_V2MESSTYPE1_SHIFT)
+#define I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT 24
+#define I40E_PRTTSYN_CTL1_TSYNTYPE_MASK (0x3 << I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
+#define I40E_PRTTSYN_CTL1_UDP_ENA_SHIFT 26
+#define I40E_PRTTSYN_CTL1_UDP_ENA_MASK (0x3 << I40E_PRTTSYN_CTL1_UDP_ENA_SHIFT)
+#define I40E_PRTTSYN_CTL1_TSYNENA_SHIFT 31
+#define I40E_PRTTSYN_CTL1_TSYNENA_MASK (0x1 << I40E_PRTTSYN_CTL1_TSYNENA_SHIFT)
+#define I40E_PRTTSYN_EVNT_H(_i) (0x001E40C0 + ((_i) * 32)) /* _i=0...1 */
+#define I40E_PRTTSYN_EVNT_H_MAX_INDEX 1
+#define I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_SHIFT 0
+#define I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_MASK (0xFFFFFFFF << I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_SHIFT)
+#define I40E_PRTTSYN_EVNT_L(_i) (0x001E4080 + ((_i) * 32)) /* _i=0...1 */
+#define I40E_PRTTSYN_EVNT_L_MAX_INDEX 1
+#define I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_SHIFT 0
+#define I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_MASK (0xFFFFFFFF << I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_SHIFT)
+#define I40E_PRTTSYN_INC_H 0x001E4060
+#define I40E_PRTTSYN_INC_H_TSYNINC_H_SHIFT 0
+#define I40E_PRTTSYN_INC_H_TSYNINC_H_MASK (0x3F << I40E_PRTTSYN_INC_H_TSYNINC_H_SHIFT)
+#define I40E_PRTTSYN_INC_L 0x001E4040
+#define I40E_PRTTSYN_INC_L_TSYNINC_L_SHIFT 0
+#define I40E_PRTTSYN_INC_L_TSYNINC_L_MASK (0xFFFFFFFF << I40E_PRTTSYN_INC_L_TSYNINC_L_SHIFT)
+#define I40E_PRTTSYN_RXTIME_H(_i) (0x00085040 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRTTSYN_RXTIME_H_MAX_INDEX 3
+#define I40E_PRTTSYN_RXTIME_H_RXTIEM_H_SHIFT 0
+#define I40E_PRTTSYN_RXTIME_H_RXTIEM_H_MASK (0xFFFFFFFF << I40E_PRTTSYN_RXTIME_H_RXTIEM_H_SHIFT)
+#define I40E_PRTTSYN_RXTIME_L(_i) (0x000850C0 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRTTSYN_RXTIME_L_MAX_INDEX 3
+#define I40E_PRTTSYN_RXTIME_L_RXTIEM_L_SHIFT 0
+#define I40E_PRTTSYN_RXTIME_L_RXTIEM_L_MASK (0xFFFFFFFF << I40E_PRTTSYN_RXTIME_L_RXTIEM_L_SHIFT)
+#define I40E_PRTTSYN_STAT_0 0x001E4220
+#define I40E_PRTTSYN_STAT_0_EVENT0_SHIFT 0
+#define I40E_PRTTSYN_STAT_0_EVENT0_MASK (0x1 << I40E_PRTTSYN_STAT_0_EVENT0_SHIFT)
+#define I40E_PRTTSYN_STAT_0_EVENT1_SHIFT 1
+#define I40E_PRTTSYN_STAT_0_EVENT1_MASK (0x1 << I40E_PRTTSYN_STAT_0_EVENT1_SHIFT)
+#define I40E_PRTTSYN_STAT_0_TGT0_SHIFT 2
+#define I40E_PRTTSYN_STAT_0_TGT0_MASK (0x1 << I40E_PRTTSYN_STAT_0_TGT0_SHIFT)
+#define I40E_PRTTSYN_STAT_0_TGT1_SHIFT 3
+#define I40E_PRTTSYN_STAT_0_TGT1_MASK (0x1 << I40E_PRTTSYN_STAT_0_TGT1_SHIFT)
+#define I40E_PRTTSYN_STAT_0_TXTIME_SHIFT 4
+#define I40E_PRTTSYN_STAT_0_TXTIME_MASK (0x1 << I40E_PRTTSYN_STAT_0_TXTIME_SHIFT)
+#define I40E_PRTTSYN_STAT_1 0x00085140
+#define I40E_PRTTSYN_STAT_1_RXT0_SHIFT 0
+#define I40E_PRTTSYN_STAT_1_RXT0_MASK (0x1 << I40E_PRTTSYN_STAT_1_RXT0_SHIFT)
+#define I40E_PRTTSYN_STAT_1_RXT1_SHIFT 1
+#define I40E_PRTTSYN_STAT_1_RXT1_MASK (0x1 << I40E_PRTTSYN_STAT_1_RXT1_SHIFT)
+#define I40E_PRTTSYN_STAT_1_RXT2_SHIFT 2
+#define I40E_PRTTSYN_STAT_1_RXT2_MASK (0x1 << I40E_PRTTSYN_STAT_1_RXT2_SHIFT)
+#define I40E_PRTTSYN_STAT_1_RXT3_SHIFT 3
+#define I40E_PRTTSYN_STAT_1_RXT3_MASK (0x1 << I40E_PRTTSYN_STAT_1_RXT3_SHIFT)
+#define I40E_PRTTSYN_TGT_H(_i) (0x001E4180 + ((_i) * 32)) /* _i=0...1 */
+#define I40E_PRTTSYN_TGT_H_MAX_INDEX 1
+#define I40E_PRTTSYN_TGT_H_TSYNTGTT_H_SHIFT 0
+#define I40E_PRTTSYN_TGT_H_TSYNTGTT_H_MASK (0xFFFFFFFF << I40E_PRTTSYN_TGT_H_TSYNTGTT_H_SHIFT)
+#define I40E_PRTTSYN_TGT_L(_i) (0x001E4140 + ((_i) * 32)) /* _i=0...1 */
+#define I40E_PRTTSYN_TGT_L_MAX_INDEX 1
+#define I40E_PRTTSYN_TGT_L_TSYNTGTT_L_SHIFT 0
+#define I40E_PRTTSYN_TGT_L_TSYNTGTT_L_MASK (0xFFFFFFFF << I40E_PRTTSYN_TGT_L_TSYNTGTT_L_SHIFT)
+#define I40E_PRTTSYN_TIME_H 0x001E4120
+#define I40E_PRTTSYN_TIME_H_TSYNTIME_H_SHIFT 0
+#define I40E_PRTTSYN_TIME_H_TSYNTIME_H_MASK (0xFFFFFFFF << I40E_PRTTSYN_TIME_H_TSYNTIME_H_SHIFT)
+#define I40E_PRTTSYN_TIME_L 0x001E4100
+#define I40E_PRTTSYN_TIME_L_TSYNTIME_L_SHIFT 0
+#define I40E_PRTTSYN_TIME_L_TSYNTIME_L_MASK (0xFFFFFFFF << I40E_PRTTSYN_TIME_L_TSYNTIME_L_SHIFT)
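
/*
 * Editor's sketch, not part of this patch: sampling the 64-bit 1588
 * clock from the PRTTSYN_TIME_L/H pair.  Reading L before H assumes
 * the usual latch-on-low-read convention, which this patch does not
 * state.
 */
static u64 example_read_tsyn_time(void __iomem *hw_addr)
{
	u32 lo = readl(hw_addr + I40E_PRTTSYN_TIME_L);
	u32 hi = readl(hw_addr + I40E_PRTTSYN_TIME_H);

	return ((u64)hi << 32) | lo;
}
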
+#define I40E_PRTTSYN_TXTIME_H 0x001E41E0
+#define I40E_PRTTSYN_TXTIME_H_TXTIEM_H_SHIFT 0
+#define I40E_PRTTSYN_TXTIME_H_TXTIEM_H_MASK (0xFFFFFFFF << I40E_PRTTSYN_TXTIME_H_TXTIEM_H_SHIFT)
+#define I40E_PRTTSYN_TXTIME_L 0x001E41C0
+#define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT 0
+#define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_MASK (0xFFFFFFFF << I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT)
+#define I40E_GLSCD_QUANTA 0x000B2080
+#define I40E_GLSCD_QUANTA_TSCDQUANTA_SHIFT 0
+#define I40E_GLSCD_QUANTA_TSCDQUANTA_MASK (0x7 << I40E_GLSCD_QUANTA_TSCDQUANTA_SHIFT)
+#define I40E_GL_MDET_RX 0x0012A510
+#define I40E_GL_MDET_RX_FUNCTION_SHIFT 0
+#define I40E_GL_MDET_RX_FUNCTION_MASK (0xFF << I40E_GL_MDET_RX_FUNCTION_SHIFT)
+#define I40E_GL_MDET_RX_EVENT_SHIFT 8
+#define I40E_GL_MDET_RX_EVENT_MASK (0x1FF << I40E_GL_MDET_RX_EVENT_SHIFT)
+#define I40E_GL_MDET_RX_QUEUE_SHIFT 17
+#define I40E_GL_MDET_RX_QUEUE_MASK (0x3FFF << I40E_GL_MDET_RX_QUEUE_SHIFT)
+#define I40E_GL_MDET_RX_VALID_SHIFT 31
+#define I40E_GL_MDET_RX_VALID_MASK (0x1 << I40E_GL_MDET_RX_VALID_SHIFT)
+#define I40E_GL_MDET_TX 0x000E6480
+#define I40E_GL_MDET_TX_FUNCTION_SHIFT 0
+#define I40E_GL_MDET_TX_FUNCTION_MASK (0xFF << I40E_GL_MDET_TX_FUNCTION_SHIFT)
+#define I40E_GL_MDET_TX_EVENT_SHIFT 8
+#define I40E_GL_MDET_TX_EVENT_MASK (0x1FF << I40E_GL_MDET_TX_EVENT_SHIFT)
+#define I40E_GL_MDET_TX_QUEUE_SHIFT 17
+#define I40E_GL_MDET_TX_QUEUE_MASK (0x3FFF << I40E_GL_MDET_TX_QUEUE_SHIFT)
+#define I40E_GL_MDET_TX_VALID_SHIFT 31
+#define I40E_GL_MDET_TX_VALID_MASK (0x1 << I40E_GL_MDET_TX_VALID_SHIFT)
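
/*
 * Editor's sketch, not part of this patch: decoding a Tx malicious-
 * driver-detection event via the GL_MDET_TX fields above.  The
 * write-ones-to-clear at the end mirrors common practice for such
 * latched event registers but is an assumption here.
 */
static void example_handle_mdet_tx(void __iomem *hw_addr)
{
	u32 reg = readl(hw_addr + I40E_GL_MDET_TX);
	u16 event, queue;
	u8 func;

	if (!(reg & I40E_GL_MDET_TX_VALID_MASK))
		return;

	func  = (reg & I40E_GL_MDET_TX_FUNCTION_MASK) >>
		I40E_GL_MDET_TX_FUNCTION_SHIFT;
	event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
		I40E_GL_MDET_TX_EVENT_SHIFT;
	queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
		I40E_GL_MDET_TX_QUEUE_SHIFT;

	pr_warn("MDD Tx event 0x%x on queue %u, function %u\n",
		event, queue, func);
	writel(0xffffffff, hw_addr + I40E_GL_MDET_TX);	/* re-arm */
}
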
+#define I40E_PF_MDET_RX 0x0012A400
+#define I40E_PF_MDET_RX_VALID_SHIFT 0
+#define I40E_PF_MDET_RX_VALID_MASK (0x1 << I40E_PF_MDET_RX_VALID_SHIFT)
+#define I40E_PF_MDET_TX 0x000E6400
+#define I40E_PF_MDET_TX_VALID_SHIFT 0
+#define I40E_PF_MDET_TX_VALID_MASK (0x1 << I40E_PF_MDET_TX_VALID_SHIFT)
+#define I40E_PF_VT_PFALLOC 0x001C0500
+#define I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT 0
+#define I40E_PF_VT_PFALLOC_FIRSTVF_MASK (0xFF << I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT)
+#define I40E_PF_VT_PFALLOC_LASTVF_SHIFT 8
+#define I40E_PF_VT_PFALLOC_LASTVF_MASK (0xFF << I40E_PF_VT_PFALLOC_LASTVF_SHIFT)
+#define I40E_PF_VT_PFALLOC_VALID_SHIFT 31
+#define I40E_PF_VT_PFALLOC_VALID_MASK (0x1 << I40E_PF_VT_PFALLOC_VALID_SHIFT)
+#define I40E_VP_MDET_RX(_VF) (0x0012A000 + ((_VF) * 4)) /* _VF=0...127 */
+#define I40E_VP_MDET_RX_MAX_INDEX 127
+#define I40E_VP_MDET_RX_VALID_SHIFT 0
+#define I40E_VP_MDET_RX_VALID_MASK (0x1 << I40E_VP_MDET_RX_VALID_SHIFT)
+#define I40E_VP_MDET_TX(_VF) (0x000E6000 + ((_VF) * 4)) /* _VF=0...127 */
+#define I40E_VP_MDET_TX_MAX_INDEX 127
+#define I40E_VP_MDET_TX_VALID_SHIFT 0
+#define I40E_VP_MDET_TX_VALID_MASK (0x1 << I40E_VP_MDET_TX_VALID_SHIFT)
+#define I40E_GLPM_WUMC 0x0006C800
+#define I40E_GLPM_WUMC_NOTCO_SHIFT 0
+#define I40E_GLPM_WUMC_NOTCO_MASK (0x1 << I40E_GLPM_WUMC_NOTCO_SHIFT)
+#define I40E_GLPM_WUMC_SRST_PIN_VAL_SHIFT 1
+#define I40E_GLPM_WUMC_SRST_PIN_VAL_MASK (0x1 << I40E_GLPM_WUMC_SRST_PIN_VAL_SHIFT)
+#define I40E_GLPM_WUMC_ROL_MODE_SHIFT 2
+#define I40E_GLPM_WUMC_ROL_MODE_MASK (0x1 << I40E_GLPM_WUMC_ROL_MODE_SHIFT)
+#define I40E_GLPM_WUMC_RESERVED_4_SHIFT 3
+#define I40E_GLPM_WUMC_RESERVED_4_MASK (0x1FFF << I40E_GLPM_WUMC_RESERVED_4_SHIFT)
+#define I40E_GLPM_WUMC_MNG_WU_PF_SHIFT 16
+#define I40E_GLPM_WUMC_MNG_WU_PF_MASK (0xFFFF << I40E_GLPM_WUMC_MNG_WU_PF_SHIFT)
+#define I40E_PFPM_APM 0x000B8080
+#define I40E_PFPM_APM_APME_SHIFT 0
+#define I40E_PFPM_APM_APME_MASK (0x1 << I40E_PFPM_APM_APME_SHIFT)
+#define I40E_PFPM_FHFT_DATA(_i, _j) (0x00060000 + ((_i) * 4096 + (_j) * 128)) /* _i=0...7, _j=0...31 */
+#define I40E_PFPM_FHFT_DATA_MAX_INDEX 7
+#define I40E_PFPM_FHFT_DATA_DWORD_SHIFT 0
+#define I40E_PFPM_FHFT_DATA_DWORD_MASK (0xFFFFFFFF << I40E_PFPM_FHFT_DATA_DWORD_SHIFT)
+#define I40E_PFPM_FHFT_LENGTH(_i) (0x0006A000 + ((_i) * 128)) /* _i=0...7 */
+#define I40E_PFPM_FHFT_LENGTH_MAX_INDEX 7
+#define I40E_PFPM_FHFT_LENGTH_LENGTH_SHIFT 0
+#define I40E_PFPM_FHFT_LENGTH_LENGTH_MASK (0xFF << I40E_PFPM_FHFT_LENGTH_LENGTH_SHIFT)
+#define I40E_PFPM_FHFT_MASK(_i, _j) (0x00068000 + ((_i) * 1024 + (_j) * 128)) /* _i=0...7, _j=0...7 */
+#define I40E_PFPM_FHFT_MASK_MAX_INDEX 7
+#define I40E_PFPM_FHFT_MASK_MASK_SHIFT 0
+#define I40E_PFPM_FHFT_MASK_MASK_MASK (0xFFFF << I40E_PFPM_FHFT_MASK_MASK_SHIFT)
+#define I40E_PFPM_PROXYFC 0x00245A80
+#define I40E_PFPM_PROXYFC_PPROXYE_SHIFT 0
+#define I40E_PFPM_PROXYFC_PPROXYE_MASK (0x1 << I40E_PFPM_PROXYFC_PPROXYE_SHIFT)
+#define I40E_PFPM_PROXYFC_EX_SHIFT 1
+#define I40E_PFPM_PROXYFC_EX_MASK (0x1 << I40E_PFPM_PROXYFC_EX_SHIFT)
+#define I40E_PFPM_PROXYFC_ARP_SHIFT 4
+#define I40E_PFPM_PROXYFC_ARP_MASK (0x1 << I40E_PFPM_PROXYFC_ARP_SHIFT)
+#define I40E_PFPM_PROXYFC_ARP_DIRECTED_SHIFT 5
+#define I40E_PFPM_PROXYFC_ARP_DIRECTED_MASK (0x1 << I40E_PFPM_PROXYFC_ARP_DIRECTED_SHIFT)
+#define I40E_PFPM_PROXYFC_NS_SHIFT 9
+#define I40E_PFPM_PROXYFC_NS_MASK (0x1 << I40E_PFPM_PROXYFC_NS_SHIFT)
+#define I40E_PFPM_PROXYFC_NS_DIRECTED_SHIFT 10
+#define I40E_PFPM_PROXYFC_NS_DIRECTED_MASK (0x1 << I40E_PFPM_PROXYFC_NS_DIRECTED_SHIFT)
+#define I40E_PFPM_PROXYFC_MLD_SHIFT 12
+#define I40E_PFPM_PROXYFC_MLD_MASK (0x1 << I40E_PFPM_PROXYFC_MLD_SHIFT)
+#define I40E_PFPM_PROXYS 0x00245B80
+#define I40E_PFPM_PROXYS_EX_SHIFT 1
+#define I40E_PFPM_PROXYS_EX_MASK (0x1 << I40E_PFPM_PROXYS_EX_SHIFT)
+#define I40E_PFPM_PROXYS_ARP_SHIFT 4
+#define I40E_PFPM_PROXYS_ARP_MASK (0x1 << I40E_PFPM_PROXYS_ARP_SHIFT)
+#define I40E_PFPM_PROXYS_ARP_DIRECTED_SHIFT 5
+#define I40E_PFPM_PROXYS_ARP_DIRECTED_MASK (0x1 << I40E_PFPM_PROXYS_ARP_DIRECTED_SHIFT)
+#define I40E_PFPM_PROXYS_NS_SHIFT 9
+#define I40E_PFPM_PROXYS_NS_MASK (0x1 << I40E_PFPM_PROXYS_NS_SHIFT)
+#define I40E_PFPM_PROXYS_NS_DIRECTED_SHIFT 10
+#define I40E_PFPM_PROXYS_NS_DIRECTED_MASK (0x1 << I40E_PFPM_PROXYS_NS_DIRECTED_SHIFT)
+#define I40E_PFPM_PROXYS_MLD_SHIFT 12
+#define I40E_PFPM_PROXYS_MLD_MASK (0x1 << I40E_PFPM_PROXYS_MLD_SHIFT)
+#define I40E_PFPM_WUC 0x0006B200
+#define I40E_PFPM_WUC_EN_APM_D0_SHIFT 5
+#define I40E_PFPM_WUC_EN_APM_D0_MASK (0x1 << I40E_PFPM_WUC_EN_APM_D0_SHIFT)
+#define I40E_PFPM_WUFC 0x0006B400
+#define I40E_PFPM_WUFC_LNKC_SHIFT 0
+#define I40E_PFPM_WUFC_LNKC_MASK (0x1 << I40E_PFPM_WUFC_LNKC_SHIFT)
+#define I40E_PFPM_WUFC_MAG_SHIFT 1
+#define I40E_PFPM_WUFC_MAG_MASK (0x1 << I40E_PFPM_WUFC_MAG_SHIFT)
+#define I40E_PFPM_WUFC_MNG_SHIFT 3
+#define I40E_PFPM_WUFC_MNG_MASK (0x1 << I40E_PFPM_WUFC_MNG_SHIFT)
+#define I40E_PFPM_WUFC_FLX0_ACT_SHIFT 4
+#define I40E_PFPM_WUFC_FLX0_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX0_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX1_ACT_SHIFT 5
+#define I40E_PFPM_WUFC_FLX1_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX1_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX2_ACT_SHIFT 6
+#define I40E_PFPM_WUFC_FLX2_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX2_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX3_ACT_SHIFT 7
+#define I40E_PFPM_WUFC_FLX3_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX3_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX4_ACT_SHIFT 8
+#define I40E_PFPM_WUFC_FLX4_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX4_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX5_ACT_SHIFT 9
+#define I40E_PFPM_WUFC_FLX5_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX5_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX6_ACT_SHIFT 10
+#define I40E_PFPM_WUFC_FLX6_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX6_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX7_ACT_SHIFT 11
+#define I40E_PFPM_WUFC_FLX7_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX7_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX0_SHIFT 16
+#define I40E_PFPM_WUFC_FLX0_MASK (0x1 << I40E_PFPM_WUFC_FLX0_SHIFT)
+#define I40E_PFPM_WUFC_FLX1_SHIFT 17
+#define I40E_PFPM_WUFC_FLX1_MASK (0x1 << I40E_PFPM_WUFC_FLX1_SHIFT)
+#define I40E_PFPM_WUFC_FLX2_SHIFT 18
+#define I40E_PFPM_WUFC_FLX2_MASK (0x1 << I40E_PFPM_WUFC_FLX2_SHIFT)
+#define I40E_PFPM_WUFC_FLX3_SHIFT 19
+#define I40E_PFPM_WUFC_FLX3_MASK (0x1 << I40E_PFPM_WUFC_FLX3_SHIFT)
+#define I40E_PFPM_WUFC_FLX4_SHIFT 20
+#define I40E_PFPM_WUFC_FLX4_MASK (0x1 << I40E_PFPM_WUFC_FLX4_SHIFT)
+#define I40E_PFPM_WUFC_FLX5_SHIFT 21
+#define I40E_PFPM_WUFC_FLX5_MASK (0x1 << I40E_PFPM_WUFC_FLX5_SHIFT)
+#define I40E_PFPM_WUFC_FLX6_SHIFT 22
+#define I40E_PFPM_WUFC_FLX6_MASK (0x1 << I40E_PFPM_WUFC_FLX6_SHIFT)
+#define I40E_PFPM_WUFC_FLX7_SHIFT 23
+#define I40E_PFPM_WUFC_FLX7_MASK (0x1 << I40E_PFPM_WUFC_FLX7_SHIFT)
+#define I40E_PFPM_WUFC_FW_RST_WK_SHIFT 31
+#define I40E_PFPM_WUFC_FW_RST_WK_MASK (0x1 << I40E_PFPM_WUFC_FW_RST_WK_SHIFT)
+#define I40E_PFPM_WUS 0x0006B600
+#define I40E_PFPM_WUS_LNKC_SHIFT 0
+#define I40E_PFPM_WUS_LNKC_MASK (0x1 << I40E_PFPM_WUS_LNKC_SHIFT)
+#define I40E_PFPM_WUS_MAG_SHIFT 1
+#define I40E_PFPM_WUS_MAG_MASK (0x1 << I40E_PFPM_WUS_MAG_SHIFT)
+#define I40E_PFPM_WUS_PME_STATUS_SHIFT 2
+#define I40E_PFPM_WUS_PME_STATUS_MASK (0x1 << I40E_PFPM_WUS_PME_STATUS_SHIFT)
+#define I40E_PFPM_WUS_MNG_SHIFT 3
+#define I40E_PFPM_WUS_MNG_MASK (0x1 << I40E_PFPM_WUS_MNG_SHIFT)
+#define I40E_PFPM_WUS_FLX0_SHIFT 16
+#define I40E_PFPM_WUS_FLX0_MASK (0x1 << I40E_PFPM_WUS_FLX0_SHIFT)
+#define I40E_PFPM_WUS_FLX1_SHIFT 17
+#define I40E_PFPM_WUS_FLX1_MASK (0x1 << I40E_PFPM_WUS_FLX1_SHIFT)
+#define I40E_PFPM_WUS_FLX2_SHIFT 18
+#define I40E_PFPM_WUS_FLX2_MASK (0x1 << I40E_PFPM_WUS_FLX2_SHIFT)
+#define I40E_PFPM_WUS_FLX3_SHIFT 19
+#define I40E_PFPM_WUS_FLX3_MASK (0x1 << I40E_PFPM_WUS_FLX3_SHIFT)
+#define I40E_PFPM_WUS_FLX4_SHIFT 20
+#define I40E_PFPM_WUS_FLX4_MASK (0x1 << I40E_PFPM_WUS_FLX4_SHIFT)
+#define I40E_PFPM_WUS_FLX5_SHIFT 21
+#define I40E_PFPM_WUS_FLX5_MASK (0x1 << I40E_PFPM_WUS_FLX5_SHIFT)
+#define I40E_PFPM_WUS_FLX6_SHIFT 22
+#define I40E_PFPM_WUS_FLX6_MASK (0x1 << I40E_PFPM_WUS_FLX6_SHIFT)
+#define I40E_PFPM_WUS_FLX7_SHIFT 23
+#define I40E_PFPM_WUS_FLX7_MASK (0x1 << I40E_PFPM_WUS_FLX7_SHIFT)
+#define I40E_PFPM_WUS_FW_RST_WK_SHIFT 31
+#define I40E_PFPM_WUS_FW_RST_WK_MASK (0x1 << I40E_PFPM_WUS_FW_RST_WK_SHIFT)
+#define I40E_PRTPM_FHFHR 0x0006C000
+#define I40E_PRTPM_FHFHR_UNICAST_SHIFT 0
+#define I40E_PRTPM_FHFHR_UNICAST_MASK (0x1 << I40E_PRTPM_FHFHR_UNICAST_SHIFT)
+#define I40E_PRTPM_FHFHR_MULTICAST_SHIFT 1
+#define I40E_PRTPM_FHFHR_MULTICAST_MASK (0x1 << I40E_PRTPM_FHFHR_MULTICAST_SHIFT)
+#define I40E_PRTPM_SAH(_i) (0x001E44C0 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRTPM_SAH_MAX_INDEX 3
+#define I40E_PRTPM_SAH_PFPM_SAH_SHIFT 0
+#define I40E_PRTPM_SAH_PFPM_SAH_MASK (0xFFFF << I40E_PRTPM_SAH_PFPM_SAH_SHIFT)
+#define I40E_PRTPM_SAH_PF_NUM_SHIFT 26
+#define I40E_PRTPM_SAH_PF_NUM_MASK (0xF << I40E_PRTPM_SAH_PF_NUM_SHIFT)
+#define I40E_PRTPM_SAH_MC_MAG_EN_SHIFT 30
+#define I40E_PRTPM_SAH_MC_MAG_EN_MASK (0x1 << I40E_PRTPM_SAH_MC_MAG_EN_SHIFT)
+#define I40E_PRTPM_SAH_AV_SHIFT 31
+#define I40E_PRTPM_SAH_AV_MASK (0x1 << I40E_PRTPM_SAH_AV_SHIFT)
+#define I40E_PRTPM_SAL(_i) (0x001E4440 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRTPM_SAL_MAX_INDEX 3
+#define I40E_PRTPM_SAL_PFPM_SAL_SHIFT 0
+#define I40E_PRTPM_SAL_PFPM_SAL_MASK (0xFFFFFFFF << I40E_PRTPM_SAL_PFPM_SAL_SHIFT)
+#define I40E_VF_ARQBAH1 0x00006000
+#define I40E_VF_ARQBAH1_ARQBAH_SHIFT 0
+#define I40E_VF_ARQBAH1_ARQBAH_MASK (0xFFFFFFFF << I40E_VF_ARQBAH1_ARQBAH_SHIFT)
+#define I40E_VF_ARQBAL1 0x00006C00
+#define I40E_VF_ARQBAL1_ARQBAL_SHIFT 0
+#define I40E_VF_ARQBAL1_ARQBAL_MASK (0xFFFFFFFF << I40E_VF_ARQBAL1_ARQBAL_SHIFT)
+#define I40E_VF_ARQH1 0x00007400
+#define I40E_VF_ARQH1_ARQH_SHIFT 0
+#define I40E_VF_ARQH1_ARQH_MASK (0x3FF << I40E_VF_ARQH1_ARQH_SHIFT)
+#define I40E_VF_ARQLEN1 0x00008000
+#define I40E_VF_ARQLEN1_ARQLEN_SHIFT 0
+#define I40E_VF_ARQLEN1_ARQLEN_MASK (0x3FF << I40E_VF_ARQLEN1_ARQLEN_SHIFT)
+#define I40E_VF_ARQLEN1_ARQVFE_SHIFT 28
+#define I40E_VF_ARQLEN1_ARQVFE_MASK (0x1 << I40E_VF_ARQLEN1_ARQVFE_SHIFT)
+#define I40E_VF_ARQLEN1_ARQOVFL_SHIFT 29
+#define I40E_VF_ARQLEN1_ARQOVFL_MASK (0x1 << I40E_VF_ARQLEN1_ARQOVFL_SHIFT)
+#define I40E_VF_ARQLEN1_ARQCRIT_SHIFT 30
+#define I40E_VF_ARQLEN1_ARQCRIT_MASK (0x1 << I40E_VF_ARQLEN1_ARQCRIT_SHIFT)
+#define I40E_VF_ARQLEN1_ARQENABLE_SHIFT 31
+#define I40E_VF_ARQLEN1_ARQENABLE_MASK (0x1 << I40E_VF_ARQLEN1_ARQENABLE_SHIFT)
+#define I40E_VF_ARQT1 0x00007000
+#define I40E_VF_ARQT1_ARQT_SHIFT 0
+#define I40E_VF_ARQT1_ARQT_MASK (0x3FF << I40E_VF_ARQT1_ARQT_SHIFT)
+#define I40E_VF_ATQBAH1 0x00007800
+#define I40E_VF_ATQBAH1_ATQBAH_SHIFT 0
+#define I40E_VF_ATQBAH1_ATQBAH_MASK (0xFFFFFFFF << I40E_VF_ATQBAH1_ATQBAH_SHIFT)
+#define I40E_VF_ATQBAL1 0x00007C00
+#define I40E_VF_ATQBAL1_ATQBAL_SHIFT 0
+#define I40E_VF_ATQBAL1_ATQBAL_MASK (0xFFFFFFFF << I40E_VF_ATQBAL1_ATQBAL_SHIFT)
+#define I40E_VF_ATQH1 0x00006400
+#define I40E_VF_ATQH1_ATQH_SHIFT 0
+#define I40E_VF_ATQH1_ATQH_MASK (0x3FF << I40E_VF_ATQH1_ATQH_SHIFT)
+#define I40E_VF_ATQLEN1 0x00006800
+#define I40E_VF_ATQLEN1_ATQLEN_SHIFT 0
+#define I40E_VF_ATQLEN1_ATQLEN_MASK (0x3FF << I40E_VF_ATQLEN1_ATQLEN_SHIFT)
+#define I40E_VF_ATQLEN1_ATQVFE_SHIFT 28
+#define I40E_VF_ATQLEN1_ATQVFE_MASK (0x1 << I40E_VF_ATQLEN1_ATQVFE_SHIFT)
+#define I40E_VF_ATQLEN1_ATQOVFL_SHIFT 29
+#define I40E_VF_ATQLEN1_ATQOVFL_MASK (0x1 << I40E_VF_ATQLEN1_ATQOVFL_SHIFT)
+#define I40E_VF_ATQLEN1_ATQCRIT_SHIFT 30
+#define I40E_VF_ATQLEN1_ATQCRIT_MASK (0x1 << I40E_VF_ATQLEN1_ATQCRIT_SHIFT)
+#define I40E_VF_ATQLEN1_ATQENABLE_SHIFT 31
+#define I40E_VF_ATQLEN1_ATQENABLE_MASK (0x1 << I40E_VF_ATQLEN1_ATQENABLE_SHIFT)
+#define I40E_VF_ATQT1 0x00008400
+#define I40E_VF_ATQT1_ATQT_SHIFT 0
+#define I40E_VF_ATQT1_ATQT_MASK (0x3FF << I40E_VF_ATQT1_ATQT_SHIFT)
+#define I40E_VFGEN_RSTAT 0x00008800
+#define I40E_VFGEN_RSTAT_VFR_STATE_SHIFT 0
+#define I40E_VFGEN_RSTAT_VFR_STATE_MASK (0x3 << I40E_VFGEN_RSTAT_VFR_STATE_SHIFT)
+#define I40E_VFINT_DYN_CTL01 0x00005C00
+#define I40E_VFINT_DYN_CTL01_INTENA_SHIFT 0
+#define I40E_VFINT_DYN_CTL01_INTENA_MASK (0x1 << I40E_VFINT_DYN_CTL01_INTENA_SHIFT)
+#define I40E_VFINT_DYN_CTL01_CLEARPBA_SHIFT 1
+#define I40E_VFINT_DYN_CTL01_CLEARPBA_MASK (0x1 << I40E_VFINT_DYN_CTL01_CLEARPBA_SHIFT)
+#define I40E_VFINT_DYN_CTL01_SWINT_TRIG_SHIFT 2
+#define I40E_VFINT_DYN_CTL01_SWINT_TRIG_MASK (0x1 << I40E_VFINT_DYN_CTL01_SWINT_TRIG_SHIFT)
+#define I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT 3
+#define I40E_VFINT_DYN_CTL01_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTL01_INTERVAL_SHIFT 5
+#define I40E_VFINT_DYN_CTL01_INTERVAL_MASK (0xFFF << I40E_VFINT_DYN_CTL01_INTERVAL_SHIFT)
+#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_SHIFT 24
+#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_MASK (0x1 << I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_SHIFT 25
+#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTL01_SW_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTL01_INTENA_MSK_SHIFT 31
+#define I40E_VFINT_DYN_CTL01_INTENA_MSK_MASK (0x1 << I40E_VFINT_DYN_CTL01_INTENA_MSK_SHIFT)
+#define I40E_VFINT_DYN_CTLN1(_INTVF) (0x00003800 + ((_INTVF) * 4)) /* _INTVF=0...15 */
+#define I40E_VFINT_DYN_CTLN1_MAX_INDEX 15
+#define I40E_VFINT_DYN_CTLN1_INTENA_SHIFT 0
+#define I40E_VFINT_DYN_CTLN1_INTENA_MASK (0x1 << I40E_VFINT_DYN_CTLN1_INTENA_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_CLEARPBA_SHIFT 1
+#define I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK (0x1 << I40E_VFINT_DYN_CTLN1_CLEARPBA_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT 2
+#define I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK (0x1 << I40E_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT 3
+#define I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT 5
+#define I40E_VFINT_DYN_CTLN1_INTERVAL_MASK (0xFFF << I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT 24
+#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK (0x1 << I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_SHIFT 25
+#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_INTENA_MSK_SHIFT 31
+#define I40E_VFINT_DYN_CTLN1_INTENA_MSK_MASK (0x1 << I40E_VFINT_DYN_CTLN1_INTENA_MSK_SHIFT)
+#define I40E_VFINT_ICR0_ENA1 0x00005000
+#define I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_SHIFT 25
+#define I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_MASK (0x1 << I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_SHIFT)
+#define I40E_VFINT_ICR0_ENA1_ADMINQ_SHIFT 30
+#define I40E_VFINT_ICR0_ENA1_ADMINQ_MASK (0x1 << I40E_VFINT_ICR0_ENA1_ADMINQ_SHIFT)
+#define I40E_VFINT_ICR0_ENA1_RSVD_SHIFT 31
+#define I40E_VFINT_ICR0_ENA1_RSVD_MASK (0x1 << I40E_VFINT_ICR0_ENA1_RSVD_SHIFT)
+#define I40E_VFINT_ICR01 0x00004800
+#define I40E_VFINT_ICR01_INTEVENT_SHIFT 0
+#define I40E_VFINT_ICR01_INTEVENT_MASK (0x1 << I40E_VFINT_ICR01_INTEVENT_SHIFT)
+#define I40E_VFINT_ICR01_QUEUE_0_SHIFT 1
+#define I40E_VFINT_ICR01_QUEUE_0_MASK (0x1 << I40E_VFINT_ICR01_QUEUE_0_SHIFT)
+#define I40E_VFINT_ICR01_QUEUE_1_SHIFT 2
+#define I40E_VFINT_ICR01_QUEUE_1_MASK (0x1 << I40E_VFINT_ICR01_QUEUE_1_SHIFT)
+#define I40E_VFINT_ICR01_QUEUE_2_SHIFT 3
+#define I40E_VFINT_ICR01_QUEUE_2_MASK (0x1 << I40E_VFINT_ICR01_QUEUE_2_SHIFT)
+#define I40E_VFINT_ICR01_QUEUE_3_SHIFT 4
+#define I40E_VFINT_ICR01_QUEUE_3_MASK (0x1 << I40E_VFINT_ICR01_QUEUE_3_SHIFT)
+#define I40E_VFINT_ICR01_LINK_STAT_CHANGE_SHIFT 25
+#define I40E_VFINT_ICR01_LINK_STAT_CHANGE_MASK (0x1 << I40E_VFINT_ICR01_LINK_STAT_CHANGE_SHIFT)
+#define I40E_VFINT_ICR01_ADMINQ_SHIFT 30
+#define I40E_VFINT_ICR01_ADMINQ_MASK (0x1 << I40E_VFINT_ICR01_ADMINQ_SHIFT)
+#define I40E_VFINT_ICR01_SWINT_SHIFT 31
+#define I40E_VFINT_ICR01_SWINT_MASK (0x1 << I40E_VFINT_ICR01_SWINT_SHIFT)
+#define I40E_VFINT_ITR01(_i) (0x00004C00 + ((_i) * 4)) /* _i=0...2 */
+#define I40E_VFINT_ITR01_MAX_INDEX 2
+#define I40E_VFINT_ITR01_INTERVAL_SHIFT 0
+#define I40E_VFINT_ITR01_INTERVAL_MASK (0xFFF << I40E_VFINT_ITR01_INTERVAL_SHIFT)
+#define I40E_VFINT_ITRN1(_i, _INTVF) (0x00002800 + ((_i) * 64 + (_INTVF) * 4))
+#define I40E_VFINT_ITRN1_MAX_INDEX 2
+#define I40E_VFINT_ITRN1_INTERVAL_SHIFT 0
+#define I40E_VFINT_ITRN1_INTERVAL_MASK (0xFFF << I40E_VFINT_ITRN1_INTERVAL_SHIFT)
+#define I40E_VFINT_STAT_CTL01 0x00005400
+#define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT 2
+#define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_MASK (0x3 << I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT)
+#define I40E_QRX_TAIL1(_Q) (0x00002000 + ((_Q) * 4)) /* _Q=0...15 */
+#define I40E_QRX_TAIL1_MAX_INDEX 15
+#define I40E_QRX_TAIL1_TAIL_SHIFT 0
+#define I40E_QRX_TAIL1_TAIL_MASK (0x1FFF << I40E_QRX_TAIL1_TAIL_SHIFT)
+#define I40E_QTX_TAIL1(_Q) (0x00000000 + ((_Q) * 4)) /* _Q=0...15 */
+#define I40E_QTX_TAIL1_MAX_INDEX 15
+#define I40E_QTX_TAIL1_TAIL_SHIFT 0
+#define I40E_QTX_TAIL1_TAIL_MASK (0x1FFF << I40E_QTX_TAIL1_TAIL_SHIFT)
+#define I40E_VFMSIX_PBA 0x00002000
+#define I40E_VFMSIX_PBA_PENBIT_SHIFT 0
+#define I40E_VFMSIX_PBA_PENBIT_MASK (0xFFFFFFFF << I40E_VFMSIX_PBA_PENBIT_SHIFT)
+#define I40E_VFMSIX_TADD(_i) (0x00000008 + ((_i) * 16)) /* _i=0...16 */
+#define I40E_VFMSIX_TADD_MAX_INDEX 16
+#define I40E_VFMSIX_TADD_MSIXTADD10_SHIFT 0
+#define I40E_VFMSIX_TADD_MSIXTADD10_MASK (0x3 << I40E_VFMSIX_TADD_MSIXTADD10_SHIFT)
+#define I40E_VFMSIX_TADD_MSIXTADD_SHIFT 2
+#define I40E_VFMSIX_TADD_MSIXTADD_MASK (0x3FFFFFFF << I40E_VFMSIX_TADD_MSIXTADD_SHIFT)
+#define I40E_VFMSIX_TMSG(_i) (0x0000000C + ((_i) * 16)) /* _i=0...16 */
+#define I40E_VFMSIX_TMSG_MAX_INDEX 16
+#define I40E_VFMSIX_TMSG_MSIXTMSG_SHIFT 0
+#define I40E_VFMSIX_TMSG_MSIXTMSG_MASK (0xFFFFFFFF << I40E_VFMSIX_TMSG_MSIXTMSG_SHIFT)
+#define I40E_VFMSIX_TUADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...16 */
+#define I40E_VFMSIX_TUADD_MAX_INDEX 16
+#define I40E_VFMSIX_TUADD_MSIXTUADD_SHIFT 0
+#define I40E_VFMSIX_TUADD_MSIXTUADD_MASK (0xFFFFFFFF << I40E_VFMSIX_TUADD_MSIXTUADD_SHIFT)
+#define I40E_VFMSIX_TVCTRL(_i) (0x00000004 + ((_i) * 16)) /* _i=0...16 */
+#define I40E_VFMSIX_TVCTRL_MAX_INDEX 16
+#define I40E_VFMSIX_TVCTRL_MASK_SHIFT 0
+#define I40E_VFMSIX_TVCTRL_MASK_MASK (0x1 << I40E_VFMSIX_TVCTRL_MASK_SHIFT)
+#define I40E_VFCM_PE_ERRDATA 0x0000DC00
+#define I40E_VFCM_PE_ERRDATA_ERROR_CODE_SHIFT 0
+#define I40E_VFCM_PE_ERRDATA_ERROR_CODE_MASK (0xF << I40E_VFCM_PE_ERRDATA_ERROR_CODE_SHIFT)
+#define I40E_VFCM_PE_ERRDATA_Q_TYPE_SHIFT 4
+#define I40E_VFCM_PE_ERRDATA_Q_TYPE_MASK (0x7 << I40E_VFCM_PE_ERRDATA_Q_TYPE_SHIFT)
+#define I40E_VFCM_PE_ERRDATA_Q_NUM_SHIFT 8
+#define I40E_VFCM_PE_ERRDATA_Q_NUM_MASK (0x3FFFF << I40E_VFCM_PE_ERRDATA_Q_NUM_SHIFT)
+#define I40E_VFCM_PE_ERRINFO 0x0000D800
+#define I40E_VFCM_PE_ERRINFO_ERROR_VALID_SHIFT 0
+#define I40E_VFCM_PE_ERRINFO_ERROR_VALID_MASK (0x1 << I40E_VFCM_PE_ERRINFO_ERROR_VALID_SHIFT)
+#define I40E_VFCM_PE_ERRINFO_ERROR_INST_SHIFT 4
+#define I40E_VFCM_PE_ERRINFO_ERROR_INST_MASK (0x7 << I40E_VFCM_PE_ERRINFO_ERROR_INST_SHIFT)
+#define I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT 8
+#define I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_MASK (0xFF << I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT)
+#define I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT 16
+#define I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_MASK (0xFF << I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT)
+#define I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT 24
+#define I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_MASK (0xFF << I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT)
+#define I40E_VFPE_AEQALLOC1 0x0000A400
+#define I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT 0
+#define I40E_VFPE_AEQALLOC1_AECOUNT_MASK (0xFFFFFFFF << I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT)
+#define I40E_VFPE_CCQPHIGH1 0x00009800
+#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT 0
+#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_MASK (0xFFFFFFFF << I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT)
+#define I40E_VFPE_CCQPLOW1 0x0000AC00
+#define I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT 0
+#define I40E_VFPE_CCQPLOW1_PECCQPLOW_MASK (0xFFFFFFFF << I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1 0x0000B800
+#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT 0
+#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_MASK (0x1 << I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT 31
+#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_MASK (0x1 << I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT)
+#define I40E_VFPE_CQACK1 0x0000B000
+#define I40E_VFPE_CQACK1_PECQID_SHIFT 0
+#define I40E_VFPE_CQACK1_PECQID_MASK (0x1FFFF << I40E_VFPE_CQACK1_PECQID_SHIFT)
+#define I40E_VFPE_CQARM1 0x0000B400
+#define I40E_VFPE_CQARM1_PECQID_SHIFT 0
+#define I40E_VFPE_CQARM1_PECQID_MASK (0x1FFFF << I40E_VFPE_CQARM1_PECQID_SHIFT)
+#define I40E_VFPE_CQPDB1 0x0000BC00
+#define I40E_VFPE_CQPDB1_WQHEAD_SHIFT 0
+#define I40E_VFPE_CQPDB1_WQHEAD_MASK (0x7FF << I40E_VFPE_CQPDB1_WQHEAD_SHIFT)
+#define I40E_VFPE_CQPERRCODES1 0x00009C00
+#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT 0
+#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_MASK (0xFFFF << I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT)
+#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT 16
+#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_MASK (0xFFFF << I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT)
+#define I40E_VFPE_CQPTAIL1 0x0000A000
+#define I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT 0
+#define I40E_VFPE_CQPTAIL1_WQTAIL_MASK (0x7FF << I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT)
+#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT 31
+#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_MASK (0x1 << I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT)
+#define I40E_VFPE_IPCONFIG01 0x00008C00
+#define I40E_VFPE_IPCONFIG01_PEIPID_SHIFT 0
+#define I40E_VFPE_IPCONFIG01_PEIPID_MASK (0xFFFF << I40E_VFPE_IPCONFIG01_PEIPID_SHIFT)
+#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT 16
+#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_MASK (0x1 << I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT)
+#define I40E_VFPE_IPCONFIG01_USEUPPERIDRANGE_SHIFT 17
+#define I40E_VFPE_IPCONFIG01_USEUPPERIDRANGE_MASK (0x1 << I40E_VFPE_IPCONFIG01_USEUPPERIDRANGE_SHIFT)
+#define I40E_VFPE_MRTEIDXMASK1 0x00009000
+#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT 0
+#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_MASK (0x1F << I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT)
+#define I40E_VFPE_RCVUNEXPECTEDERROR1 0x00009400
+#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT 0
+#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_MASK (0xFFFFFF << I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT)
+#define I40E_VFPE_TCPNOWTIMER1 0x0000A800
+#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT 0
+#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_MASK (0xFFFFFFFF << I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT)
+#define I40E_VFPE_WQEALLOC1 0x0000C000
+#define I40E_VFPE_WQEALLOC1_PEQPID_SHIFT 0
+#define I40E_VFPE_WQEALLOC1_PEQPID_MASK (0x3FFFF << I40E_VFPE_WQEALLOC1_PEQPID_SHIFT)
+#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT 20
+#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_MASK (0xFFF << I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT)
+#define I40E_VFQF_HENA(_i) (0x0000C400 + ((_i) * 4)) /* _i=0...1 */
+#define I40E_VFQF_HENA_MAX_INDEX 1
+#define I40E_VFQF_HENA_PTYPE_ENA_SHIFT 0
+#define I40E_VFQF_HENA_PTYPE_ENA_MASK (0xFFFFFFFF << I40E_VFQF_HENA_PTYPE_ENA_SHIFT)
+#define I40E_VFQF_HKEY(_i) (0x0000CC00 + ((_i) * 4)) /* _i=0...12 */
+#define I40E_VFQF_HKEY_MAX_INDEX 12
+#define I40E_VFQF_HKEY_KEY_0_SHIFT 0
+#define I40E_VFQF_HKEY_KEY_0_MASK (0xFF << I40E_VFQF_HKEY_KEY_0_SHIFT)
+#define I40E_VFQF_HKEY_KEY_1_SHIFT 8
+#define I40E_VFQF_HKEY_KEY_1_MASK (0xFF << I40E_VFQF_HKEY_KEY_1_SHIFT)
+#define I40E_VFQF_HKEY_KEY_2_SHIFT 16
+#define I40E_VFQF_HKEY_KEY_2_MASK (0xFF << I40E_VFQF_HKEY_KEY_2_SHIFT)
+#define I40E_VFQF_HKEY_KEY_3_SHIFT 24
+#define I40E_VFQF_HKEY_KEY_3_MASK (0xFF << I40E_VFQF_HKEY_KEY_3_SHIFT)
+#define I40E_VFQF_HLUT(_i) (0x0000D000 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_VFQF_HLUT_MAX_INDEX 15
+#define I40E_VFQF_HLUT_LUT0_SHIFT 0
+#define I40E_VFQF_HLUT_LUT0_MASK (0xF << I40E_VFQF_HLUT_LUT0_SHIFT)
+#define I40E_VFQF_HLUT_LUT1_SHIFT 8
+#define I40E_VFQF_HLUT_LUT1_MASK (0xF << I40E_VFQF_HLUT_LUT1_SHIFT)
+#define I40E_VFQF_HLUT_LUT2_SHIFT 16
+#define I40E_VFQF_HLUT_LUT2_MASK (0xF << I40E_VFQF_HLUT_LUT2_SHIFT)
+#define I40E_VFQF_HLUT_LUT3_SHIFT 24
+#define I40E_VFQF_HLUT_LUT3_MASK (0xF << I40E_VFQF_HLUT_LUT3_SHIFT)
+#define I40E_VFQF_HREGION(_i) (0x0000D400 + ((_i) * 4)) /* _i=0...7 */
+#define I40E_VFQF_HREGION_MAX_INDEX 7
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_0_SHIFT 0
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_0_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_0_SHIFT)
+#define I40E_VFQF_HREGION_REGION_0_SHIFT 1
+#define I40E_VFQF_HREGION_REGION_0_MASK (0x7 << I40E_VFQF_HREGION_REGION_0_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_1_SHIFT 4
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_1_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_1_SHIFT)
+#define I40E_VFQF_HREGION_REGION_1_SHIFT 5
+#define I40E_VFQF_HREGION_REGION_1_MASK (0x7 << I40E_VFQF_HREGION_REGION_1_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_2_SHIFT 8
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_2_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_2_SHIFT)
+#define I40E_VFQF_HREGION_REGION_2_SHIFT 9
+#define I40E_VFQF_HREGION_REGION_2_MASK (0x7 << I40E_VFQF_HREGION_REGION_2_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_3_SHIFT 12
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_3_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_3_SHIFT)
+#define I40E_VFQF_HREGION_REGION_3_SHIFT 13
+#define I40E_VFQF_HREGION_REGION_3_MASK (0x7 << I40E_VFQF_HREGION_REGION_3_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_4_SHIFT 16
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_4_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_4_SHIFT)
+#define I40E_VFQF_HREGION_REGION_4_SHIFT 17
+#define I40E_VFQF_HREGION_REGION_4_MASK (0x7 << I40E_VFQF_HREGION_REGION_4_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_5_SHIFT 20
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_5_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_5_SHIFT)
+#define I40E_VFQF_HREGION_REGION_5_SHIFT 21
+#define I40E_VFQF_HREGION_REGION_5_MASK (0x7 << I40E_VFQF_HREGION_REGION_5_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_6_SHIFT 24
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_6_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_6_SHIFT)
+#define I40E_VFQF_HREGION_REGION_6_SHIFT 25
+#define I40E_VFQF_HREGION_REGION_6_MASK (0x7 << I40E_VFQF_HREGION_REGION_6_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT 28
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_7_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT)
+#define I40E_VFQF_HREGION_REGION_7_SHIFT 29
+#define I40E_VFQF_HREGION_REGION_7_MASK (0x7 << I40E_VFQF_HREGION_REGION_7_SHIFT)
+
+#endif
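Every register in the block above follows one convention: an address #define, then a _SHIFT/_MASK pair per field. A field is read by masking the 32-bit register value and shifting it down. The sketch below is illustrative only, not part of the patch; rd32() stands in for the driver's register-read helper:

	u32 reg = rd32(hw, I40E_GL_MDET_TX);

	if (reg & I40E_GL_MDET_TX_VALID_MASK) {
		u8 func = (reg & I40E_GL_MDET_TX_FUNCTION_MASK) >>
			  I40E_GL_MDET_TX_FUNCTION_SHIFT;
		u16 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
			    I40E_GL_MDET_TX_QUEUE_SHIFT;
		/* a malicious-driver-detection event was latched for
		 * this function/queue pair
		 */
	}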
diff --git a/drivers/net/ethernet/intel/i40e/i40e_status.h b/drivers/net/ethernet/intel/i40e/i40e_status.h
new file mode 100644
index 0000000..5e5bcdd
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_status.h
@@ -0,0 +1,101 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_STATUS_H_
+#define _I40E_STATUS_H_
+
+/* Error Codes */
+enum i40e_status_code {
+ I40E_SUCCESS = 0,
+ I40E_ERR_NVM = -1,
+ I40E_ERR_NVM_CHECKSUM = -2,
+ I40E_ERR_PHY = -3,
+ I40E_ERR_CONFIG = -4,
+ I40E_ERR_PARAM = -5,
+ I40E_ERR_MAC_TYPE = -6,
+ I40E_ERR_UNKNOWN_PHY = -7,
+ I40E_ERR_LINK_SETUP = -8,
+ I40E_ERR_ADAPTER_STOPPED = -9,
+ I40E_ERR_INVALID_MAC_ADDR = -10,
+ I40E_ERR_DEVICE_NOT_SUPPORTED = -11,
+ I40E_ERR_MASTER_REQUESTS_PENDING = -12,
+ I40E_ERR_INVALID_LINK_SETTINGS = -13,
+ I40E_ERR_AUTONEG_NOT_COMPLETE = -14,
+ I40E_ERR_RESET_FAILED = -15,
+ I40E_ERR_SWFW_SYNC = -16,
+ I40E_ERR_NO_AVAILABLE_VSI = -17,
+ I40E_ERR_NO_MEMORY = -18,
+ I40E_ERR_BAD_PTR = -19,
+ I40E_ERR_RING_FULL = -20,
+ I40E_ERR_INVALID_PD_ID = -21,
+ I40E_ERR_INVALID_QP_ID = -22,
+ I40E_ERR_INVALID_CQ_ID = -23,
+ I40E_ERR_INVALID_CEQ_ID = -24,
+ I40E_ERR_INVALID_AEQ_ID = -25,
+ I40E_ERR_INVALID_SIZE = -26,
+ I40E_ERR_INVALID_ARP_INDEX = -27,
+ I40E_ERR_INVALID_FPM_FUNC_ID = -28,
+ I40E_ERR_QP_INVALID_MSG_SIZE = -29,
+ I40E_ERR_QP_TOOMANY_WRS_POSTED = -30,
+ I40E_ERR_INVALID_FRAG_COUNT = -31,
+ I40E_ERR_QUEUE_EMPTY = -32,
+ I40E_ERR_INVALID_ALIGNMENT = -33,
+ I40E_ERR_FLUSHED_QUEUE = -34,
+ I40E_ERR_INVALID_PUSH_PAGE_INDEX = -35,
+ I40E_ERR_INVALID_IMM_DATA_SIZE = -36,
+ I40E_ERR_TIMEOUT = -37,
+ I40E_ERR_OPCODE_MISMATCH = -38,
+ I40E_ERR_CQP_COMPL_ERROR = -39,
+ I40E_ERR_INVALID_VF_ID = -40,
+ I40E_ERR_INVALID_HMCFN_ID = -41,
+ I40E_ERR_BACKING_PAGE_ERROR = -42,
+ I40E_ERR_NO_PBLCHUNKS_AVAILABLE = -43,
+ I40E_ERR_INVALID_PBLE_INDEX = -44,
+ I40E_ERR_INVALID_SD_INDEX = -45,
+ I40E_ERR_INVALID_PAGE_DESC_INDEX = -46,
+ I40E_ERR_INVALID_SD_TYPE = -47,
+ I40E_ERR_MEMCPY_FAILED = -48,
+ I40E_ERR_INVALID_HMC_OBJ_INDEX = -49,
+ I40E_ERR_INVALID_HMC_OBJ_COUNT = -50,
+ I40E_ERR_INVALID_SRQ_ARM_LIMIT = -51,
+ I40E_ERR_SRQ_ENABLED = -52,
+ I40E_ERR_ADMIN_QUEUE_ERROR = -53,
+ I40E_ERR_ADMIN_QUEUE_TIMEOUT = -54,
+ I40E_ERR_BUF_TOO_SHORT = -55,
+ I40E_ERR_ADMIN_QUEUE_FULL = -56,
+ I40E_ERR_ADMIN_QUEUE_NO_WORK = -57,
+ I40E_ERR_BAD_IWARP_CQE = -58,
+ I40E_ERR_NVM_BLANK_MODE = -59,
+ I40E_ERR_NOT_IMPLEMENTED = -60,
+ I40E_ERR_PE_DOORBELL_NOT_ENABLED = -61,
+ I40E_ERR_DIAG_TEST_FAILED = -62,
+ I40E_ERR_NOT_READY = -63,
+ I40E_NOT_SUPPORTED = -64,
+ I40E_ERR_FIRMWARE_API_VERSION = -65,
+};
+
+#endif /* _I40E_STATUS_H_ */
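These codes are driver-internal and deliberately distinct from kernel errno values, so callers that cross back into generic kernel code have to translate. A minimal sketch of such a translation, assuming only the enum above (the mapping choices here are illustrative, not the driver's own):

static int i40e_status_to_errno(enum i40e_status_code status)
{
	switch (status) {
	case I40E_SUCCESS:
		return 0;
	case I40E_ERR_NO_MEMORY:
		return -ENOMEM;		/* allocation failure */
	case I40E_ERR_PARAM:
	case I40E_ERR_INVALID_SIZE:
		return -EINVAL;		/* caller-supplied bad input */
	case I40E_ERR_TIMEOUT:
	case I40E_ERR_ADMIN_QUEUE_TIMEOUT:
		return -ETIMEDOUT;	/* hardware did not respond */
	default:
		return -EIO;		/* catch-all for device errors */
	}
}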
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
new file mode 100644
index 0000000..49d2cfa
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -0,0 +1,1817 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#include "i40e.h"
+
+static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
+ u32 td_tag)
+{
+ return cpu_to_le64(I40E_TX_DESC_DTYPE_DATA |
+ ((u64)td_cmd << I40E_TXD_QW1_CMD_SHIFT) |
+ ((u64)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
+ ((u64)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
+ ((u64)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT));
+}
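build_ctob() packs the four per-descriptor fields into the single little-endian quadword the hardware consumes. Assuming the QW1 shift values from i40e_type.h, which is not part of this hunk (CMD at bit 4, OFFSET at bit 16, TX_BUF_SZ at bit 34, L2TAG1 at bit 48), the layout works out as sketched here:

/* Assumed QW1 field positions (shift values from i40e_type.h):
 *
 *   bit  0  DTYPE   I40E_TX_DESC_DTYPE_DATA
 *   bit  4  CMD     td_cmd
 *   bit 16  OFFSET  td_offset
 *   bit 34  SIZE    size in bytes
 *   bit 48  L2TAG1  td_tag
 */
__le64 qw = build_ctob(I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS,
		       0, skb->len, 0);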
+
+/**
+ * i40e_program_fdir_filter - Program a Flow Director filter
+ * @fdir_data: Packet data that will be filter parameters
+ * @pf: The pf pointer
+ * @add: True for add/update, False for remove
+ **/
+int i40e_program_fdir_filter(struct i40e_fdir_data *fdir_data,
+ struct i40e_pf *pf, bool add)
+{
+ struct i40e_filter_program_desc *fdir_desc;
+ struct i40e_tx_buffer *tx_buf;
+ struct i40e_tx_desc *tx_desc;
+ struct i40e_ring *tx_ring;
+ struct i40e_vsi *vsi;
+ struct device *dev;
+ dma_addr_t dma;
+ u32 td_cmd = 0;
+ u16 i;
+
+ /* find existing FDIR VSI */
+ vsi = NULL;
+ for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
+ if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR)
+ vsi = pf->vsi[i];
+ if (!vsi)
+ return -ENOENT;
+
+ tx_ring = &vsi->tx_rings[0];
+ dev = tx_ring->dev;
+
+ dma = dma_map_single(dev, fdir_data->raw_packet,
+ I40E_FDIR_MAX_RAW_PACKET_LOOKUP, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, dma))
+ goto dma_fail;
+
+ /* grab the next descriptor */
+ fdir_desc = I40E_TX_FDIRDESC(tx_ring, tx_ring->next_to_use);
+ tx_buf = &tx_ring->tx_bi[tx_ring->next_to_use];
+ tx_ring->next_to_use++;
+ if (tx_ring->next_to_use == tx_ring->count)
+ tx_ring->next_to_use = 0;
+
+ fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32((fdir_data->q_index
+ << I40E_TXD_FLTR_QW0_QINDEX_SHIFT)
+ & I40E_TXD_FLTR_QW0_QINDEX_MASK);
+
+ fdir_desc->qindex_flex_ptype_vsi |= cpu_to_le32((fdir_data->flex_off
+ << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT)
+ & I40E_TXD_FLTR_QW0_FLEXOFF_MASK);
+
+ fdir_desc->qindex_flex_ptype_vsi |= cpu_to_le32((fdir_data->pctype
+ << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT)
+ & I40E_TXD_FLTR_QW0_PCTYPE_MASK);
+
+ /* Use LAN VSI Id if not programmed by user */
+ if (fdir_data->dest_vsi == 0)
+ fdir_desc->qindex_flex_ptype_vsi |=
+ cpu_to_le32((pf->vsi[pf->lan_vsi]->id)
+ << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT);
+ else
+ fdir_desc->qindex_flex_ptype_vsi |=
+ cpu_to_le32((fdir_data->dest_vsi
+ << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT)
+ & I40E_TXD_FLTR_QW0_DEST_VSI_MASK);
+
+ fdir_desc->dtype_cmd_cntindex =
+ cpu_to_le32(I40E_TX_DESC_DTYPE_FILTER_PROG);
+
+ if (add)
+ fdir_desc->dtype_cmd_cntindex |= cpu_to_le32(
+ I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE
+ << I40E_TXD_FLTR_QW1_PCMD_SHIFT);
+ else
+ fdir_desc->dtype_cmd_cntindex |= cpu_to_le32(
+ I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE
+ << I40E_TXD_FLTR_QW1_PCMD_SHIFT);
+
+ fdir_desc->dtype_cmd_cntindex |= cpu_to_le32((fdir_data->dest_ctl
+ << I40E_TXD_FLTR_QW1_DEST_SHIFT)
+ & I40E_TXD_FLTR_QW1_DEST_MASK);
+
+ fdir_desc->dtype_cmd_cntindex |= cpu_to_le32(
+ (fdir_data->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT)
+ & I40E_TXD_FLTR_QW1_FD_STATUS_MASK);
+
+ if (fdir_data->cnt_index != 0) {
+ fdir_desc->dtype_cmd_cntindex |=
+ cpu_to_le32(I40E_TXD_FLTR_QW1_CNT_ENA_MASK);
+ fdir_desc->dtype_cmd_cntindex |=
+ cpu_to_le32((fdir_data->cnt_index
+ << I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT)
+ & I40E_TXD_FLTR_QW1_CNTINDEX_MASK);
+ }
+
+ fdir_desc->fd_id = cpu_to_le32(fdir_data->fd_id);
+
+ /* Now program a dummy descriptor */
+ tx_desc = I40E_TX_DESC(tx_ring, tx_ring->next_to_use);
+ tx_buf = &tx_ring->tx_bi[tx_ring->next_to_use];
+ tx_ring->next_to_use++;
+ if (tx_ring->next_to_use == tx_ring->count)
+ tx_ring->next_to_use = 0;
+
+ tx_desc->buffer_addr = cpu_to_le64(dma);
+ td_cmd = I40E_TX_DESC_CMD_EOP |
+ I40E_TX_DESC_CMD_RS |
+ I40E_TX_DESC_CMD_DUMMY;
+
+ tx_desc->cmd_type_offset_bsz =
+ build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_LOOKUP, 0);
+
+ /* Mark the data descriptor to be watched */
+ tx_buf->next_to_watch = tx_desc;
+
+ /* Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64).
+ */
+ wmb();
+
+ writel(tx_ring->next_to_use, tx_ring->tail);
+ return 0;
+
+dma_fail:
+ return -1;
+}
+
+/**
+ * i40e_fd_handle_status - check the Programming Status for FD
+ * @rx_ring: the Rx ring for this descriptor
+ * @qw: the descriptor data
+ * @prog_id: the id originally used for programming
+ *
+ * This is used to verify whether the FD programming or invalidation
+ * requested by SW completed successfully in HW, and to take action accordingly.
+ **/
+static void i40e_fd_handle_status(struct i40e_ring *rx_ring, u32 qw, u8 prog_id)
+{
+ struct pci_dev *pdev = rx_ring->vsi->back->pdev;
+ u32 error;
+
+ error = (qw & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
+ I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;
+
+ /* for now just print the Status */
+ dev_info(&pdev->dev, "FD programming id %02x, Status %08x\n",
+ prog_id, error);
+}
+
+/**
+ * i40e_unmap_tx_resource - Release a Tx buffer
+ * @ring: the ring that owns the buffer
+ * @tx_buffer: the buffer to free
+ **/
+static inline void i40e_unmap_tx_resource(struct i40e_ring *ring,
+ struct i40e_tx_buffer *tx_buffer)
+{
+ if (tx_buffer->dma) {
+ if (tx_buffer->tx_flags & I40E_TX_FLAGS_MAPPED_AS_PAGE)
+ dma_unmap_page(ring->dev,
+ tx_buffer->dma,
+ tx_buffer->length,
+ DMA_TO_DEVICE);
+ else
+ dma_unmap_single(ring->dev,
+ tx_buffer->dma,
+ tx_buffer->length,
+ DMA_TO_DEVICE);
+ }
+ tx_buffer->dma = 0;
+ tx_buffer->time_stamp = 0;
+}
+
+/**
+ * i40e_clean_tx_ring - Free all Tx ring buffers
+ * @tx_ring: ring to be cleaned
+ **/
+void i40e_clean_tx_ring(struct i40e_ring *tx_ring)
+{
+ struct i40e_tx_buffer *tx_buffer;
+ unsigned long bi_size;
+ u16 i;
+
+ /* ring already cleared, nothing to do */
+ if (!tx_ring->tx_bi)
+ return;
+
+ /* Free all the Tx ring sk_buffs */
+ for (i = 0; i < tx_ring->count; i++) {
+ tx_buffer = &tx_ring->tx_bi[i];
+ i40e_unmap_tx_resource(tx_ring, tx_buffer);
+ if (tx_buffer->skb)
+ dev_kfree_skb_any(tx_buffer->skb);
+ tx_buffer->skb = NULL;
+ }
+
+ bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
+ memset(tx_ring->tx_bi, 0, bi_size);
+
+ /* Zero out the descriptor ring */
+ memset(tx_ring->desc, 0, tx_ring->size);
+
+ tx_ring->next_to_use = 0;
+ tx_ring->next_to_clean = 0;
+}
+
+/**
+ * i40e_free_tx_resources - Free Tx resources per queue
+ * @tx_ring: Tx descriptor ring for a specific queue
+ *
+ * Free all transmit software resources
+ **/
+void i40e_free_tx_resources(struct i40e_ring *tx_ring)
+{
+ i40e_clean_tx_ring(tx_ring);
+ kfree(tx_ring->tx_bi);
+ tx_ring->tx_bi = NULL;
+
+ if (tx_ring->desc) {
+ dma_free_coherent(tx_ring->dev, tx_ring->size,
+ tx_ring->desc, tx_ring->dma);
+ tx_ring->desc = NULL;
+ }
+}
+
+/**
+ * i40e_get_tx_pending - how many Tx descriptors are not yet processed
+ * @ring: the ring of descriptors
+ *
+ * Since there is no access to the ring head register
+ * in XL710, we need to use our local copies
+ **/
+static u32 i40e_get_tx_pending(struct i40e_ring *ring)
+{
+ u32 ntu = ((ring->next_to_clean <= ring->next_to_use)
+ ? ring->next_to_use
+ : ring->next_to_use + ring->count);
+ return ntu - ring->next_to_clean;
+}
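The conditional handles the case where next_to_use has wrapped past the end of the ring while next_to_clean has not yet followed. A worked example, with an assumed 512-entry ring:

/* Worked example, assuming ring->count == 512:
 *
 * no wrap:  next_to_clean = 10,  next_to_use = 50
 *           pending = 50 - 10 = 40
 *
 * wrapped:  next_to_clean = 500, next_to_use = 20
 *           pending = (20 + 512) - 500 = 32
 */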
+
+/**
+ * i40e_check_tx_hang - Is there a hang in the Tx queue
+ * @tx_ring: the ring of descriptors
+ **/
+static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
+{
+ u32 tx_pending = i40e_get_tx_pending(tx_ring);
+ bool ret = false;
+
+ clear_check_for_tx_hang(tx_ring);
+
+ /* Check for a hung queue, but be thorough. This verifies
+ * that a transmit has been completed since the previous
+ * check AND there is at least one packet pending. The
+ * ARMED bit is set to indicate a potential hang. The
+ * bit is cleared if a pause frame is received to remove
+ * false hang detection due to PFC or 802.3x frames. By
+ * requiring this to fail twice we avoid races with
+ * PFC clearing the ARMED bit and conditions where we
+ * run the check_tx_hang logic with a transmit completion
+ * pending but without time to complete it yet.
+ */
+ if ((tx_ring->tx_stats.tx_done_old == tx_ring->tx_stats.packets) &&
+ tx_pending) {
+ /* make sure it is true for two checks in a row */
+ ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED,
+ &tx_ring->state);
+ } else {
+ /* update completed stats and disarm the hang check */
+ tx_ring->tx_stats.tx_done_old = tx_ring->tx_stats.packets;
+ clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state);
+ }
+
+ return ret;
+}
+
+/**
+ * i40e_clean_tx_irq - Reclaim resources after transmit completes
+ * @tx_ring: tx ring to clean
+ * @budget: how many cleans we're allowed
+ *
+ * Returns true if there's any budget left (i.e. the clean is finished)
+ **/
+static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
+{
+ u16 i = tx_ring->next_to_clean;
+ struct i40e_tx_buffer *tx_buf;
+ struct i40e_tx_desc *tx_desc;
+ unsigned int total_packets = 0;
+ unsigned int total_bytes = 0;
+
+ tx_buf = &tx_ring->tx_bi[i];
+ tx_desc = I40E_TX_DESC(tx_ring, i);
+
+ for (; budget; budget--) {
+ struct i40e_tx_desc *eop_desc;
+
+ eop_desc = tx_buf->next_to_watch;
+
+ /* if next_to_watch is not set then there is no work pending */
+ if (!eop_desc)
+ break;
+
+ /* if the descriptor isn't done, no work yet to do */
+ if (!(eop_desc->cmd_type_offset_bsz &
+ cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
+ break;
+
+ /* count the packet as being completed */
+ tx_ring->tx_stats.completed++;
+ tx_buf->next_to_watch = NULL;
+ tx_buf->time_stamp = 0;
+
+ /* prevent any other reads prior to eop_desc being verified */
+ rmb();
+
+ do {
+ i40e_unmap_tx_resource(tx_ring, tx_buf);
+
+ /* clear dtype status */
+ tx_desc->cmd_type_offset_bsz &=
+ ~cpu_to_le64(I40E_TXD_QW1_DTYPE_MASK);
+
+ if (likely(tx_desc == eop_desc)) {
+ eop_desc = NULL;
+
+ dev_kfree_skb_any(tx_buf->skb);
+ tx_buf->skb = NULL;
+
+ total_bytes += tx_buf->bytecount;
+ total_packets += tx_buf->gso_segs;
+ }
+
+ tx_buf++;
+ tx_desc++;
+ i++;
+ if (unlikely(i == tx_ring->count)) {
+ i = 0;
+ tx_buf = tx_ring->tx_bi;
+ tx_desc = I40E_TX_DESC(tx_ring, 0);
+ }
+ } while (eop_desc);
+ }
+
+ tx_ring->next_to_clean = i;
+ tx_ring->tx_stats.bytes += total_bytes;
+ tx_ring->tx_stats.packets += total_packets;
+ tx_ring->q_vector->tx.total_bytes += total_bytes;
+ tx_ring->q_vector->tx.total_packets += total_packets;
+ if (check_for_tx_hang(tx_ring) && i40e_check_tx_hang(tx_ring)) {
+ /* schedule immediate reset if we believe we hung */
+ dev_info(tx_ring->dev, "Detected Tx Unit Hang\n"
+ " VSI <%d>\n"
+ " Tx Queue <%d>\n"
+ " next_to_use <%x>\n"
+ " next_to_clean <%x>\n",
+ tx_ring->vsi->seid,
+ tx_ring->queue_index,
+ tx_ring->next_to_use, i);
+ dev_info(tx_ring->dev, "tx_bi[next_to_clean]\n"
+ " time_stamp <%lx>\n"
+ " jiffies <%lx>\n",
+ tx_ring->tx_bi[i].time_stamp, jiffies);
+
+ netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
+
+ dev_info(tx_ring->dev,
+ "tx hang detected on queue %d, resetting adapter\n",
+ tx_ring->queue_index);
+
+ tx_ring->netdev->netdev_ops->ndo_tx_timeout(tx_ring->netdev);
+
+ /* the adapter is about to reset, no point in enabling stuff */
+ return true;
+ }
+
+#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
+ if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
+ (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
+ /* Make sure that anybody stopping the queue after this
+ * sees the new next_to_clean.
+ */
+ smp_mb();
+ if (__netif_subqueue_stopped(tx_ring->netdev,
+ tx_ring->queue_index) &&
+ !test_bit(__I40E_DOWN, &tx_ring->vsi->state)) {
+ netif_wake_subqueue(tx_ring->netdev,
+ tx_ring->queue_index);
+ ++tx_ring->tx_stats.restart_queue;
+ }
+ }
+
+ return budget > 0;
+}
+
+/**
+ * i40e_set_new_dynamic_itr - Find new ITR level
+ * @rc: structure containing ring performance data
+ *
+ * Stores a new ITR value based on packets and byte counts during
+ * the last interrupt. The advantage of per interrupt computation
+ * is faster updates and more accurate ITR for the current traffic
+ * pattern. Constants in this function were computed based on
+ * theoretical maximum wire speed and thresholds were set based on
+ * testing data as well as attempting to minimize response time
+ * while increasing bulk throughput.
+ **/
+static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
+{
+ enum i40e_latency_range new_latency_range = rc->latency_range;
+ u32 new_itr = rc->itr;
+ int bytes_per_int;
+
+ if (rc->total_packets == 0 || !rc->itr)
+ return;
+
+ /* simple throttlerate management
+ * 0-10MB/s lowest (100000 ints/s)
+ * 10-20MB/s low (20000 ints/s)
+ * 20-1249MB/s bulk (8000 ints/s)
+ */
+ bytes_per_int = rc->total_bytes / rc->itr;
+ switch (new_latency_range) {
+ case I40E_LOWEST_LATENCY:
+ if (bytes_per_int > 10)
+ new_latency_range = I40E_LOW_LATENCY;
+ break;
+ case I40E_LOW_LATENCY:
+ if (bytes_per_int > 20)
+ new_latency_range = I40E_BULK_LATENCY;
+ else if (bytes_per_int <= 10)
+ new_latency_range = I40E_LOWEST_LATENCY;
+ break;
+ case I40E_BULK_LATENCY:
+ if (bytes_per_int <= 20)
+ new_latency_range = I40E_LOW_LATENCY;
+ break;
+ }
+ rc->latency_range = new_latency_range;
+
+ switch (new_latency_range) {
+ case I40E_LOWEST_LATENCY:
+ new_itr = I40E_ITR_100K;
+ break;
+ case I40E_LOW_LATENCY:
+ new_itr = I40E_ITR_20K;
+ break;
+ case I40E_BULK_LATENCY:
+ new_itr = I40E_ITR_8K;
+ break;
+ default:
+ break;
+ }
+
+ if (new_itr != rc->itr) {
+ /* do an exponential smoothing */
+ new_itr = (10 * new_itr * rc->itr) /
+ ((9 * new_itr) + rc->itr);
+ rc->itr = new_itr & I40E_MAX_ITR;
+ }
+
+ rc->total_bytes = 0;
+ rc->total_packets = 0;
+}
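The smoothing step is easiest to see with numbers: the formula weights the old ITR heavily, so a single bursty interrupt cannot swing the throttle rate. For example:

/* Worked example: rc->itr == 100, computed target new_itr == 40.
 *
 *   new_itr = (10 * 40 * 100) / ((9 * 40) + 100)
 *           = 40000 / 460
 *           = 86 (integer division)
 *
 * The ITR moves only part of the way toward the target on each
 * update, damping oscillation between latency ranges.
 */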
+
+/**
+ * i40e_update_dynamic_itr - Adjust ITR based on bytes per int
+ * @q_vector: the vector to adjust
+ **/
+static void i40e_update_dynamic_itr(struct i40e_q_vector *q_vector)
+{
+ u16 vector = q_vector->vsi->base_vector + q_vector->v_idx;
+ struct i40e_hw *hw = &q_vector->vsi->back->hw;
+ u32 reg_addr;
+ u16 old_itr;
+
+ reg_addr = I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1);
+ old_itr = q_vector->rx.itr;
+ i40e_set_new_dynamic_itr(&q_vector->rx);
+ if (old_itr != q_vector->rx.itr)
+ wr32(hw, reg_addr, q_vector->rx.itr);
+
+ reg_addr = I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1);
+ old_itr = q_vector->tx.itr;
+ i40e_set_new_dynamic_itr(&q_vector->tx);
+ if (old_itr != q_vector->tx.itr)
+ wr32(hw, reg_addr, q_vector->tx.itr);
+
+ i40e_flush(hw);
+}
+
+/**
+ * i40e_clean_programming_status - clean the programming status descriptor
+ * @rx_ring: the rx ring that has this descriptor
+ * @rx_desc: the rx descriptor written back by HW
+ *
+ * Flow director should handle FD_FILTER_STATUS to check whether its filter
+ * programming succeeded and take action accordingly. FCoE should handle its
+ * context/filter programming/invalidation status and take action.
+ *
+ **/
+static void i40e_clean_programming_status(struct i40e_ring *rx_ring,
+ union i40e_rx_desc *rx_desc)
+{
+ u64 qw;
+ u8 id;
+
+ qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+ id = (qw & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >>
+ I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;
+
+ if (id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS)
+ i40e_fd_handle_status(rx_ring, qw, id);
+}
+
+/**
+ * i40e_setup_tx_descriptors - Allocate the Tx descriptors
+ * @tx_ring: the tx ring to set up
+ *
+ * Return 0 on success, negative on error
+ **/
+int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
+{
+ struct device *dev = tx_ring->dev;
+ int bi_size;
+
+ if (!dev)
+ return -ENOMEM;
+
+ bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
+ tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL);
+ if (!tx_ring->tx_bi)
+ goto err;
+
+ /* round up to nearest 4K */
+ tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
+ tx_ring->size = ALIGN(tx_ring->size, 4096);
+ tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
+ &tx_ring->dma, GFP_KERNEL);
+ if (!tx_ring->desc) {
+ dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
+ tx_ring->size);
+ goto err;
+ }
+
+ tx_ring->next_to_use = 0;
+ tx_ring->next_to_clean = 0;
+ return 0;
+
+err:
+ kfree(tx_ring->tx_bi);
+ tx_ring->tx_bi = NULL;
+ return -ENOMEM;
+}
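The descriptor ring is sized up to a whole number of 4 KB pages. Assuming the 16-byte Tx descriptor (two 64-bit words, matching the quadwords used by build_ctob() above), the round-up looks like this:

/* Worked example, assuming sizeof(struct i40e_tx_desc) == 16:
 *
 *   count = 1024:  size = 1024 * 16 = 16384, already 4K aligned
 *   count = 1000:  size = 1000 * 16 = 16000
 *                  ALIGN(16000, 4096) = 16384
 */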
+
+/**
+ * i40e_clean_rx_ring - Free Rx buffers
+ * @rx_ring: ring to be cleaned
+ **/
+void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
+{
+ struct device *dev = rx_ring->dev;
+ struct i40e_rx_buffer *rx_bi;
+ unsigned long bi_size;
+ u16 i;
+
+ /* ring already cleared, nothing to do */
+ if (!rx_ring->rx_bi)
+ return;
+
+ /* Free all the Rx ring sk_buffs */
+ for (i = 0; i < rx_ring->count; i++) {
+ rx_bi = &rx_ring->rx_bi[i];
+ if (rx_bi->dma) {
+ dma_unmap_single(dev,
+ rx_bi->dma,
+ rx_ring->rx_buf_len,
+ DMA_FROM_DEVICE);
+ rx_bi->dma = 0;
+ }
+ if (rx_bi->skb) {
+ dev_kfree_skb(rx_bi->skb);
+ rx_bi->skb = NULL;
+ }
+ if (rx_bi->page) {
+ if (rx_bi->page_dma) {
+ dma_unmap_page(dev,
+ rx_bi->page_dma,
+ PAGE_SIZE / 2,
+ DMA_FROM_DEVICE);
+ rx_bi->page_dma = 0;
+ }
+ __free_page(rx_bi->page);
+ rx_bi->page = NULL;
+ rx_bi->page_offset = 0;
+ }
+ }
+
+ bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
+ memset(rx_ring->rx_bi, 0, bi_size);
+
+ /* Zero out the descriptor ring */
+ memset(rx_ring->desc, 0, rx_ring->size);
+
+ rx_ring->next_to_clean = 0;
+ rx_ring->next_to_use = 0;
+}
+
+/**
+ * i40e_free_rx_resources - Free Rx resources
+ * @rx_ring: ring to clean the resources from
+ *
+ * Free all receive software resources
+ **/
+void i40e_free_rx_resources(struct i40e_ring *rx_ring)
+{
+ i40e_clean_rx_ring(rx_ring);
+ kfree(rx_ring->rx_bi);
+ rx_ring->rx_bi = NULL;
+
+ if (rx_ring->desc) {
+ dma_free_coherent(rx_ring->dev, rx_ring->size,
+ rx_ring->desc, rx_ring->dma);
+ rx_ring->desc = NULL;
+ }
+}
+
+/**
+ * i40e_setup_rx_descriptors - Allocate Rx descriptors
+ * @rx_ring: Rx descriptor ring (for a specific queue) to setup
+ *
+ * Returns 0 on success, negative on failure
+ **/
+int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
+{
+ struct device *dev = rx_ring->dev;
+ int bi_size;
+
+ bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
+ rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL);
+ if (!rx_ring->rx_bi)
+ goto err;
+
+ /* Round up to nearest 4K */
+ rx_ring->size = ring_is_16byte_desc_enabled(rx_ring)
+ ? rx_ring->count * sizeof(union i40e_16byte_rx_desc)
+ : rx_ring->count * sizeof(union i40e_32byte_rx_desc);
+ rx_ring->size = ALIGN(rx_ring->size, 4096);
+ rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
+ &rx_ring->dma, GFP_KERNEL);
+
+ if (!rx_ring->desc) {
+ dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
+ rx_ring->size);
+ goto err;
+ }
+
+ rx_ring->next_to_clean = 0;
+ rx_ring->next_to_use = 0;
+
+ return 0;
+err:
+ kfree(rx_ring->rx_bi);
+ rx_ring->rx_bi = NULL;
+ return -ENOMEM;
+}
+
+/**
+ * i40e_release_rx_desc - Store the new next_to_use and bump the tail
+ * @rx_ring: ring to bump
+ * @val: new tail (next_to_use) index
+ **/
+static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
+{
+ rx_ring->next_to_use = val;
+ /* Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64).
+ */
+ wmb();
+ writel(val, rx_ring->tail);
+}
+
+/**
+ * i40e_alloc_rx_buffers - Replace used receive buffers; packet split
+ * @rx_ring: ring to place buffers on
+ * @cleaned_count: number of buffers to replace
+ **/
+void i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
+{
+ u16 i = rx_ring->next_to_use;
+ union i40e_rx_desc *rx_desc;
+ struct i40e_rx_buffer *bi;
+ struct sk_buff *skb;
+
+ /* do nothing if no valid netdev defined */
+ if (!rx_ring->netdev || !cleaned_count)
+ return;
+
+ while (cleaned_count--) {
+ rx_desc = I40E_RX_DESC(rx_ring, i);
+ bi = &rx_ring->rx_bi[i];
+ skb = bi->skb;
+
+ if (!skb) {
+ skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
+ rx_ring->rx_buf_len);
+ if (!skb) {
+ rx_ring->rx_stats.alloc_rx_buff_failed++;
+ goto no_buffers;
+ }
+ /* initialize queue mapping */
+ skb_record_rx_queue(skb, rx_ring->queue_index);
+ bi->skb = skb;
+ }
+
+ if (!bi->dma) {
+ bi->dma = dma_map_single(rx_ring->dev,
+ skb->data,
+ rx_ring->rx_buf_len,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(rx_ring->dev, bi->dma)) {
+ rx_ring->rx_stats.alloc_rx_buff_failed++;
+ bi->dma = 0;
+ goto no_buffers;
+ }
+ }
+
+ if (ring_is_ps_enabled(rx_ring)) {
+ if (!bi->page) {
+ bi->page = alloc_page(GFP_ATOMIC);
+ if (!bi->page) {
+ rx_ring->rx_stats.alloc_rx_page_failed++;
+ goto no_buffers;
+ }
+ }
+
+ if (!bi->page_dma) {
+ /* use a half page if we're re-using */
+ bi->page_offset ^= PAGE_SIZE / 2;
+ bi->page_dma = dma_map_page(rx_ring->dev,
+ bi->page,
+ bi->page_offset,
+ PAGE_SIZE / 2,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(rx_ring->dev,
+ bi->page_dma)) {
+ rx_ring->rx_stats.alloc_rx_page_failed++;
+ bi->page_dma = 0;
+ goto no_buffers;
+ }
+ }
+
+ /* Refresh the desc even if buffer_addrs didn't change
+ * because each write-back erases this info.
+ */
+ rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
+ rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
+ } else {
+ rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
+ rx_desc->read.hdr_addr = 0;
+ }
+ i++;
+ if (i == rx_ring->count)
+ i = 0;
+ }
+
+no_buffers:
+ if (rx_ring->next_to_use != i)
+ i40e_release_rx_desc(rx_ring, i);
+}
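In packet-split mode the driver hands each page to the hardware one half at a time; the XOR above flips page_offset between the two halves so a page is reused once before being released. A worked example, assuming 4 KB pages:

/* Worked example, assuming PAGE_SIZE == 4096:
 *
 *   page_offset = 0                 (freshly allocated page)
 *   page_offset ^= 2048  ->  2048   maps bytes 2048..4095
 *   page_offset ^= 2048  ->  0      maps bytes 0..2047 again
 */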
+
+/**
+ * i40e_receive_skb - Send a completed packet up the stack
+ * @rx_ring: rx ring in play
+ * @skb: packet to send up
+ * @vlan_tag: vlan tag for packet
+ **/
+static void i40e_receive_skb(struct i40e_ring *rx_ring,
+ struct sk_buff *skb, u16 vlan_tag)
+{
+ struct i40e_q_vector *q_vector = rx_ring->q_vector;
+ struct i40e_vsi *vsi = rx_ring->vsi;
+ u64 flags = vsi->back->flags;
+
+ if (vlan_tag & VLAN_VID_MASK)
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
+
+ if (flags & I40E_FLAG_IN_NETPOLL)
+ netif_rx(skb);
+ else
+ napi_gro_receive(&q_vector->napi, skb);
+}
+
+/**
+ * i40e_rx_checksum - Indicate in skb if hw indicated a good cksum
+ * @vsi: the VSI we care about
+ * @skb: skb currently being received and modified
+ * @rx_status: status value of last descriptor in packet
+ * @rx_error: error value of last descriptor in packet
+ **/
+static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
+ struct sk_buff *skb,
+ u32 rx_status,
+ u32 rx_error)
+{
+ skb->ip_summed = CHECKSUM_NONE;
+
+ /* Rx csum enabled and ip headers found? */
+ if (!(vsi->netdev->features & NETIF_F_RXCSUM &&
+ rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
+ return;
+
+ /* IP or L4 checksum error */
+ if (rx_error & ((1 << I40E_RX_DESC_ERROR_IPE_SHIFT) |
+ (1 << I40E_RX_DESC_ERROR_L4E_SHIFT))) {
+ vsi->back->hw_csum_rx_error++;
+ return;
+ }
+
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+}
+
+/**
+ * i40e_rx_hash - returns the hash value from the Rx descriptor
+ * @ring: descriptor ring
+ * @rx_desc: specific descriptor
+ **/
+static inline u32 i40e_rx_hash(struct i40e_ring *ring,
+ union i40e_rx_desc *rx_desc)
+{
+ if (ring->netdev->features & NETIF_F_RXHASH) {
+ if ((le64_to_cpu(rx_desc->wb.qword1.status_error_len) >>
+ I40E_RX_DESC_STATUS_FLTSTAT_SHIFT) &
+ I40E_RX_DESC_FLTSTAT_RSS_HASH)
+ return le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
+ }
+ return 0;
+}
+
+/**
+ * i40e_clean_rx_irq - Reclaim resources after receive completes
+ * @rx_ring: rx ring to clean
+ * @budget: how many cleans we're allowed
+ *
+ * Returns true if there's any budget left (i.e. the clean is finished)
+ **/
+static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
+{
+ unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+ u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo;
+ u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
+ const int current_node = numa_node_id();
+ struct i40e_vsi *vsi = rx_ring->vsi;
+ u16 i = rx_ring->next_to_clean;
+ union i40e_rx_desc *rx_desc;
+ u32 rx_error, rx_status;
+ u64 qword;
+
+ rx_desc = I40E_RX_DESC(rx_ring, i);
+ qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+ rx_status = (qword & I40E_RXD_QW1_STATUS_MASK)
+ >> I40E_RXD_QW1_STATUS_SHIFT;
+
+ while (rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) {
+ union i40e_rx_desc *next_rxd;
+ struct i40e_rx_buffer *rx_bi;
+ struct sk_buff *skb;
+ u16 vlan_tag;
+ if (i40e_rx_is_programming_status(qword)) {
+ i40e_clean_programming_status(rx_ring, rx_desc);
+ I40E_RX_NEXT_DESC_PREFETCH(rx_ring, i, next_rxd);
+ goto next_desc;
+ }
+ rx_bi = &rx_ring->rx_bi[i];
+ skb = rx_bi->skb;
+ prefetch(skb->data);
+
+ rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK)
+ >> I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
+ rx_header_len = (qword & I40E_RXD_QW1_LENGTH_HBUF_MASK)
+ >> I40E_RXD_QW1_LENGTH_HBUF_SHIFT;
+ rx_sph = (qword & I40E_RXD_QW1_LENGTH_SPH_MASK)
+ >> I40E_RXD_QW1_LENGTH_SPH_SHIFT;
+
+ rx_error = (qword & I40E_RXD_QW1_ERROR_MASK)
+ >> I40E_RXD_QW1_ERROR_SHIFT;
+ rx_hbo = rx_error & (1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
+ rx_error &= ~(1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
+
+ rx_bi->skb = NULL;
+
+ /* This memory barrier is needed to keep us from reading
+ * any other fields out of the rx_desc until we know the
+ * STATUS_DD bit is set
+ */
+ rmb();
+
+ /* Get the header and possibly the whole packet
+ * If this is an skb from previous receive dma will be 0
+ */
+ if (rx_bi->dma) {
+ u16 len;
+
+ if (rx_hbo)
+ len = I40E_RX_HDR_SIZE;
+ else if (rx_sph)
+ len = rx_header_len;
+ else if (rx_packet_len)
+ len = rx_packet_len; /* 1buf/no split found */
+ else
+ len = rx_header_len; /* split always mode */
+
+ skb_put(skb, len);
+ dma_unmap_single(rx_ring->dev,
+ rx_bi->dma,
+ rx_ring->rx_buf_len,
+ DMA_FROM_DEVICE);
+ rx_bi->dma = 0;
+ }
+
+ /* Get the rest of the data if this was a header split */
+ if (ring_is_ps_enabled(rx_ring) && rx_packet_len) {
+
+ skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
+ rx_bi->page,
+ rx_bi->page_offset,
+ rx_packet_len);
+
+ skb->len += rx_packet_len;
+ skb->data_len += rx_packet_len;
+ skb->truesize += rx_packet_len;
+
+ if ((page_count(rx_bi->page) == 1) &&
+ (page_to_nid(rx_bi->page) == current_node))
+ get_page(rx_bi->page);
+ else
+ rx_bi->page = NULL;
+
+ dma_unmap_page(rx_ring->dev,
+ rx_bi->page_dma,
+ PAGE_SIZE / 2,
+ DMA_FROM_DEVICE);
+ rx_bi->page_dma = 0;
+ }
+ I40E_RX_NEXT_DESC_PREFETCH(rx_ring, i, next_rxd);
+
+ if (unlikely(
+ !(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
+ struct i40e_rx_buffer *next_buffer;
+
+ next_buffer = &rx_ring->rx_bi[i];
+
+ if (ring_is_ps_enabled(rx_ring)) {
+ rx_bi->skb = next_buffer->skb;
+ rx_bi->dma = next_buffer->dma;
+ next_buffer->skb = skb;
+ next_buffer->dma = 0;
+ }
+ rx_ring->rx_stats.non_eop_descs++;
+ goto next_desc;
+ }
+
+ /* ERR_MASK will only have valid bits if EOP set */
+ if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
+ dev_kfree_skb_any(skb);
+ goto next_desc;
+ }
+
+ skb->rxhash = i40e_rx_hash(rx_ring, rx_desc);
+ i40e_rx_checksum(vsi, skb, rx_status, rx_error);
+
+ /* probably a little skewed due to removing CRC */
+ total_rx_bytes += skb->len;
+ total_rx_packets++;
+
+ skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+ vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
+ ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
+ : 0;
+ i40e_receive_skb(rx_ring, skb, vlan_tag);
+
+ rx_ring->netdev->last_rx = jiffies;
+ budget--;
+next_desc:
+ rx_desc->wb.qword1.status_error_len = 0;
+ if (!budget)
+ break;
+
+ cleaned_count++;
+ /* return some buffers to hardware, one at a time is too slow */
+ if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
+ i40e_alloc_rx_buffers(rx_ring, cleaned_count);
+ cleaned_count = 0;
+ }
+
+ /* use prefetched values */
+ rx_desc = next_rxd;
+ qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+ rx_status = (qword & I40E_RXD_QW1_STATUS_MASK)
+ >> I40E_RXD_QW1_STATUS_SHIFT;
+ }
+
+ rx_ring->next_to_clean = i;
+ rx_ring->rx_stats.packets += total_rx_packets;
+ rx_ring->rx_stats.bytes += total_rx_bytes;
+ rx_ring->q_vector->rx.total_packets += total_rx_packets;
+ rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
+
+ if (cleaned_count)
+ i40e_alloc_rx_buffers(rx_ring, cleaned_count);
+
+ return budget > 0;
+}
+
+/**
+ * i40e_napi_poll - NAPI polling Rx/Tx cleanup routine
+ * @napi: napi struct with our devices info in it
+ * @budget: amount of work driver is allowed to do this pass, in packets
+ *
+ * This function will clean all queues associated with a q_vector.
+ *
+ * Returns the amount of work done
+ **/
+int i40e_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct i40e_q_vector *q_vector =
+ container_of(napi, struct i40e_q_vector, napi);
+ struct i40e_vsi *vsi = q_vector->vsi;
+ bool clean_complete = true;
+ int budget_per_ring;
+ int i;
+
+ if (test_bit(__I40E_DOWN, &vsi->state)) {
+ napi_complete(napi);
+ return 0;
+ }
+
+ /* We attempt to distribute budget to each Rx queue fairly, but don't
+ * allow the budget to go below 1 because that would exit polling early.
+ * Since the actual Tx work is minimal, we can give the Tx a larger
+ * budget and be more aggressive about cleaning up the Tx descriptors.
+ */
+ budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
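+	/* e.g. a budget of 64 across 4 ring pairs allows each Rx ring
+	 * to clean at most 16 descriptors in this poll
+	 */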
+ for (i = 0; i < q_vector->num_ringpairs; i++) {
+ clean_complete &= i40e_clean_tx_irq(q_vector->tx.ring[i],
+ vsi->work_limit);
+ clean_complete &= i40e_clean_rx_irq(q_vector->rx.ring[i],
+ budget_per_ring);
+ }
+
+ /* If work not completed, return budget and polling will return */
+ if (!clean_complete)
+ return budget;
+
+ /* Work is done so exit the polling mode and re-enable the interrupt */
+ napi_complete(napi);
+ if (ITR_IS_DYNAMIC(vsi->rx_itr_setting) ||
+ ITR_IS_DYNAMIC(vsi->tx_itr_setting))
+ i40e_update_dynamic_itr(q_vector);
+
+ if (!test_bit(__I40E_DOWN, &vsi->state)) {
+ if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
+ i40e_irq_dynamic_enable(vsi,
+ q_vector->v_idx + vsi->base_vector);
+ } else {
+ struct i40e_hw *hw = &vsi->back->hw;
+			/* Re-enable the queue 0 interrupt cause; there is
+			 * no need to touch dynamic_enable because it was
+			 * left on for the other possible interrupt causes
+			 * during NAPI polling.
+			 */
+ u32 qval = rd32(hw, I40E_QINT_RQCTL(0));
+ qval |= I40E_QINT_RQCTL_CAUSE_ENA_MASK;
+ wr32(hw, I40E_QINT_RQCTL(0), qval);
+
+ qval = rd32(hw, I40E_QINT_TQCTL(0));
+ qval |= I40E_QINT_TQCTL_CAUSE_ENA_MASK;
+ wr32(hw, I40E_QINT_TQCTL(0), qval);
+ i40e_flush(hw);
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * i40e_atr - Add a Flow Director ATR filter
+ * @tx_ring: ring to add programming descriptor to
+ * @skb: send buffer
+ * @flags: send flags
+ * @protocol: wire protocol
+ **/
+static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
+ u32 flags, __be16 protocol)
+{
+ struct i40e_filter_program_desc *fdir_desc;
+ struct i40e_pf *pf = tx_ring->vsi->back;
+ union {
+ unsigned char *network;
+ struct iphdr *ipv4;
+ struct ipv6hdr *ipv6;
+ } hdr;
+ struct tcphdr *th;
+ unsigned int hlen;
+ u32 flex_ptype, dtype_cmd;
+
+ /* make sure ATR is enabled */
+ if (!(pf->flags & I40E_FLAG_FDIR_ATR_ENABLED))
+ return;
+
+ /* if sampling is disabled do nothing */
+ if (!tx_ring->atr_sample_rate)
+ return;
+
+ tx_ring->atr_count++;
+
+ /* snag network header to get L4 type and address */
+ hdr.network = skb_network_header(skb);
+
+ /* Currently only IPv4/IPv6 with TCP is supported */
+ if (protocol == htons(ETH_P_IP)) {
+ if (hdr.ipv4->protocol != IPPROTO_TCP)
+ return;
+
+ /* access ihl as a u8 to avoid unaligned access on ia64 */
+ hlen = (hdr.network[0] & 0x0F) << 2;
+ } else if (protocol == htons(ETH_P_IPV6)) {
+ if (hdr.ipv6->nexthdr != IPPROTO_TCP)
+ return;
+
+ hlen = sizeof(struct ipv6hdr);
+ } else {
+ return;
+ }
+
+ th = (struct tcphdr *)(hdr.network + hlen);
+
+ /* sample on all syn/fin packets or once every atr sample rate */
+	if (!th->fin && !th->syn &&
+	    (tx_ring->atr_count < tx_ring->atr_sample_rate))
+ return;
+
+ tx_ring->atr_count = 0;
+
+ /* grab the next descriptor */
+ fdir_desc = I40E_TX_FDIRDESC(tx_ring, tx_ring->next_to_use);
+ tx_ring->next_to_use++;
+ if (tx_ring->next_to_use == tx_ring->count)
+ tx_ring->next_to_use = 0;
+
+ flex_ptype = (tx_ring->queue_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
+ I40E_TXD_FLTR_QW0_QINDEX_MASK;
+ flex_ptype |= (protocol == htons(ETH_P_IP)) ?
+ (I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
+ I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
+ (I40E_FILTER_PCTYPE_NONF_IPV6_TCP <<
+ I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);
+
+ flex_ptype |= tx_ring->vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
+
+ dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;
+
+ dtype_cmd |= th->fin ?
+ (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
+ I40E_TXD_FLTR_QW1_PCMD_SHIFT) :
+ (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
+ I40E_TXD_FLTR_QW1_PCMD_SHIFT);
+
+ dtype_cmd |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX <<
+ I40E_TXD_FLTR_QW1_DEST_SHIFT;
+
+ dtype_cmd |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
+ I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;
+
+ fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
+ fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
+}
+
+#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
+/**
+ * i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
+ * @skb: send buffer
+ * @tx_ring: ring to send buffer on
+ * @flags: the tx flags to be set
+ *
+ * Checks the skb and sets up the corresponding generic transmit flags
+ * related to VLAN tagging for the HW, such as VLAN, DCB, etc.
+ *
+ * Returns an error code to indicate the frame should be dropped upon error,
+ * otherwise returns 0 to indicate the flags have been set properly.
+ **/
+static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
+ struct i40e_ring *tx_ring,
+ u32 *flags)
+{
+ __be16 protocol = skb->protocol;
+ u32 tx_flags = 0;
+
+ /* if we have a HW VLAN tag being added, default to the HW one */
+ if (vlan_tx_tag_present(skb)) {
+ tx_flags |= vlan_tx_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
+ tx_flags |= I40E_TX_FLAGS_HW_VLAN;
+ /* else if it is a SW VLAN, check the next protocol and store the tag */
+ } else if (protocol == __constant_htons(ETH_P_8021Q)) {
+ struct vlan_hdr *vhdr, _vhdr;
+ vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
+ if (!vhdr)
+ return -EINVAL;
+
+ protocol = vhdr->h_vlan_encapsulated_proto;
+ tx_flags |= ntohs(vhdr->h_vlan_TCI) << I40E_TX_FLAGS_VLAN_SHIFT;
+ tx_flags |= I40E_TX_FLAGS_SW_VLAN;
+ }
+
+ /* Insert 802.1p priority into VLAN header */
+ if ((tx_ring->vsi->back->flags & I40E_FLAG_DCB_ENABLED) &&
+ ((tx_flags & (I40E_TX_FLAGS_HW_VLAN | I40E_TX_FLAGS_SW_VLAN)) ||
+ (skb->priority != TC_PRIO_CONTROL))) {
+ tx_flags &= ~I40E_TX_FLAGS_VLAN_PRIO_MASK;
+ tx_flags |= (skb->priority & 0x7) <<
+ I40E_TX_FLAGS_VLAN_PRIO_SHIFT;
+ if (tx_flags & I40E_TX_FLAGS_SW_VLAN) {
+ struct vlan_ethhdr *vhdr;
+ if (skb_header_cloned(skb) &&
+ pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
+ return -ENOMEM;
+ vhdr = (struct vlan_ethhdr *)skb->data;
+ vhdr->h_vlan_TCI = htons(tx_flags >>
+ I40E_TX_FLAGS_VLAN_SHIFT);
+ } else {
+ tx_flags |= I40E_TX_FLAGS_HW_VLAN;
+ }
+ }
+ *flags = tx_flags;
+ return 0;
+}
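+
+/* For illustration: with VLAN ID 5 and priority 3 the 802.1Q TCI is
+ * (3 << 13) | 5 = 0x6005, so the function above leaves 0x60050000 in
+ * the upper 16 bits of tx_flags (see I40E_TX_FLAGS_VLAN_SHIFT/_MASK).
+ */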
+
+/**
+ * i40e_tx_csum - is checksum offload requested
+ * @tx_ring: ptr to the ring to send
+ * @skb: ptr to the skb we're sending
+ * @tx_flags: the collected send information
+ * @protocol: the send protocol
+ *
+ * Returns true if checksum offload is requested
+ **/
+static bool i40e_tx_csum(struct i40e_ring *tx_ring, struct sk_buff *skb,
+ u32 tx_flags, __be16 protocol)
+{
+ if ((skb->ip_summed != CHECKSUM_PARTIAL) &&
+ !(tx_flags & I40E_TX_FLAGS_TXSW)) {
+ if (!(tx_flags & I40E_TX_FLAGS_HW_VLAN))
+ return false;
+ }
+
+ return skb->ip_summed == CHECKSUM_PARTIAL;
+}
+
+/**
+ * i40e_tso - set up the tso context descriptor
+ * @tx_ring: ptr to the ring to send
+ * @skb: ptr to the skb we're sending
+ * @tx_flags: the collected send information
+ * @protocol: the send protocol
+ * @hdr_len: ptr to the size of the packet header
+ * @cd_type_cmd_tso_mss: ptr to Quad Word 1 of the context descriptor
+ * @cd_tunneling: ptr to context descriptor bits
+ *
+ * Returns 0 if no TSO is needed, 1 if TSO was set up, or a negative error code
+ **/
+static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
+ u32 tx_flags, __be16 protocol, u8 *hdr_len,
+ u64 *cd_type_cmd_tso_mss, u32 *cd_tunneling)
+{
+ u32 cd_cmd, cd_tso_len, cd_mss;
+ struct tcphdr *tcph;
+ struct iphdr *iph;
+ u32 l4len;
+ int err;
+ struct ipv6hdr *ipv6h;
+
+ if (!skb_is_gso(skb))
+ return 0;
+
+ if (skb_header_cloned(skb)) {
+ err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+ if (err)
+ return err;
+ }
+
+ if (protocol == __constant_htons(ETH_P_IP)) {
+ iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
+ tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
+ iph->tot_len = 0;
+ iph->check = 0;
+ tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
+ 0, IPPROTO_TCP, 0);
+ } else if (skb_is_gso_v6(skb)) {
+
+ ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb)
+ : ipv6_hdr(skb);
+ tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
+ ipv6h->payload_len = 0;
+ tcph->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
+ 0, IPPROTO_TCP, 0);
+ }
+
+ l4len = skb->encapsulation ? inner_tcp_hdrlen(skb) : tcp_hdrlen(skb);
+ *hdr_len = (skb->encapsulation
+ ? (skb_inner_transport_header(skb) - skb->data)
+ : skb_transport_offset(skb)) + l4len;
+
+ /* find the field values */
+ cd_cmd = I40E_TX_CTX_DESC_TSO;
+ cd_tso_len = skb->len - *hdr_len;
+ cd_mss = skb_shinfo(skb)->gso_size;
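+	/* e.g. a 4380-byte TSO payload with an MSS of 1460 packs
+	 * cd_tso_len into bits 47:30 and cd_mss into bits 63:50 of QW1
+	 */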
+ *cd_type_cmd_tso_mss |= ((u64)cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT)
+ | ((u64)cd_tso_len
+ << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT)
+ | ((u64)cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
+ return 1;
+}
+
+/**
+ * i40e_tx_enable_csum - Enable Tx checksum offloads
+ * @skb: send buffer
+ * @tx_flags: Tx flags currently set
+ * @td_cmd: Tx descriptor command bits to set
+ * @td_offset: Tx descriptor header offsets to set
+ * @tx_ring: ring to send buffer on
+ * @cd_tunneling: ptr to context desc bits
+ **/
+static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
+ u32 *td_cmd, u32 *td_offset,
+ struct i40e_ring *tx_ring,
+ u32 *cd_tunneling)
+{
+ struct ipv6hdr *this_ipv6_hdr;
+ unsigned int this_tcp_hdrlen;
+ struct iphdr *this_ip_hdr;
+ u32 network_hdr_len;
+ u8 l4_hdr = 0;
+
+ if (skb->encapsulation) {
+ network_hdr_len = skb_inner_network_header_len(skb);
+ this_ip_hdr = inner_ip_hdr(skb);
+ this_ipv6_hdr = inner_ipv6_hdr(skb);
+ this_tcp_hdrlen = inner_tcp_hdrlen(skb);
+
+ if (tx_flags & I40E_TX_FLAGS_IPV4) {
+
+ if (tx_flags & I40E_TX_FLAGS_TSO) {
+ *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4;
+ ip_hdr(skb)->check = 0;
+ } else {
+ *cd_tunneling |=
+ I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
+ }
+		} else if (tx_flags & I40E_TX_FLAGS_IPV6) {
+			/* the outer IPv6 header carries no checksum, so
+			 * only the tunneling type needs to be set
+			 */
+			*cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
+		}
+
+ /* Now set the ctx descriptor fields */
+ *cd_tunneling |= (skb_network_header_len(skb) >> 2) <<
+ I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT |
+ I40E_TXD_CTX_UDP_TUNNELING |
+ ((skb_inner_network_offset(skb) -
+ skb_transport_offset(skb)) >> 1) <<
+ I40E_TXD_CTX_QW0_NATLEN_SHIFT;
+
+ } else {
+ network_hdr_len = skb_network_header_len(skb);
+ this_ip_hdr = ip_hdr(skb);
+ this_ipv6_hdr = ipv6_hdr(skb);
+ this_tcp_hdrlen = tcp_hdrlen(skb);
+ }
+
+ /* Enable IP checksum offloads */
+ if (tx_flags & I40E_TX_FLAGS_IPV4) {
+ l4_hdr = this_ip_hdr->protocol;
+ /* the stack computes the IP header already, the only time we
+ * need the hardware to recompute it is in the case of TSO.
+ */
+ if (tx_flags & I40E_TX_FLAGS_TSO) {
+ *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM;
+ this_ip_hdr->check = 0;
+ } else {
+ *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4;
+ }
+ /* Now set the td_offset for IP header length */
+ *td_offset = (network_hdr_len >> 2) <<
+ I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
+ } else if (tx_flags & I40E_TX_FLAGS_IPV6) {
+ l4_hdr = this_ipv6_hdr->nexthdr;
+ *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
+ /* Now set the td_offset for IP header length */
+ *td_offset = (network_hdr_len >> 2) <<
+ I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
+ }
+ /* words in MACLEN + dwords in IPLEN + dwords in L4Len */
+ *td_offset |= (skb_network_offset(skb) >> 1) <<
+ I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
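+	/* e.g. an untagged Ethernet frame (14-byte MAC header) yields
+	 * MACLEN = 14 >> 1 = 7 words, and a 20-byte IPv4 header yields
+	 * IPLEN = 20 >> 2 = 5 dwords
+	 */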
+
+ /* Enable L4 checksum offloads */
+ switch (l4_hdr) {
+ case IPPROTO_TCP:
+ /* enable checksum offloads */
+ *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
+ *td_offset |= (this_tcp_hdrlen >> 2) <<
+ I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+ break;
+ case IPPROTO_SCTP:
+ /* enable SCTP checksum offload */
+ *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
+ *td_offset |= (sizeof(struct sctphdr) >> 2) <<
+ I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+ break;
+ case IPPROTO_UDP:
+ /* enable UDP checksum offload */
+ *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
+ *td_offset |= (sizeof(struct udphdr) >> 2) <<
+ I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * i40e_create_tx_ctx - Build the Tx context descriptor
+ * @tx_ring: ring to create the descriptor on
+ * @cd_type_cmd_tso_mss: Quad Word 1
+ * @cd_tunneling: Quad Word 0 - bits 0-31
+ * @cd_l2tag2: Quad Word 0 - bits 32-63
+ **/
+static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
+ const u64 cd_type_cmd_tso_mss,
+ const u32 cd_tunneling, const u32 cd_l2tag2)
+{
+ struct i40e_tx_context_desc *context_desc;
+
+ if (!cd_type_cmd_tso_mss && !cd_tunneling && !cd_l2tag2)
+ return;
+
+ /* grab the next descriptor */
+ context_desc = I40E_TX_CTXTDESC(tx_ring, tx_ring->next_to_use);
+ tx_ring->next_to_use++;
+ if (tx_ring->next_to_use == tx_ring->count)
+ tx_ring->next_to_use = 0;
+
+ /* cpu_to_le32 and assign to struct fields */
+ context_desc->tunneling_params = cpu_to_le32(cd_tunneling);
+ context_desc->l2tag2 = cpu_to_le16(cd_l2tag2);
+ context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
+}
+
+/**
+ * i40e_tx_map - Build the Tx descriptor
+ * @tx_ring: ring to send buffer on
+ * @skb: send buffer
+ * @first: first buffer info buffer to use
+ * @tx_flags: collected send information
+ * @hdr_len: size of the packet header
+ * @td_cmd: the command field in the descriptor
+ * @td_offset: offset for checksum or crc
+ **/
+static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
+ struct i40e_tx_buffer *first, u32 tx_flags,
+ const u8 hdr_len, u32 td_cmd, u32 td_offset)
+{
+ struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
+ unsigned int data_len = skb->data_len;
+ unsigned int size = skb_headlen(skb);
+ struct device *dev = tx_ring->dev;
+ u32 paylen = skb->len - hdr_len;
+ u16 i = tx_ring->next_to_use;
+ struct i40e_tx_buffer *tx_bi;
+ struct i40e_tx_desc *tx_desc;
+ u32 buf_offset = 0;
+ u32 td_tag = 0;
+ dma_addr_t dma;
+ u16 gso_segs;
+
+ dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, dma))
+ goto dma_error;
+
+ if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
+ td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
+ td_tag = (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >>
+ I40E_TX_FLAGS_VLAN_SHIFT;
+ }
+
+ tx_desc = I40E_TX_DESC(tx_ring, i);
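+	/* walk the head buffer and then each frag, splitting any chunk
+	 * larger than I40E_MAX_DATA_PER_TXD across several data
+	 * descriptors and wrapping around at the end of the ring
+	 */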
+ for (;;) {
+ while (size > I40E_MAX_DATA_PER_TXD) {
+ tx_desc->buffer_addr = cpu_to_le64(dma + buf_offset);
+ tx_desc->cmd_type_offset_bsz =
+ build_ctob(td_cmd, td_offset,
+ I40E_MAX_DATA_PER_TXD, td_tag);
+
+ buf_offset += I40E_MAX_DATA_PER_TXD;
+ size -= I40E_MAX_DATA_PER_TXD;
+
+ tx_desc++;
+ i++;
+ if (i == tx_ring->count) {
+ tx_desc = I40E_TX_DESC(tx_ring, 0);
+ i = 0;
+ }
+ }
+
+ tx_bi = &tx_ring->tx_bi[i];
+ tx_bi->length = buf_offset + size;
+ tx_bi->tx_flags = tx_flags;
+ tx_bi->dma = dma;
+
+ tx_desc->buffer_addr = cpu_to_le64(dma + buf_offset);
+ tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
+ size, td_tag);
+
+ if (likely(!data_len))
+ break;
+
+ size = skb_frag_size(frag);
+ data_len -= size;
+ buf_offset = 0;
+ tx_flags |= I40E_TX_FLAGS_MAPPED_AS_PAGE;
+
+ dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, dma))
+ goto dma_error;
+
+ tx_desc++;
+ i++;
+ if (i == tx_ring->count) {
+ tx_desc = I40E_TX_DESC(tx_ring, 0);
+ i = 0;
+ }
+
+ frag++;
+ }
+
+ tx_desc->cmd_type_offset_bsz |=
+ cpu_to_le64((u64)I40E_TXD_CMD << I40E_TXD_QW1_CMD_SHIFT);
+
+ i++;
+ if (i == tx_ring->count)
+ i = 0;
+
+ tx_ring->next_to_use = i;
+
+ if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO))
+ gso_segs = skb_shinfo(skb)->gso_segs;
+ else
+ gso_segs = 1;
+
+ /* multiply data chunks by size of headers */
+ tx_bi->bytecount = paylen + (gso_segs * hdr_len);
+ tx_bi->gso_segs = gso_segs;
+ tx_bi->skb = skb;
+
+ /* set the timestamp and next to watch values */
+ first->time_stamp = jiffies;
+ first->next_to_watch = tx_desc;
+
+ /* Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64).
+ */
+ wmb();
+
+ writel(i, tx_ring->tail);
+ return;
+
+dma_error:
+ dev_info(dev, "TX DMA map failed\n");
+
+ /* clear dma mappings for failed tx_bi map */
+ for (;;) {
+ tx_bi = &tx_ring->tx_bi[i];
+ i40e_unmap_tx_resource(tx_ring, tx_bi);
+ if (tx_bi == first)
+ break;
+ if (i == 0)
+ i = tx_ring->count;
+ i--;
+ }
+
+ dev_kfree_skb_any(skb);
+
+ tx_ring->next_to_use = i;
+}
+
+/**
+ * __i40e_maybe_stop_tx - 2nd level check for tx stop conditions
+ * @tx_ring: the ring to be checked
+ * @size: number of descriptors we want to assure are available
+ *
+ * Returns -EBUSY if a stop is needed, else 0
+ **/
+static inline int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
+{
+ netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
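+	/* order the stop above against the re-read of the free count
+	 * below; this pairs with the Tx clean path so a wakeup between
+	 * the two cannot be lost
+	 */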
+ smp_mb();
+
+ /* Check again in a case another CPU has just made room available. */
+ if (likely(I40E_DESC_UNUSED(tx_ring) < size))
+ return -EBUSY;
+
+ /* A reprieve! - use start_queue because it doesn't call schedule */
+ netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
+ ++tx_ring->tx_stats.restart_queue;
+ return 0;
+}
+
+/**
+ * i40e_maybe_stop_tx - 1st level check for tx stop conditions
+ * @tx_ring: the ring to be checked
+ * @size: number of descriptors we want to assure are available
+ *
+ * Returns 0 if stop is not needed
+ **/
+static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
+{
+ if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
+ return 0;
+ return __i40e_maybe_stop_tx(tx_ring, size);
+}
+
+/**
+ * i40e_xmit_descriptor_count - calculate number of tx descriptors needed
+ * @skb: send buffer
+ * @tx_ring: ring to send buffer on
+ *
+ * Returns the number of data descriptors needed for this skb, or 0 if there
+ * are not enough descriptors available in this ring; since we need at least
+ * one descriptor, 0 unambiguously signals failure.
+ **/
+static int i40e_xmit_descriptor_count(struct sk_buff *skb,
+ struct i40e_ring *tx_ring)
+{
+#if PAGE_SIZE > I40E_MAX_DATA_PER_TXD
+ unsigned int f;
+#endif
+ int count = 0;
+
+ /* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
+ * + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD,
+ * + 2 desc gap to keep tail from touching head,
+ * + 1 desc for context descriptor,
+ * otherwise try next time
+ */
+#if PAGE_SIZE > I40E_MAX_DATA_PER_TXD
+ for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
+ count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
+#else
+ count += skb_shinfo(skb)->nr_frags;
+#endif
+ count += TXD_USE_COUNT(skb_headlen(skb));
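+	/* e.g. with 4K pages, an skb with a 256-byte head and 4 page
+	 * frags needs 4 + 1 = 5 data descriptors, so room for 5 + 3
+	 * descriptors is checked for below
+	 */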
+ if (i40e_maybe_stop_tx(tx_ring, count + 3)) {
+ tx_ring->tx_stats.tx_busy++;
+ return 0;
+ }
+ return count;
+}
+
+/**
+ * i40e_xmit_frame_ring - Sends buffer on Tx ring
+ * @skb: send buffer
+ * @tx_ring: ring to send buffer on
+ *
+ * Returns NETDEV_TX_OK if sent, else an error code
+ **/
+static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
+ struct i40e_ring *tx_ring)
+{
+ u64 cd_type_cmd_tso_mss = I40E_TX_DESC_DTYPE_CONTEXT;
+ u32 cd_tunneling = 0, cd_l2tag2 = 0;
+ struct i40e_tx_buffer *first;
+ u32 td_offset = 0;
+ u32 tx_flags = 0;
+ __be16 protocol;
+ u32 td_cmd = 0;
+ u8 hdr_len = 0;
+	int tso;
+
+	if (i40e_xmit_descriptor_count(skb, tx_ring) == 0)
+ return NETDEV_TX_BUSY;
+
+ /* prepare the xmit flags */
+ if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
+ goto out_drop;
+
+ /* obtain protocol of skb */
+ protocol = skb->protocol;
+
+ /* record the location of the first descriptor for this packet */
+ first = &tx_ring->tx_bi[tx_ring->next_to_use];
+
+ /* setup IPv4/IPv6 offloads */
+ if (protocol == __constant_htons(ETH_P_IP))
+ tx_flags |= I40E_TX_FLAGS_IPV4;
+ else if (protocol == __constant_htons(ETH_P_IPV6))
+ tx_flags |= I40E_TX_FLAGS_IPV6;
+
+ tso = i40e_tso(tx_ring, skb, tx_flags, protocol, &hdr_len,
+ &cd_type_cmd_tso_mss, &cd_tunneling);
+
+ if (tso < 0)
+ goto out_drop;
+ else if (tso)
+ tx_flags |= I40E_TX_FLAGS_TSO;
+
+ skb_tx_timestamp(skb);
+
+ /* Always offload the checksum, since it's in the data descriptor */
+ if (i40e_tx_csum(tx_ring, skb, tx_flags, protocol))
+ tx_flags |= I40E_TX_FLAGS_CSUM;
+
+ /* always enable offload insertion */
+ td_cmd |= I40E_TX_DESC_CMD_ICRC;
+
+ if (tx_flags & I40E_TX_FLAGS_CSUM)
+ i40e_tx_enable_csum(skb, tx_flags, &td_cmd, &td_offset,
+ tx_ring, &cd_tunneling);
+
+ i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
+ cd_tunneling, cd_l2tag2);
+
+ /* Add Flow Director ATR if it's enabled.
+ *
+ * NOTE: this must always be directly before the data descriptor.
+ */
+ i40e_atr(tx_ring, skb, tx_flags, protocol);
+
+ i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
+ td_cmd, td_offset);
+
+ i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
+
+ return NETDEV_TX_OK;
+
+out_drop:
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+}
+
+/**
+ * i40e_lan_xmit_frame - Selects the correct VSI and Tx queue to send buffer
+ * @skb: send buffer
+ * @netdev: network interface device structure
+ *
+ * Returns NETDEV_TX_OK if sent, else an error code
+ **/
+netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_ring *tx_ring = &vsi->tx_rings[skb->queue_mapping];
+
+ /* hardware can't handle really short frames, hardware padding works
+ * beyond this point
+ */
+ if (unlikely(skb->len < I40E_MIN_TX_LEN)) {
+ if (skb_pad(skb, I40E_MIN_TX_LEN - skb->len))
+ return NETDEV_TX_OK;
+ skb->len = I40E_MIN_TX_LEN;
+ skb_set_tail_pointer(skb, I40E_MIN_TX_LEN);
+ }
+
+ return i40e_xmit_frame_ring(skb, tx_ring);
+}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
new file mode 100644
index 0000000..b1d7722
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -0,0 +1,259 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+/* Interrupt Throttling and Rate Limiting (storm control) Goodies */
+
+#define I40E_MAX_ITR 0x07FF
+#define I40E_MIN_ITR 0x0001
+#define I40E_ITR_USEC_RESOLUTION 2
+#define I40E_MAX_IRATE 0x03F
+#define I40E_MIN_IRATE 0x001
+#define I40E_IRATE_USEC_RESOLUTION 4
+#define I40E_ITR_100K 0x0005
+#define I40E_ITR_20K 0x0019
+#define I40E_ITR_8K 0x003E
+#define I40E_ITR_4K 0x007A
+#define I40E_ITR_RX_DEF I40E_ITR_8K
+#define I40E_ITR_TX_DEF I40E_ITR_4K
+#define I40E_ITR_DYNAMIC 0x8000 /* use top bit as a flag */
+#define I40E_MIN_INT_RATE 250 /* ~= 1000000 / (I40E_MAX_ITR * 2) */
+#define I40E_MAX_INT_RATE 500000 /* == 1000000 / (I40E_MIN_ITR * 2) */
+#define I40E_DEFAULT_IRQ_WORK 256
+#define ITR_TO_REG(setting) ((setting & ~I40E_ITR_DYNAMIC) >> 1)
+#define ITR_IS_DYNAMIC(setting) (!!(setting & I40E_ITR_DYNAMIC))
+#define ITR_REG_TO_USEC(itr_reg) (itr_reg << 1)
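+/* ITR values are expressed in units of I40E_ITR_USEC_RESOLUTION (2 usec):
+ * e.g. I40E_ITR_100K (0x5) means 5 * 2 = 10 usec between interrupts,
+ * i.e. roughly 100K interrupts per second
+ */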
+
+#define I40E_QUEUE_END_OF_LIST 0x7FF
+
+#define I40E_ITR_NONE 3
+#define I40E_RX_ITR 0
+#define I40E_TX_ITR 1
+#define I40E_PE_ITR 2
+/* Supported Rx Buffer Sizes */
+#define I40E_RXBUFFER_512 512 /* Used for packet split */
+#define I40E_RXBUFFER_2048 2048
+#define I40E_RXBUFFER_3072 3072 /* For FCoE MTU of 2158 */
+#define I40E_RXBUFFER_4096 4096
+#define I40E_RXBUFFER_8192 8192
+#define I40E_MAX_RXBUFFER 9728 /* largest size for single descriptor */
+
+/* NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we
+ * reserve 2 more, and skb_shared_info adds an additional 384 bytes more;
+ * this adds up to 512 bytes of extra data, meaning the smallest allocation
+ * we could have is 1K.
+ * i.e. RXBUFFER_512 --> size-1024 slab
+ */
+#define I40E_RX_HDR_SIZE I40E_RXBUFFER_512
+
+/* How many Rx Buffers do we bundle into one write to the hardware ? */
+#define I40E_RX_BUFFER_WRITE 16 /* Must be power of 2 */
+#define I40E_RX_NEXT_DESC(r, i, n) \
+ do { \
+ (i)++; \
+ if ((i) == (r)->count) \
+ i = 0; \
+ (n) = I40E_RX_DESC((r), (i)); \
+ } while (0)
+
+#define I40E_RX_NEXT_DESC_PREFETCH(r, i, n) \
+ do { \
+ I40E_RX_NEXT_DESC((r), (i), (n)); \
+ prefetch((n)); \
+ } while (0)
+
+#define i40e_rx_desc i40e_32byte_rx_desc
+
+#define I40E_MIN_TX_LEN 17
+#define I40E_MAX_DATA_PER_TXD 16383 /* aka 16kB - 1 */
+
+/* Tx Descriptors needed, worst case */
+#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), I40E_MAX_DATA_PER_TXD)
+#define DESC_NEEDED ((MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE)) + 4)
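+/* on 4K-page systems each frag fits in one descriptor, since
+ * PAGE_SIZE < I40E_MAX_DATA_PER_TXD, so DESC_NEEDED reduces to
+ * MAX_SKB_FRAGS + 4
+ */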
+
+#define I40E_TX_FLAGS_CSUM (u32)(1)
+#define I40E_TX_FLAGS_HW_VLAN (u32)(1 << 1)
+#define I40E_TX_FLAGS_SW_VLAN (u32)(1 << 2)
+#define I40E_TX_FLAGS_TSO (u32)(1 << 3)
+#define I40E_TX_FLAGS_IPV4 (u32)(1 << 4)
+#define I40E_TX_FLAGS_IPV6 (u32)(1 << 5)
+#define I40E_TX_FLAGS_FCCRC (u32)(1 << 6)
+#define I40E_TX_FLAGS_FSO (u32)(1 << 7)
+#define I40E_TX_FLAGS_TXSW (u32)(1 << 8)
+#define I40E_TX_FLAGS_MAPPED_AS_PAGE (u32)(1 << 9)
+#define I40E_TX_FLAGS_VLAN_MASK 0xffff0000
+#define I40E_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000
+#define I40E_TX_FLAGS_VLAN_PRIO_SHIFT 29
+#define I40E_TX_FLAGS_VLAN_SHIFT 16
+
+struct i40e_tx_buffer {
+ struct sk_buff *skb;
+ dma_addr_t dma;
+ unsigned long time_stamp;
+ u16 length;
+ u32 tx_flags;
+ struct i40e_tx_desc *next_to_watch;
+ unsigned int bytecount;
+ u16 gso_segs;
+ u8 mapped_as_page;
+};
+
+struct i40e_rx_buffer {
+ struct sk_buff *skb;
+ dma_addr_t dma;
+ struct page *page;
+ dma_addr_t page_dma;
+ unsigned int page_offset;
+};
+
+struct i40e_tx_queue_stats {
+ u64 packets;
+ u64 bytes;
+ u64 restart_queue;
+ u64 tx_busy;
+ u64 completed;
+ u64 tx_done_old;
+};
+
+struct i40e_rx_queue_stats {
+ u64 packets;
+ u64 bytes;
+ u64 non_eop_descs;
+ u64 alloc_rx_page_failed;
+ u64 alloc_rx_buff_failed;
+};
+
+enum i40e_ring_state_t {
+ __I40E_TX_FDIR_INIT_DONE,
+ __I40E_TX_XPS_INIT_DONE,
+ __I40E_TX_DETECT_HANG,
+ __I40E_HANG_CHECK_ARMED,
+ __I40E_RX_PS_ENABLED,
+ __I40E_RX_LRO_ENABLED,
+ __I40E_RX_16BYTE_DESC_ENABLED,
+};
+
+#define ring_is_ps_enabled(ring) \
+ test_bit(__I40E_RX_PS_ENABLED, &(ring)->state)
+#define set_ring_ps_enabled(ring) \
+ set_bit(__I40E_RX_PS_ENABLED, &(ring)->state)
+#define clear_ring_ps_enabled(ring) \
+ clear_bit(__I40E_RX_PS_ENABLED, &(ring)->state)
+#define check_for_tx_hang(ring) \
+ test_bit(__I40E_TX_DETECT_HANG, &(ring)->state)
+#define set_check_for_tx_hang(ring) \
+ set_bit(__I40E_TX_DETECT_HANG, &(ring)->state)
+#define clear_check_for_tx_hang(ring) \
+ clear_bit(__I40E_TX_DETECT_HANG, &(ring)->state)
+#define ring_is_lro_enabled(ring) \
+ test_bit(__I40E_RX_LRO_ENABLED, &(ring)->state)
+#define set_ring_lro_enabled(ring) \
+ set_bit(__I40E_RX_LRO_ENABLED, &(ring)->state)
+#define clear_ring_lro_enabled(ring) \
+ clear_bit(__I40E_RX_LRO_ENABLED, &(ring)->state)
+#define ring_is_16byte_desc_enabled(ring) \
+ test_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state)
+#define set_ring_16byte_desc_enabled(ring) \
+ set_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state)
+#define clear_ring_16byte_desc_enabled(ring) \
+ clear_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state)
+
+/* struct that defines a descriptor ring, associated with a VSI */
+struct i40e_ring {
+ void *desc; /* Descriptor ring memory */
+ struct device *dev; /* Used for DMA mapping */
+ struct net_device *netdev; /* netdev ring maps to */
+ union {
+ struct i40e_tx_buffer *tx_bi;
+ struct i40e_rx_buffer *rx_bi;
+ };
+ unsigned long state;
+ u16 queue_index; /* Queue number of ring */
+ u8 dcb_tc; /* Traffic class of ring */
+ u8 __iomem *tail;
+
+ u16 count; /* Number of descriptors */
+ u16 reg_idx; /* HW register index of the ring */
+ u16 rx_hdr_len;
+ u16 rx_buf_len;
+ u8 dtype;
+#define I40E_RX_DTYPE_NO_SPLIT 0
+#define I40E_RX_DTYPE_SPLIT_ALWAYS 1
+#define I40E_RX_DTYPE_HEADER_SPLIT 2
+ u8 hsplit;
+#define I40E_RX_SPLIT_L2 0x1
+#define I40E_RX_SPLIT_IP 0x2
+#define I40E_RX_SPLIT_TCP_UDP 0x4
+#define I40E_RX_SPLIT_SCTP 0x8
+
+ /* used in interrupt processing */
+ u16 next_to_use;
+ u16 next_to_clean;
+
+ u8 atr_sample_rate;
+ u8 atr_count;
+
+ bool ring_active; /* is ring online or not */
+
+ /* stats structs */
+ union {
+ struct i40e_tx_queue_stats tx_stats;
+ struct i40e_rx_queue_stats rx_stats;
+ };
+
+ unsigned int size; /* length of descriptor ring in bytes */
+ dma_addr_t dma; /* physical address of ring */
+
+ struct i40e_vsi *vsi; /* Backreference to associated VSI */
+ struct i40e_q_vector *q_vector; /* Backreference to associated vector */
+} ____cacheline_internodealigned_in_smp;
+
+enum i40e_latency_range {
+ I40E_LOWEST_LATENCY = 0,
+ I40E_LOW_LATENCY = 1,
+ I40E_BULK_LATENCY = 2,
+};
+
+struct i40e_ring_container {
+#define I40E_MAX_RINGPAIR_PER_VECTOR 8
+ /* array of pointers to rings */
+ struct i40e_ring *ring[I40E_MAX_RINGPAIR_PER_VECTOR];
+ unsigned int total_bytes; /* total bytes processed this int */
+ unsigned int total_packets; /* total packets processed this int */
+ u16 count;
+ enum i40e_latency_range latency_range;
+ u16 itr;
+};
+
+void i40e_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count);
+netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
+void i40e_clean_tx_ring(struct i40e_ring *tx_ring);
+void i40e_clean_rx_ring(struct i40e_ring *rx_ring);
+int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring);
+int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring);
+void i40e_free_tx_resources(struct i40e_ring *tx_ring);
+void i40e_free_rx_resources(struct i40e_ring *rx_ring);
+int i40e_napi_poll(struct napi_struct *napi, int budget);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
new file mode 100644
index 0000000..f3f22b2
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -0,0 +1,1154 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_TYPE_H_
+#define _I40E_TYPE_H_
+
+#include "i40e_status.h"
+#include "i40e_osdep.h"
+#include "i40e_register.h"
+#include "i40e_adminq.h"
+#include "i40e_hmc.h"
+#include "i40e_lan_hmc.h"
+
+/* Device IDs */
+#define I40E_SFP_XL710_DEVICE_ID 0x1572
+#define I40E_SFP_X710_DEVICE_ID 0x1573
+#define I40E_QEMU_DEVICE_ID 0x1574
+#define I40E_KX_A_DEVICE_ID 0x157F
+#define I40E_KX_B_DEVICE_ID 0x1580
+#define I40E_KX_C_DEVICE_ID 0x1581
+#define I40E_KX_D_DEVICE_ID 0x1582
+#define I40E_QSFP_A_DEVICE_ID 0x1583
+#define I40E_QSFP_B_DEVICE_ID 0x1584
+#define I40E_QSFP_C_DEVICE_ID 0x1585
+#define I40E_VF_DEVICE_ID 0x154C
+#define I40E_VF_HV_DEVICE_ID 0x1571
+
+#define I40E_FW_API_VERSION_MAJOR 0x0001
+#define I40E_FW_API_VERSION_MINOR 0x0000
+
+#define I40E_MAX_VSI_QP 16
+#define I40E_MAX_VF_VSI 3
+#define I40E_MAX_CHAINED_RX_BUFFERS 5
+
+/* Max default timeout in ms */
+#define I40E_MAX_NVM_TIMEOUT 18000
+
+/* Check whether address is multicast. This is a little-endian specific check. */
+#define I40E_IS_MULTICAST(address) \
+ (bool)(((u8 *)(address))[0] & ((u8)0x01))
+
+/* Check whether an address is broadcast. */
+#define I40E_IS_BROADCAST(address) \
+ ((((u8 *)(address))[0] == ((u8)0xff)) && \
+ (((u8 *)(address))[1] == ((u8)0xff)))
+
+/* Switch from ms to the 2usec global time (this is the GTIME resolution) */
+#define I40E_MS_TO_GTIME(time) (((time) * 1000) / 2)
+
+/* forward declaration */
+struct i40e_hw;
+typedef void (*I40E_ADMINQ_CALLBACK)(struct i40e_hw *, struct i40e_aq_desc *);
+
+#define I40E_ETH_LENGTH_OF_ADDRESS 6
+
+/* Data type manipulation macros. */
+
+#define I40E_DESC_UNUSED(R) \
+ ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
+ (R)->next_to_clean - (R)->next_to_use - 1)
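+/* e.g. with count = 512, next_to_clean = 10 and next_to_use = 500,
+ * I40E_DESC_UNUSED() = 512 + 10 - 500 - 1 = 21 free descriptors
+ */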
+
+/* bitfields for Tx queue mapping in QTX_CTL */
+#define I40E_QTX_CTL_VF_QUEUE 0x0
+#define I40E_QTX_CTL_PF_QUEUE 0x2
+
+/* debug masks */
+enum i40e_debug_mask {
+ I40E_DEBUG_INIT = 0x00000001,
+ I40E_DEBUG_RELEASE = 0x00000002,
+
+ I40E_DEBUG_LINK = 0x00000010,
+ I40E_DEBUG_PHY = 0x00000020,
+ I40E_DEBUG_HMC = 0x00000040,
+ I40E_DEBUG_NVM = 0x00000080,
+ I40E_DEBUG_LAN = 0x00000100,
+ I40E_DEBUG_FLOW = 0x00000200,
+ I40E_DEBUG_DCB = 0x00000400,
+ I40E_DEBUG_DIAG = 0x00000800,
+
+ I40E_DEBUG_AQ_MESSAGE = 0x01000000, /* for i40e_debug() */
+ I40E_DEBUG_AQ_DESCRIPTOR = 0x02000000,
+ I40E_DEBUG_AQ_DESC_BUFFER = 0x04000000,
+ I40E_DEBUG_AQ_COMMAND = 0x06000000, /* for i40e_debug_aq() */
+ I40E_DEBUG_AQ = 0x0F000000,
+
+ I40E_DEBUG_USER = 0xF0000000,
+
+ I40E_DEBUG_ALL = 0xFFFFFFFF
+};
+
+/* These are structs for managing the hardware information and the operations.
+ * The structures of function pointers are filled out at init time when we
+ * know for sure exactly which hardware we're working with. This gives us the
+ * flexibility of using the same main driver code but adapting to slightly
+ * different hardware needs as new parts are developed. For this architecture,
+ * the Firmware and AdminQ are intended to insulate the driver from most of the
+ * future changes, but these structures will also do part of the job.
+ */
+enum i40e_mac_type {
+ I40E_MAC_UNKNOWN = 0,
+ I40E_MAC_X710,
+ I40E_MAC_XL710,
+ I40E_MAC_VF,
+ I40E_MAC_GENERIC,
+};
+
+enum i40e_media_type {
+ I40E_MEDIA_TYPE_UNKNOWN = 0,
+ I40E_MEDIA_TYPE_FIBER,
+ I40E_MEDIA_TYPE_BASET,
+ I40E_MEDIA_TYPE_BACKPLANE,
+ I40E_MEDIA_TYPE_CX4,
+ I40E_MEDIA_TYPE_VIRTUAL
+};
+
+enum i40e_fc_mode {
+ I40E_FC_NONE = 0,
+ I40E_FC_RX_PAUSE,
+ I40E_FC_TX_PAUSE,
+ I40E_FC_FULL,
+ I40E_FC_PFC,
+ I40E_FC_DEFAULT
+};
+
+enum i40e_vsi_type {
+ I40E_VSI_MAIN = 0,
+ I40E_VSI_VMDQ1,
+ I40E_VSI_VMDQ2,
+ I40E_VSI_CTRL,
+ I40E_VSI_FCOE,
+ I40E_VSI_MIRROR,
+ I40E_VSI_SRIOV,
+ I40E_VSI_FDIR,
+ I40E_VSI_TYPE_UNKNOWN
+};
+
+enum i40e_queue_type {
+ I40E_QUEUE_TYPE_RX = 0,
+ I40E_QUEUE_TYPE_TX,
+ I40E_QUEUE_TYPE_PE_CEQ,
+ I40E_QUEUE_TYPE_UNKNOWN
+};
+
+struct i40e_link_status {
+ enum i40e_aq_phy_type phy_type;
+ enum i40e_aq_link_speed link_speed;
+ u8 link_info;
+ u8 an_info;
+ u8 ext_info;
+ /* is Link Status Event notification to SW enabled */
+ bool lse_enable;
+};
+
+struct i40e_phy_info {
+ struct i40e_link_status link_info;
+ struct i40e_link_status link_info_old;
+ u32 autoneg_advertised;
+ u32 phy_id;
+ u32 module_type;
+ bool get_link_info;
+ enum i40e_media_type media_type;
+};
+
+#define I40E_HW_CAP_MAX_GPIO 30
+/* Capabilities of a PF or a VF or the whole device */
+struct i40e_hw_capabilities {
+ u32 switch_mode;
+#define I40E_NVM_IMAGE_TYPE_EVB 0x0
+#define I40E_NVM_IMAGE_TYPE_CLOUD 0x2
+#define I40E_NVM_IMAGE_TYPE_UDP_CLOUD 0x3
+
+ u32 management_mode;
+ u32 npar_enable;
+ u32 os2bmc;
+ u32 valid_functions;
+ bool sr_iov_1_1;
+ bool vmdq;
+ bool evb_802_1_qbg; /* Edge Virtual Bridging */
+ bool evb_802_1_qbh; /* Bridge Port Extension */
+ bool dcb;
+ bool fcoe;
+ bool mfp_mode_1;
+ bool mgmt_cem;
+ bool ieee_1588;
+ bool iwarp;
+ bool fd;
+ u32 fd_filters_guaranteed;
+ u32 fd_filters_best_effort;
+ bool rss;
+ u32 rss_table_size;
+ u32 rss_table_entry_width;
+ bool led[I40E_HW_CAP_MAX_GPIO];
+ bool sdp[I40E_HW_CAP_MAX_GPIO];
+ u32 nvm_image_type;
+ u32 num_flow_director_filters;
+ u32 num_vfs;
+ u32 vf_base_id;
+ u32 num_vsis;
+ u32 num_rx_qp;
+ u32 num_tx_qp;
+ u32 base_queue;
+ u32 num_msix_vectors;
+ u32 num_msix_vectors_vf;
+ u32 led_pin_num;
+ u32 sdp_pin_num;
+ u32 mdio_port_num;
+ u32 mdio_port_mode;
+ u8 rx_buf_chain_len;
+ u32 enabled_tcmap;
+ u32 maxtc;
+};
+
+struct i40e_mac_info {
+ enum i40e_mac_type type;
+ u8 addr[I40E_ETH_LENGTH_OF_ADDRESS];
+ u8 perm_addr[I40E_ETH_LENGTH_OF_ADDRESS];
+ u8 san_addr[I40E_ETH_LENGTH_OF_ADDRESS];
+ u16 max_fcoeq;
+};
+
+enum i40e_aq_resources_ids {
+ I40E_NVM_RESOURCE_ID = 1
+};
+
+enum i40e_aq_resource_access_type {
+ I40E_RESOURCE_READ = 1,
+ I40E_RESOURCE_WRITE
+};
+
+struct i40e_nvm_info {
+ u64 hw_semaphore_timeout; /* 2usec global time (GTIME resolution) */
+ u64 hw_semaphore_wait; /* - || - */
+ u32 timeout; /* [ms] */
+ u16 sr_size; /* Shadow RAM size in words */
+ bool blank_nvm_mode; /* is NVM empty (no FW present)*/
+ u16 version; /* NVM package version */
+ u32 eetrack; /* NVM data version */
+};
+
+/* PCI bus types */
+enum i40e_bus_type {
+ i40e_bus_type_unknown = 0,
+ i40e_bus_type_pci,
+ i40e_bus_type_pcix,
+ i40e_bus_type_pci_express,
+ i40e_bus_type_reserved
+};
+
+/* PCI bus speeds */
+enum i40e_bus_speed {
+ i40e_bus_speed_unknown = 0,
+ i40e_bus_speed_33 = 33,
+ i40e_bus_speed_66 = 66,
+ i40e_bus_speed_100 = 100,
+ i40e_bus_speed_120 = 120,
+ i40e_bus_speed_133 = 133,
+ i40e_bus_speed_2500 = 2500,
+ i40e_bus_speed_5000 = 5000,
+ i40e_bus_speed_8000 = 8000,
+ i40e_bus_speed_reserved
+};
+
+/* PCI bus widths */
+enum i40e_bus_width {
+ i40e_bus_width_unknown = 0,
+ i40e_bus_width_pcie_x1 = 1,
+ i40e_bus_width_pcie_x2 = 2,
+ i40e_bus_width_pcie_x4 = 4,
+ i40e_bus_width_pcie_x8 = 8,
+ i40e_bus_width_32 = 32,
+ i40e_bus_width_64 = 64,
+ i40e_bus_width_reserved
+};
+
+/* Bus parameters */
+struct i40e_bus_info {
+ enum i40e_bus_speed speed;
+ enum i40e_bus_width width;
+ enum i40e_bus_type type;
+
+ u16 func;
+ u16 device;
+ u16 lan_id;
+};
+
+/* Flow control (FC) parameters */
+struct i40e_fc_info {
+ enum i40e_fc_mode current_mode; /* FC mode in effect */
+ enum i40e_fc_mode requested_mode; /* FC mode requested by caller */
+};
+
+#define I40E_MAX_TRAFFIC_CLASS 8
+#define I40E_MAX_USER_PRIORITY 8
+#define I40E_DCBX_MAX_APPS 32
+#define I40E_LLDPDU_SIZE 1500
+
+/* IEEE 802.1Qaz ETS Configuration data */
+struct i40e_ieee_ets_config {
+ u8 willing;
+ u8 cbs;
+ u8 maxtcs;
+ u8 prioritytable[I40E_MAX_TRAFFIC_CLASS];
+ u8 tcbwtable[I40E_MAX_TRAFFIC_CLASS];
+ u8 tsatable[I40E_MAX_TRAFFIC_CLASS];
+};
+
+/* IEEE 802.1Qaz ETS Recommendation data */
+struct i40e_ieee_ets_recommend {
+ u8 prioritytable[I40E_MAX_TRAFFIC_CLASS];
+ u8 tcbwtable[I40E_MAX_TRAFFIC_CLASS];
+ u8 tsatable[I40E_MAX_TRAFFIC_CLASS];
+};
+
+/* IEEE 802.1Qaz PFC Configuration data */
+struct i40e_ieee_pfc_config {
+ u8 willing;
+ u8 mbc;
+ u8 pfccap;
+ u8 pfcenable;
+};
+
+/* IEEE 802.1Qaz Application Priority data */
+struct i40e_ieee_app_priority_table {
+ u8 priority;
+ u8 selector;
+ u16 protocolid;
+};
+
+struct i40e_dcbx_config {
+ u32 numapps;
+ struct i40e_ieee_ets_config etscfg;
+ struct i40e_ieee_ets_recommend etsrec;
+ struct i40e_ieee_pfc_config pfc;
+ struct i40e_ieee_app_priority_table app[I40E_DCBX_MAX_APPS];
+};
+
+/* Port hardware description */
+struct i40e_hw {
+ u8 __iomem *hw_addr;
+ void *back;
+
+ /* function pointer structs */
+ struct i40e_phy_info phy;
+ struct i40e_mac_info mac;
+ struct i40e_bus_info bus;
+ struct i40e_nvm_info nvm;
+ struct i40e_fc_info fc;
+
+ /* pci info */
+ u16 device_id;
+ u16 vendor_id;
+ u16 subsystem_device_id;
+ u16 subsystem_vendor_id;
+ u8 revision_id;
+ u8 port;
+ bool adapter_stopped;
+
+ /* capabilities for entire device and PCI func */
+ struct i40e_hw_capabilities dev_caps;
+ struct i40e_hw_capabilities func_caps;
+
+ /* Flow Director shared filter space */
+ u16 fdir_shared_filter_count;
+
+ /* device profile info */
+ u8 pf_id;
+ u16 main_vsi_seid;
+
+ /* Closest numa node to the device */
+ u16 numa_node;
+
+ /* Admin Queue info */
+ struct i40e_adminq_info aq;
+
+ /* HMC info */
+ struct i40e_hmc_info hmc; /* HMC info struct */
+
+ /* LLDP/DCBX Status */
+ u16 dcbx_status;
+
+ /* DCBX info */
+ struct i40e_dcbx_config local_dcbx_config;
+ struct i40e_dcbx_config remote_dcbx_config;
+
+ /* debug mask */
+ u32 debug_mask;
+};
+
+struct i40e_driver_version {
+ u8 major_version;
+ u8 minor_version;
+ u8 build_version;
+ u8 subbuild_version;
+};
+
+/* RX Descriptors */
+union i40e_16byte_rx_desc {
+ struct {
+ __le64 pkt_addr; /* Packet buffer address */
+ __le64 hdr_addr; /* Header buffer address */
+ } read;
+ struct {
+ struct {
+ struct {
+ union {
+ __le16 mirroring_status;
+ __le16 fcoe_ctx_id;
+ } mirr_fcoe;
+ __le16 l2tag1;
+ } lo_dword;
+ union {
+ __le32 rss; /* RSS Hash */
+ __le32 fd_id; /* Flow director filter id */
+ __le32 fcoe_param; /* FCoE DDP Context id */
+ } hi_dword;
+ } qword0;
+ struct {
+ /* ext status/error/pktype/length */
+ __le64 status_error_len;
+ } qword1;
+ } wb; /* writeback */
+};
+
+union i40e_32byte_rx_desc {
+ struct {
+ __le64 pkt_addr; /* Packet buffer address */
+ __le64 hdr_addr; /* Header buffer address */
+ /* bit 0 of hdr_buffer_addr is DD bit */
+ __le64 rsvd1;
+ __le64 rsvd2;
+ } read;
+ struct {
+ struct {
+ struct {
+ union {
+ __le16 mirroring_status;
+ __le16 fcoe_ctx_id;
+ } mirr_fcoe;
+ __le16 l2tag1;
+ } lo_dword;
+ union {
+ __le32 rss; /* RSS Hash */
+ __le32 fcoe_param; /* FCoE DDP Context id */
+ } hi_dword;
+ } qword0;
+ struct {
+ /* status/error/pktype/length */
+ __le64 status_error_len;
+ } qword1;
+ struct {
+ __le16 ext_status; /* extended status */
+ __le16 rsvd;
+ __le16 l2tag2_1;
+ __le16 l2tag2_2;
+ } qword2;
+ struct {
+ union {
+ __le32 flex_bytes_lo;
+ __le32 pe_status;
+ } lo_dword;
+ union {
+ __le32 flex_bytes_hi;
+ __le32 fd_id;
+ } hi_dword;
+ } qword3;
+ } wb; /* writeback */
+};
+
+#define I40E_RXD_QW1_STATUS_SHIFT 0
+#define I40E_RXD_QW1_STATUS_MASK (0x7FFFUL << I40E_RXD_QW1_STATUS_SHIFT)
+
+enum i40e_rx_desc_status_bits {
+ /* Note: These are predefined bit offsets */
+ I40E_RX_DESC_STATUS_DD_SHIFT = 0,
+ I40E_RX_DESC_STATUS_EOF_SHIFT = 1,
+ I40E_RX_DESC_STATUS_L2TAG1P_SHIFT = 2,
+ I40E_RX_DESC_STATUS_L3L4P_SHIFT = 3,
+ I40E_RX_DESC_STATUS_CRCP_SHIFT = 4,
+ I40E_RX_DESC_STATUS_TSYNINDX_SHIFT = 5, /* 3 BITS */
+ I40E_RX_DESC_STATUS_PIF_SHIFT = 8,
+ I40E_RX_DESC_STATUS_UMBCAST_SHIFT = 9, /* 2 BITS */
+ I40E_RX_DESC_STATUS_FLM_SHIFT = 11,
+ I40E_RX_DESC_STATUS_FLTSTAT_SHIFT = 12, /* 2 BITS */
+ I40E_RX_DESC_STATUS_LPBK_SHIFT = 14
+};
+
+#define I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT I40E_RX_DESC_STATUS_TSYNINDX_SHIFT
+#define I40E_RXD_QW1_STATUS_TSYNINDX_MASK (0x7UL << \
+ I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT)
+
+enum i40e_rx_desc_fltstat_values {
+ I40E_RX_DESC_FLTSTAT_NO_DATA = 0,
+ I40E_RX_DESC_FLTSTAT_RSV_FD_ID = 1, /* 16byte desc? FD_ID : RSV */
+ I40E_RX_DESC_FLTSTAT_RSV = 2,
+ I40E_RX_DESC_FLTSTAT_RSS_HASH = 3,
+};
+
+#define I40E_RXD_QW1_ERROR_SHIFT 19
+#define I40E_RXD_QW1_ERROR_MASK (0xFFUL << I40E_RXD_QW1_ERROR_SHIFT)
+
+enum i40e_rx_desc_error_bits {
+ /* Note: These are predefined bit offsets */
+ I40E_RX_DESC_ERROR_RXE_SHIFT = 0,
+ I40E_RX_DESC_ERROR_RECIPE_SHIFT = 1,
+ I40E_RX_DESC_ERROR_HBO_SHIFT = 2,
+ I40E_RX_DESC_ERROR_L3L4E_SHIFT = 3, /* 3 BITS */
+ I40E_RX_DESC_ERROR_IPE_SHIFT = 3,
+ I40E_RX_DESC_ERROR_L4E_SHIFT = 4,
+ I40E_RX_DESC_ERROR_EIPE_SHIFT = 5,
+ I40E_RX_DESC_ERROR_OVERSIZE_SHIFT = 6
+};
+
+enum i40e_rx_desc_error_l3l4e_fcoe_masks {
+ I40E_RX_DESC_ERROR_L3L4E_NONE = 0,
+ I40E_RX_DESC_ERROR_L3L4E_PROT = 1,
+ I40E_RX_DESC_ERROR_L3L4E_FC = 2,
+ I40E_RX_DESC_ERROR_L3L4E_DMAC_ERR = 3,
+ I40E_RX_DESC_ERROR_L3L4E_DMAC_WARN = 4
+};
+
+#define I40E_RXD_QW1_PTYPE_SHIFT 30
+#define I40E_RXD_QW1_PTYPE_MASK (0xFFULL << I40E_RXD_QW1_PTYPE_SHIFT)
+
+/* Packet type non-ip values */
+enum i40e_rx_l2_ptype {
+ I40E_RX_PTYPE_L2_RESERVED = 0,
+ I40E_RX_PTYPE_L2_MAC_PAY2 = 1,
+ I40E_RX_PTYPE_L2_TIMESYNC_PAY2 = 2,
+ I40E_RX_PTYPE_L2_FIP_PAY2 = 3,
+ I40E_RX_PTYPE_L2_OUI_PAY2 = 4,
+ I40E_RX_PTYPE_L2_MACCNTRL_PAY2 = 5,
+ I40E_RX_PTYPE_L2_LLDP_PAY2 = 6,
+ I40E_RX_PTYPE_L2_ECP_PAY2 = 7,
+ I40E_RX_PTYPE_L2_EVB_PAY2 = 8,
+ I40E_RX_PTYPE_L2_QCN_PAY2 = 9,
+ I40E_RX_PTYPE_L2_EAPOL_PAY2 = 10,
+ I40E_RX_PTYPE_L2_ARP = 11,
+ I40E_RX_PTYPE_L2_FCOE_PAY3 = 12,
+ I40E_RX_PTYPE_L2_FCOE_FCDATA_PAY3 = 13,
+ I40E_RX_PTYPE_L2_FCOE_FCRDY_PAY3 = 14,
+ I40E_RX_PTYPE_L2_FCOE_FCRSP_PAY3 = 15,
+ I40E_RX_PTYPE_L2_FCOE_FCOTHER_PA = 16,
+ I40E_RX_PTYPE_L2_FCOE_VFT_PAY3 = 17,
+ I40E_RX_PTYPE_L2_FCOE_VFT_FCDATA = 18,
+ I40E_RX_PTYPE_L2_FCOE_VFT_FCRDY = 19,
+ I40E_RX_PTYPE_L2_FCOE_VFT_FCRSP = 20,
+ I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER = 21
+};
+
+struct i40e_rx_ptype_decoded {
+ u32 ptype:8;
+ u32 known:1;
+ u32 outer_ip:1;
+ u32 outer_ip_ver:1;
+ u32 outer_frag:1;
+ u32 tunnel_type:3;
+ u32 tunnel_end_prot:2;
+ u32 tunnel_end_frag:1;
+ u32 inner_prot:4;
+ u32 payload_layer:3;
+};
+
+enum i40e_rx_ptype_outer_ip {
+ I40E_RX_PTYPE_OUTER_L2 = 0,
+ I40E_RX_PTYPE_OUTER_IP = 1
+};
+
+enum i40e_rx_ptype_outer_ip_ver {
+ I40E_RX_PTYPE_OUTER_NONE = 0,
+ I40E_RX_PTYPE_OUTER_IPV4 = 0,
+ I40E_RX_PTYPE_OUTER_IPV6 = 1
+};
+
+enum i40e_rx_ptype_outer_fragmented {
+ I40E_RX_PTYPE_NOT_FRAG = 0,
+ I40E_RX_PTYPE_FRAG = 1
+};
+
+enum i40e_rx_ptype_tunnel_type {
+ I40E_RX_PTYPE_TUNNEL_NONE = 0,
+ I40E_RX_PTYPE_TUNNEL_IP_IP = 1,
+ I40E_RX_PTYPE_TUNNEL_IP_GRENAT = 2,
+ I40E_RX_PTYPE_TUNNEL_IP_GRENAT_MAC = 3,
+ I40E_RX_PTYPE_TUNNEL_IP_GRENAT_MAC_VLAN = 4,
+};
+
+enum i40e_rx_ptype_tunnel_end_prot {
+ I40E_RX_PTYPE_TUNNEL_END_NONE = 0,
+ I40E_RX_PTYPE_TUNNEL_END_IPV4 = 1,
+ I40E_RX_PTYPE_TUNNEL_END_IPV6 = 2,
+};
+
+enum i40e_rx_ptype_inner_prot {
+ I40E_RX_PTYPE_INNER_PROT_NONE = 0,
+ I40E_RX_PTYPE_INNER_PROT_UDP = 1,
+ I40E_RX_PTYPE_INNER_PROT_TCP = 2,
+ I40E_RX_PTYPE_INNER_PROT_SCTP = 3,
+ I40E_RX_PTYPE_INNER_PROT_ICMP = 4,
+ I40E_RX_PTYPE_INNER_PROT_TIMESYNC = 5
+};
+
+enum i40e_rx_ptype_payload_layer {
+ I40E_RX_PTYPE_PAYLOAD_LAYER_NONE = 0,
+ I40E_RX_PTYPE_PAYLOAD_LAYER_PAY2 = 1,
+ I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3 = 2,
+ I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4 = 3,
+};
+
+#define I40E_RXD_QW1_LENGTH_PBUF_SHIFT 38
+#define I40E_RXD_QW1_LENGTH_PBUF_MASK (0x3FFFULL << \
+ I40E_RXD_QW1_LENGTH_PBUF_SHIFT)
+
+#define I40E_RXD_QW1_LENGTH_HBUF_SHIFT 52
+#define I40E_RXD_QW1_LENGTH_HBUF_MASK (0x7FFULL << \
+ I40E_RXD_QW1_LENGTH_HBUF_SHIFT)
+
+#define I40E_RXD_QW1_LENGTH_SPH_SHIFT 63
+#define I40E_RXD_QW1_LENGTH_SPH_MASK (0x1ULL << \
+ I40E_RXD_QW1_LENGTH_SPH_SHIFT)
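+
+/* Rx descriptor QW1 writeback layout, per the masks above: status in
+ * bits 14:0, error in 26:19, ptype in 37:30, packet buffer length in
+ * 51:38, header buffer length in 62:52 and the SPH flag in bit 63
+ */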
+
+enum i40e_rx_desc_ext_status_bits {
+ /* Note: These are predefined bit offsets */
+ I40E_RX_DESC_EXT_STATUS_L2TAG2P_SHIFT = 0,
+ I40E_RX_DESC_EXT_STATUS_L2TAG3P_SHIFT = 1,
+ I40E_RX_DESC_EXT_STATUS_FLEXBL_SHIFT = 2, /* 2 BITS */
+ I40E_RX_DESC_EXT_STATUS_FLEXBH_SHIFT = 4, /* 2 BITS */
+ I40E_RX_DESC_EXT_STATUS_FTYPE_SHIFT = 6, /* 3 BITS */
+ I40E_RX_DESC_EXT_STATUS_FDLONGB_SHIFT = 9,
+ I40E_RX_DESC_EXT_STATUS_FCOELONGB_SHIFT = 10,
+ I40E_RX_DESC_EXT_STATUS_PELONGB_SHIFT = 11,
+};
+
+enum i40e_rx_desc_pe_status_bits {
+ /* Note: These are predefined bit offsets */
+ I40E_RX_DESC_PE_STATUS_QPID_SHIFT = 0, /* 18 BITS */
+ I40E_RX_DESC_PE_STATUS_L4PORT_SHIFT = 0, /* 16 BITS */
+ I40E_RX_DESC_PE_STATUS_IPINDEX_SHIFT = 16, /* 8 BITS */
+ I40E_RX_DESC_PE_STATUS_QPIDHIT_SHIFT = 24,
+ I40E_RX_DESC_PE_STATUS_APBVTHIT_SHIFT = 25,
+ I40E_RX_DESC_PE_STATUS_PORTV_SHIFT = 26,
+ I40E_RX_DESC_PE_STATUS_URG_SHIFT = 27,
+ I40E_RX_DESC_PE_STATUS_IPFRAG_SHIFT = 28,
+ I40E_RX_DESC_PE_STATUS_IPOPT_SHIFT = 29
+};
+
+#define I40E_RX_PROG_STATUS_DESC_LENGTH_SHIFT 38
+#define I40E_RX_PROG_STATUS_DESC_LENGTH 0x2000000
+
+#define I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT 2
+#define I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK (0x7UL << \
+ I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT)
+
+#define I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT 19
+#define I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK (0x3FUL << \
+ I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT)
+
+enum i40e_rx_prog_status_desc_status_bits {
+ /* Note: These are predefined bit offsets */
+ I40E_RX_PROG_STATUS_DESC_DD_SHIFT = 0,
+ I40E_RX_PROG_STATUS_DESC_PROG_ID_SHIFT = 2 /* 3 BITS */
+};
+
+enum i40e_rx_prog_status_desc_prog_id_masks {
+ I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS = 1,
+ I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_PROG_STATUS = 2,
+ I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_INVL_STATUS = 4,
+};
+
+enum i40e_rx_prog_status_desc_error_bits {
+ /* Note: These are predefined bit offsets */
+ I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT = 0,
+ I40E_RX_PROG_STATUS_DESC_NO_FD_QUOTA_SHIFT = 1,
+ I40E_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT = 2,
+ I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT = 3
+};
+
+/* TX Descriptor */
+struct i40e_tx_desc {
+ __le64 buffer_addr; /* Address of descriptor's data buf */
+ __le64 cmd_type_offset_bsz;
+};
+
+#define I40E_TXD_QW1_DTYPE_SHIFT 0
+#define I40E_TXD_QW1_DTYPE_MASK (0xFUL << I40E_TXD_QW1_DTYPE_SHIFT)
+
+enum i40e_tx_desc_dtype_value {
+ I40E_TX_DESC_DTYPE_DATA = 0x0,
+ I40E_TX_DESC_DTYPE_NOP = 0x1, /* same as Context desc */
+ I40E_TX_DESC_DTYPE_CONTEXT = 0x1,
+ I40E_TX_DESC_DTYPE_FCOE_CTX = 0x2,
+ I40E_TX_DESC_DTYPE_FILTER_PROG = 0x8,
+ I40E_TX_DESC_DTYPE_DDP_CTX = 0x9,
+ I40E_TX_DESC_DTYPE_FLEX_DATA = 0xB,
+ I40E_TX_DESC_DTYPE_FLEX_CTX_1 = 0xC,
+ I40E_TX_DESC_DTYPE_FLEX_CTX_2 = 0xD,
+ I40E_TX_DESC_DTYPE_DESC_DONE = 0xF
+};
+
+#define I40E_TXD_QW1_CMD_SHIFT 4
+#define I40E_TXD_QW1_CMD_MASK (0x3FFUL << I40E_TXD_QW1_CMD_SHIFT)
+
+enum i40e_tx_desc_cmd_bits {
+ I40E_TX_DESC_CMD_EOP = 0x0001,
+ I40E_TX_DESC_CMD_RS = 0x0002,
+ I40E_TX_DESC_CMD_ICRC = 0x0004,
+ I40E_TX_DESC_CMD_IL2TAG1 = 0x0008,
+ I40E_TX_DESC_CMD_DUMMY = 0x0010,
+ I40E_TX_DESC_CMD_IIPT_NONIP = 0x0000, /* 2 BITS */
+ I40E_TX_DESC_CMD_IIPT_IPV6 = 0x0020, /* 2 BITS */
+ I40E_TX_DESC_CMD_IIPT_IPV4 = 0x0040, /* 2 BITS */
+ I40E_TX_DESC_CMD_IIPT_IPV4_CSUM = 0x0060, /* 2 BITS */
+ I40E_TX_DESC_CMD_FCOET = 0x0080,
+ I40E_TX_DESC_CMD_L4T_EOFT_UNK = 0x0000, /* 2 BITS */
+ I40E_TX_DESC_CMD_L4T_EOFT_TCP = 0x0100, /* 2 BITS */
+ I40E_TX_DESC_CMD_L4T_EOFT_SCTP = 0x0200, /* 2 BITS */
+ I40E_TX_DESC_CMD_L4T_EOFT_UDP = 0x0300, /* 2 BITS */
+ I40E_TX_DESC_CMD_L4T_EOFT_EOF_N = 0x0000, /* 2 BITS */
+ I40E_TX_DESC_CMD_L4T_EOFT_EOF_T = 0x0100, /* 2 BITS */
+ I40E_TX_DESC_CMD_L4T_EOFT_EOF_NI = 0x0200, /* 2 BITS */
+ I40E_TX_DESC_CMD_L4T_EOFT_EOF_A = 0x0300, /* 2 BITS */
+};
+
+#define I40E_TXD_QW1_OFFSET_SHIFT 16
+#define I40E_TXD_QW1_OFFSET_MASK (0x3FFFFULL << \
+ I40E_TXD_QW1_OFFSET_SHIFT)
+
+enum i40e_tx_desc_length_fields {
+ /* Note: These are predefined bit offsets */
+ I40E_TX_DESC_LENGTH_MACLEN_SHIFT = 0, /* 7 BITS */
+ I40E_TX_DESC_LENGTH_IPLEN_SHIFT = 7, /* 7 BITS */
+ I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT = 14 /* 4 BITS */
+};
+
+#define I40E_TXD_QW1_TX_BUF_SZ_SHIFT 34
+#define I40E_TXD_QW1_TX_BUF_SZ_MASK (0x3FFFULL << \
+ I40E_TXD_QW1_TX_BUF_SZ_SHIFT)
+
+#define I40E_TXD_QW1_L2TAG1_SHIFT 48
+#define I40E_TXD_QW1_L2TAG1_MASK (0xFFFFULL << I40E_TXD_QW1_L2TAG1_SHIFT)
+
+/* Context descriptors */
+struct i40e_tx_context_desc {
+ __le32 tunneling_params;
+ __le16 l2tag2;
+ __le16 rsvd;
+ __le64 type_cmd_tso_mss;
+};
+
+#define I40E_TXD_CTX_QW1_DTYPE_SHIFT 0
+#define I40E_TXD_CTX_QW1_DTYPE_MASK (0xFUL << I40E_TXD_CTX_QW1_DTYPE_SHIFT)
+
+#define I40E_TXD_CTX_QW1_CMD_SHIFT 4
+#define I40E_TXD_CTX_QW1_CMD_MASK (0xFFFFUL << I40E_TXD_CTX_QW1_CMD_SHIFT)
+
+enum i40e_tx_ctx_desc_cmd_bits {
+ I40E_TX_CTX_DESC_TSO = 0x01,
+ I40E_TX_CTX_DESC_TSYN = 0x02,
+ I40E_TX_CTX_DESC_IL2TAG2 = 0x04,
+ I40E_TX_CTX_DESC_IL2TAG2_IL2H = 0x08,
+ I40E_TX_CTX_DESC_SWTCH_NOTAG = 0x00,
+ I40E_TX_CTX_DESC_SWTCH_UPLINK = 0x10,
+ I40E_TX_CTX_DESC_SWTCH_LOCAL = 0x20,
+ I40E_TX_CTX_DESC_SWTCH_VSI = 0x30,
+ I40E_TX_CTX_DESC_SWPE = 0x40
+};
+
+#define I40E_TXD_CTX_QW1_TSO_LEN_SHIFT 30
+#define I40E_TXD_CTX_QW1_TSO_LEN_MASK (0x3FFFFULL << \
+ I40E_TXD_CTX_QW1_TSO_LEN_SHIFT)
+
+#define I40E_TXD_CTX_QW1_MSS_SHIFT 50
+#define I40E_TXD_CTX_QW1_MSS_MASK (0x3FFFULL << \
+ I40E_TXD_CTX_QW1_MSS_SHIFT)
+
+#define I40E_TXD_CTX_QW1_VSI_SHIFT 50
+#define I40E_TXD_CTX_QW1_VSI_MASK (0x1FFULL << I40E_TXD_CTX_QW1_VSI_SHIFT)
+
+#define I40E_TXD_CTX_QW0_EXT_IP_SHIFT 0
+#define I40E_TXD_CTX_QW0_EXT_IP_MASK (0x3ULL << \
+ I40E_TXD_CTX_QW0_EXT_IP_SHIFT)
+
+enum i40e_tx_ctx_desc_eipt_offload {
+ I40E_TX_CTX_EXT_IP_NONE = 0x0,
+ I40E_TX_CTX_EXT_IP_IPV6 = 0x1,
+ I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM = 0x2,
+ I40E_TX_CTX_EXT_IP_IPV4 = 0x3
+};
+
+#define I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT 2
+#define I40E_TXD_CTX_QW0_EXT_IPLEN_MASK (0x3FULL << \
+ I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT)
+
+#define I40E_TXD_CTX_QW0_NATT_SHIFT 9
+#define I40E_TXD_CTX_QW0_NATT_MASK (0x3ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
+
+#define I40E_TXD_CTX_UDP_TUNNELING (0x1ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
+#define I40E_TXD_CTX_GRE_TUNNELING (0x2ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
+
+#define I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT 11
+#define I40E_TXD_CTX_QW0_EIP_NOINC_MASK (0x1ULL << \
+ I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT)
+
+#define I40E_TXD_CTX_EIP_NOINC_IPID_CONST I40E_TXD_CTX_QW0_EIP_NOINC_MASK
+
+#define I40E_TXD_CTX_QW0_NATLEN_SHIFT 12
+#define I40E_TXD_CTX_QW0_NATLEN_MASK (0X7FULL << \
+ I40E_TXD_CTX_QW0_NATLEN_SHIFT)
+
+#define I40E_TXD_CTX_QW0_DECTTL_SHIFT 19
+#define I40E_TXD_CTX_QW0_DECTTL_MASK (0xFULL << \
+ I40E_TXD_CTX_QW0_DECTTL_SHIFT)
+
+struct i40e_filter_program_desc {
+ __le32 qindex_flex_ptype_vsi;
+ __le32 rsvd;
+ __le32 dtype_cmd_cntindex;
+ __le32 fd_id;
+};
+#define I40E_TXD_FLTR_QW0_QINDEX_SHIFT 0
+#define I40E_TXD_FLTR_QW0_QINDEX_MASK (0x7FFUL << \
+ I40E_TXD_FLTR_QW0_QINDEX_SHIFT)
+#define I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT 11
+#define I40E_TXD_FLTR_QW0_FLEXOFF_MASK (0x7UL << \
+ I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT)
+#define I40E_TXD_FLTR_QW0_PCTYPE_SHIFT 17
+#define I40E_TXD_FLTR_QW0_PCTYPE_MASK (0x3FUL << \
+ I40E_TXD_FLTR_QW0_PCTYPE_SHIFT)
+
+/* Packet Classifier Types for filters */
+enum i40e_filter_pctype {
+ /* Note: Value 0-25 are reserved for future use */
+ I40E_FILTER_PCTYPE_IPV4_TEREDO_UDP = 26,
+ I40E_FILTER_PCTYPE_IPV6_TEREDO_UDP = 27,
+ I40E_FILTER_PCTYPE_NONF_IPV4_1588_UDP = 28,
+ I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP = 29,
+ I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP = 30,
+ I40E_FILTER_PCTYPE_NONF_IPV4_UDP = 31,
+ I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN = 32,
+ I40E_FILTER_PCTYPE_NONF_IPV4_TCP = 33,
+ I40E_FILTER_PCTYPE_NONF_IPV4_SCTP = 34,
+ I40E_FILTER_PCTYPE_NONF_IPV4_OTHER = 35,
+ I40E_FILTER_PCTYPE_FRAG_IPV4 = 36,
+ /* Note: Value 37 is reserved for future use */
+ I40E_FILTER_PCTYPE_NONF_IPV6_1588_UDP = 38,
+ I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP = 39,
+ I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP = 40,
+ I40E_FILTER_PCTYPE_NONF_IPV6_UDP = 41,
+ I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN = 42,
+ I40E_FILTER_PCTYPE_NONF_IPV6_TCP = 43,
+ I40E_FILTER_PCTYPE_NONF_IPV6_SCTP = 44,
+ I40E_FILTER_PCTYPE_NONF_IPV6_OTHER = 45,
+ I40E_FILTER_PCTYPE_FRAG_IPV6 = 46,
+ /* Note: Value 47 is reserved for future use */
+ I40E_FILTER_PCTYPE_FCOE_OX = 48,
+ I40E_FILTER_PCTYPE_FCOE_RX = 49,
+ /* Note: Value 50-62 are reserved for future use */
+ I40E_FILTER_PCTYPE_L2_PAYLOAD = 63,
+};
+
+enum i40e_filter_program_desc_dest {
+ I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET = 0x0,
+ I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX = 0x1,
+ I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_OTHER = 0x2,
+};
+
+enum i40e_filter_program_desc_fd_status {
+ I40E_FILTER_PROGRAM_DESC_FD_STATUS_NONE = 0x0,
+ I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID = 0x1,
+ I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID_4FLEX_BYTES = 0x2,
+ I40E_FILTER_PROGRAM_DESC_FD_STATUS_8FLEX_BYTES = 0x3,
+};
+
+#define I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT 23
+#define I40E_TXD_FLTR_QW0_DEST_VSI_MASK (0x1FFUL << \
+ I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT)
+
+#define I40E_TXD_FLTR_QW1_CMD_SHIFT 4
+#define I40E_TXD_FLTR_QW1_CMD_MASK (0xFFFFULL << \
+ I40E_TXD_FLTR_QW1_CMD_SHIFT)
+
+#define I40E_TXD_FLTR_QW1_PCMD_SHIFT (0x0ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT)
+#define I40E_TXD_FLTR_QW1_PCMD_MASK (0x7ULL << I40E_TXD_FLTR_QW1_PCMD_SHIFT)
+
+enum i40e_filter_program_desc_pcmd {
+ I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE = 0x1,
+ I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE = 0x2,
+};
+
+#define I40E_TXD_FLTR_QW1_DEST_SHIFT (0x3ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT)
+#define I40E_TXD_FLTR_QW1_DEST_MASK (0x3ULL << I40E_TXD_FLTR_QW1_DEST_SHIFT)
+
+#define I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT (0x7ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT)
+#define I40E_TXD_FLTR_QW1_CNT_ENA_MASK (0x1ULL << \
+ I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT)
+
+#define I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT (0x9ULL + \
+ I40E_TXD_FLTR_QW1_CMD_SHIFT)
+#define I40E_TXD_FLTR_QW1_FD_STATUS_MASK (0x3ULL << \
+ I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT)
+
+#define I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT 20
+#define I40E_TXD_FLTR_QW1_CNTINDEX_MASK (0x1FFUL << \
+ I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT)
+
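+/* Example: composing the dtype_cmd_cntindex word of a filter program
+ * descriptor from the fields above. This is an illustrative sketch with
+ * made-up values; "dtype_filter_prog" stands in for the descriptor type
+ * bits defined elsewhere in this file:
+ *
+ *	u32 dcc = dtype_filter_prog;
+ *	dcc |= I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
+ *	       I40E_TXD_FLTR_QW1_PCMD_SHIFT;
+ *	dcc |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX <<
+ *	       I40E_TXD_FLTR_QW1_DEST_SHIFT;
+ *	dcc |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
+ *	       I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;
+ *	fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dcc);
+ */
+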
+enum i40e_filter_type {
+ I40E_FLOW_DIRECTOR_FLTR = 0,
+ I40E_PE_QUAD_HASH_FLTR = 1,
+ I40E_ETHERTYPE_FLTR,
+ I40E_FCOE_CTX_FLTR,
+ I40E_MAC_VLAN_FLTR,
+ I40E_HASH_FLTR
+};
+
+struct i40e_vsi_context {
+ u16 seid;
+ u16 uplink_seid;
+ u16 vsi_number;
+ u16 vsis_allocated;
+ u16 vsis_unallocated;
+ u16 flags;
+ u8 pf_num;
+ u8 vf_num;
+ u8 connection_type;
+ struct i40e_aqc_vsi_properties_data info;
+};
+
+/* Statistics collected by each port, VSI, VEB, and S-channel */
+struct i40e_eth_stats {
+ u64 rx_bytes; /* gorc */
+ u64 rx_unicast; /* uprc */
+ u64 rx_multicast; /* mprc */
+ u64 rx_broadcast; /* bprc */
+ u64 rx_discards; /* rdpc */
+ u64 rx_errors; /* repc */
+ u64 rx_missed; /* rmpc */
+ u64 rx_unknown_protocol; /* rupp */
+ u64 tx_bytes; /* gotc */
+ u64 tx_unicast; /* uptc */
+ u64 tx_multicast; /* mptc */
+ u64 tx_broadcast; /* bptc */
+ u64 tx_discards; /* tdpc */
+ u64 tx_errors; /* tepc */
+};
+
+/* Statistics collected by the MAC */
+struct i40e_hw_port_stats {
+ /* eth stats collected by the port */
+ struct i40e_eth_stats eth;
+
+ /* additional port specific stats */
+ u64 tx_dropped_link_down; /* tdold */
+ u64 crc_errors; /* crcerrs */
+ u64 illegal_bytes; /* illerrc */
+ u64 error_bytes; /* errbc */
+ u64 mac_local_faults; /* mlfc */
+ u64 mac_remote_faults; /* mrfc */
+ u64 rx_length_errors; /* rlec */
+ u64 link_xon_rx; /* lxonrxc */
+ u64 link_xoff_rx; /* lxoffrxc */
+ u64 priority_xon_rx[8]; /* pxonrxc[8] */
+ u64 priority_xoff_rx[8]; /* pxoffrxc[8] */
+ u64 link_xon_tx; /* lxontxc */
+ u64 link_xoff_tx; /* lxofftxc */
+ u64 priority_xon_tx[8]; /* pxontxc[8] */
+ u64 priority_xoff_tx[8]; /* pxofftxc[8] */
+ u64 priority_xon_2_xoff[8]; /* pxon2offc[8] */
+ u64 rx_size_64; /* prc64 */
+ u64 rx_size_127; /* prc127 */
+ u64 rx_size_255; /* prc255 */
+ u64 rx_size_511; /* prc511 */
+ u64 rx_size_1023; /* prc1023 */
+ u64 rx_size_1522; /* prc1522 */
+ u64 rx_size_big; /* prc9522 */
+ u64 rx_undersize; /* ruc */
+ u64 rx_fragments; /* rfc */
+ u64 rx_oversize; /* roc */
+ u64 rx_jabber; /* rjc */
+ u64 tx_size_64; /* ptc64 */
+ u64 tx_size_127; /* ptc127 */
+ u64 tx_size_255; /* ptc255 */
+ u64 tx_size_511; /* ptc511 */
+ u64 tx_size_1023; /* ptc1023 */
+ u64 tx_size_1522; /* ptc1522 */
+ u64 tx_size_big; /* ptc9522 */
+ u64 mac_short_packet_dropped; /* mspdc */
+ u64 checksum_error; /* xec */
+};
+
+/* Checksum and Shadow RAM pointers */
+#define I40E_SR_NVM_CONTROL_WORD 0x00
+#define I40E_SR_EMP_MODULE_PTR 0x0F
+#define I40E_SR_NVM_IMAGE_VERSION 0x18
+#define I40E_SR_ALTERNATE_SAN_MAC_ADDRESS_PTR 0x27
+#define I40E_SR_NVM_EETRACK_LO 0x2D
+#define I40E_SR_NVM_EETRACK_HI 0x2E
+#define I40E_SR_VPD_PTR 0x2F
+#define I40E_SR_PCIE_ALT_AUTO_LOAD_PTR 0x3E
+#define I40E_SR_SW_CHECKSUM_WORD 0x3F
+
+/* Auxiliary field, mask and shift definition for Shadow RAM and NVM Flash */
+#define I40E_SR_VPD_MODULE_MAX_SIZE 1024
+#define I40E_SR_PCIE_ALT_MODULE_MAX_SIZE 1024
+#define I40E_SR_CONTROL_WORD_1_SHIFT 0x06
+#define I40E_SR_CONTROL_WORD_1_MASK (0x03 << I40E_SR_CONTROL_WORD_1_SHIFT)
+
+/* Shadow RAM related */
+#define I40E_SR_SECTOR_SIZE_IN_WORDS 0x800
+#define I40E_SR_WORDS_IN_1KB 512
+/* The checksum is calculated such that, after adding all the words
+ * (including the checksum word itself), the sum equals 0xBABA.
+ */
+#define I40E_SR_SW_CHECKSUM_BASE 0xBABA
+
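+/* Illustrative checksum verification sketch (a real implementation reads
+ * words through the Shadow RAM access routines rather than a flat array;
+ * nvm_size_in_words and sr_word[] are hypothetical here, and the sum
+ * includes the checksum word itself):
+ *
+ *	u16 sum = 0;
+ *	for (i = 0; i < nvm_size_in_words; i++)
+ *		sum += sr_word[i];
+ *	valid = (sum == I40E_SR_SW_CHECKSUM_BASE);
+ */
+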
+#define I40E_SRRD_SRCTL_ATTEMPTS 100000
+
+enum i40e_switch_element_types {
+ I40E_SWITCH_ELEMENT_TYPE_MAC = 1,
+ I40E_SWITCH_ELEMENT_TYPE_PF = 2,
+ I40E_SWITCH_ELEMENT_TYPE_VF = 3,
+ I40E_SWITCH_ELEMENT_TYPE_EMP = 4,
+ I40E_SWITCH_ELEMENT_TYPE_BMC = 6,
+ I40E_SWITCH_ELEMENT_TYPE_PE = 16,
+ I40E_SWITCH_ELEMENT_TYPE_VEB = 17,
+ I40E_SWITCH_ELEMENT_TYPE_PA = 18,
+ I40E_SWITCH_ELEMENT_TYPE_VSI = 19,
+};
+
+/* Supported EtherType filters */
+enum i40e_ether_type_index {
+ I40E_ETHER_TYPE_1588 = 0,
+ I40E_ETHER_TYPE_FIP = 1,
+ I40E_ETHER_TYPE_OUI_EXTENDED = 2,
+ I40E_ETHER_TYPE_MAC_CONTROL = 3,
+ I40E_ETHER_TYPE_LLDP = 4,
+ I40E_ETHER_TYPE_EVB_PROTOCOL1 = 5,
+ I40E_ETHER_TYPE_EVB_PROTOCOL2 = 6,
+ I40E_ETHER_TYPE_QCN_CNM = 7,
+ I40E_ETHER_TYPE_8021X = 8,
+ I40E_ETHER_TYPE_ARP = 9,
+ I40E_ETHER_TYPE_RSV1 = 10,
+ I40E_ETHER_TYPE_RSV2 = 11,
+};
+
+/* Filter context base size is 1K */
+#define I40E_HASH_FILTER_BASE_SIZE 1024
+/* Supported Hash filter values */
+enum i40e_hash_filter_size {
+ I40E_HASH_FILTER_SIZE_1K = 0,
+ I40E_HASH_FILTER_SIZE_2K = 1,
+ I40E_HASH_FILTER_SIZE_4K = 2,
+ I40E_HASH_FILTER_SIZE_8K = 3,
+ I40E_HASH_FILTER_SIZE_16K = 4,
+ I40E_HASH_FILTER_SIZE_32K = 5,
+ I40E_HASH_FILTER_SIZE_64K = 6,
+ I40E_HASH_FILTER_SIZE_128K = 7,
+ I40E_HASH_FILTER_SIZE_256K = 8,
+ I40E_HASH_FILTER_SIZE_512K = 9,
+ I40E_HASH_FILTER_SIZE_1M = 10,
+};
+
+/* DMA context base size is 0.5K */
+#define I40E_DMA_CNTX_BASE_SIZE 512
+/* Supported DMA context values */
+enum i40e_dma_cntx_size {
+ I40E_DMA_CNTX_SIZE_512 = 0,
+ I40E_DMA_CNTX_SIZE_1K = 1,
+ I40E_DMA_CNTX_SIZE_2K = 2,
+ I40E_DMA_CNTX_SIZE_4K = 3,
+ I40E_DMA_CNTX_SIZE_8K = 4,
+ I40E_DMA_CNTX_SIZE_16K = 5,
+ I40E_DMA_CNTX_SIZE_32K = 6,
+ I40E_DMA_CNTX_SIZE_64K = 7,
+ I40E_DMA_CNTX_SIZE_128K = 8,
+ I40E_DMA_CNTX_SIZE_256K = 9,
+};
+
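+/* In both enums above the value acts as a shift count applied to the base
+ * size, i.e. actual size = base << value. For example,
+ * I40E_HASH_FILTER_BASE_SIZE << I40E_HASH_FILTER_SIZE_8K = 1024 << 3 = 8K
+ * buckets, and I40E_DMA_CNTX_BASE_SIZE << I40E_DMA_CNTX_SIZE_4K =
+ * 512 << 3 = 4K contexts.
+ */
+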
+/* Supported Hash look up table (LUT) sizes */
+enum i40e_hash_lut_size {
+ I40E_HASH_LUT_SIZE_128 = 0,
+ I40E_HASH_LUT_SIZE_512 = 1,
+};
+
+/* Structure to hold a per PF filter control settings */
+struct i40e_filter_control_settings {
+ /* number of PE Quad Hash filter buckets */
+ enum i40e_hash_filter_size pe_filt_num;
+ /* number of PE Quad Hash contexts */
+ enum i40e_dma_cntx_size pe_cntx_num;
+ /* number of FCoE filter buckets */
+ enum i40e_hash_filter_size fcoe_filt_num;
+ /* number of FCoE DDP contexts */
+ enum i40e_dma_cntx_size fcoe_cntx_num;
+ /* size of the Hash LUT */
+ enum i40e_hash_lut_size hash_lut_size;
+ /* enable FDIR filters for PF and its VFs */
+ bool enable_fdir;
+ /* enable Ethertype filters for PF and its VFs */
+ bool enable_ethtype;
+ /* enable MAC/VLAN filters for PF and its VFs */
+ bool enable_macvlan;
+};
+
+/* Structure to hold device level control filter counts */
+struct i40e_control_filter_stats {
+ u16 mac_etype_used; /* Used perfect match MAC/EtherType filters */
+ u16 etype_used; /* Used perfect EtherType filters */
+ u16 mac_etype_free; /* Un-used perfect match MAC/EtherType filters */
+ u16 etype_free; /* Un-used perfect EtherType filters */
+};
+
+enum i40e_reset_type {
+ I40E_RESET_POR = 0,
+ I40E_RESET_CORER = 1,
+ I40E_RESET_GLOBR = 2,
+ I40E_RESET_EMPR = 3,
+};
+
+/* IEEE 802.1AB LLDP Agent Variables from NVM */
+#define I40E_NVM_LLDP_CFG_PTR 0xF
+struct i40e_lldp_variables {
+ u16 length;
+ u16 adminstatus;
+ u16 msgfasttx;
+ u16 msgtxinterval;
+ u16 txparams;
+ u16 timers;
+ u16 crc8;
+};
+
+#endif /* _I40E_TYPE_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
new file mode 100644
index 0000000..cc6654f
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
@@ -0,0 +1,368 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_VIRTCHNL_H_
+#define _I40E_VIRTCHNL_H_
+
+#include "i40e_type.h"
+
+/* Description:
+ * This header file describes the VF-PF communication protocol used
+ * by the various i40e drivers.
+ *
+ * Admin queue buffer usage:
+ * desc->opcode is always i40e_aqc_opc_send_msg_to_pf
+ * flags, retval, datalen, and data addr are all used normally.
+ * Firmware copies the cookie fields when sending messages between the PF and
+ * VF, but uses all other fields internally. Due to this limitation, we
+ * must send all messages as "indirect", i.e. using an external buffer.
+ *
+ * All the VSI indexes are relative to the VF. Each VF can have a maximum of
+ * three VSIs. All the queue indexes are relative to the VSI. Each VF can
+ * have a maximum of sixteen queues for all of its VSIs.
+ *
+ * The PF is required to return a status code in v_retval for all messages
+ * except RESET_VF, which does not require any response. The return value is
+ * of type i40e_status_code, defined in i40e_type.h.
+ *
+ * In general, VF driver initialization should roughly follow the order of these
+ * opcodes. The VF driver must first validate the API version of the PF driver,
+ * then request a reset, then get resources, then configure queues and
+ * interrupts. After these operations are complete, the VF driver may start
+ * its queues, optionally add MAC and VLAN filters, and process traffic.
+ */
+
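+/* A typical VF bring-up therefore looks like this (illustrative
+ * pseudo-code, not a literal API; each line is one virtchnl message and
+ * its response):
+ *
+ *	send(I40E_VIRTCHNL_OP_VERSION);           check major/minor match
+ *	send(I40E_VIRTCHNL_OP_RESET_VF);          poll VFGEN_RSTAT until done
+ *	send(I40E_VIRTCHNL_OP_GET_VF_RESOURCES);  learn VSIs/queues/vectors
+ *	send(I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES);
+ *	send(I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP);
+ *	send(I40E_VIRTCHNL_OP_ENABLE_QUEUES);
+ *	then optionally ADD_ETHER_ADDRESS/ADD_VLAN and start traffic
+ */
+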
+/* Opcodes for VF-PF communication. These are placed in the v_opcode field
+ * of the virtchnl_msg structure.
+ */
+enum i40e_virtchnl_ops {
+/* The VF sends requests to the PF for the following ops. */
+ I40E_VIRTCHNL_OP_UNKNOWN = 0,
+ I40E_VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */
+ I40E_VIRTCHNL_OP_RESET_VF,
+ I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
+ I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE,
+ I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE,
+ I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
+ I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
+ I40E_VIRTCHNL_OP_ENABLE_QUEUES,
+ I40E_VIRTCHNL_OP_DISABLE_QUEUES,
+ I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
+ I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
+ I40E_VIRTCHNL_OP_ADD_VLAN,
+ I40E_VIRTCHNL_OP_DEL_VLAN,
+ I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
+ I40E_VIRTCHNL_OP_GET_STATS,
+ I40E_VIRTCHNL_OP_FCOE,
+/* The PF sends status change events to VFs using the following op. */
+ I40E_VIRTCHNL_OP_EVENT,
+};
+
+/* Virtual channel message descriptor. This overlays the admin queue
+ * descriptor. All other data is passed in external buffers.
+ */
+
+struct i40e_virtchnl_msg {
+ u8 pad[8]; /* AQ flags/opcode/len/retval fields */
+ enum i40e_virtchnl_ops v_opcode; /* avoid confusion with desc->opcode */
+ i40e_status v_retval; /* ditto for desc->retval */
+ u32 vfid; /* used by PF when sending to VF */
+};
+
+/* Message descriptions and data structures. */
+
+/* I40E_VIRTCHNL_OP_VERSION
+ * VF posts its version number to the PF. PF responds with its version number
+ * in the same format, along with a return code.
+ * Reply from PF has its major/minor versions also in param0 and param1.
+ * If there is a major version mismatch, then the VF cannot operate.
+ * If there is a minor version mismatch, then the VF can operate but should
+ * add a warning to the system log.
+ *
+ * This enum element MUST always be specified as == 1, regardless of other
+ * changes in the API. The PF must always respond to this message without
+ * error regardless of version mismatch.
+ */
+#define I40E_VIRTCHNL_VERSION_MAJOR 1
+#define I40E_VIRTCHNL_VERSION_MINOR 0
+struct i40e_virtchnl_version_info {
+ u32 major;
+ u32 minor;
+};
+
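+/* Illustrative VF-side check of the PF's reply (sketch only; pf_ver is
+ * the i40e_virtchnl_version_info returned by the PF, and the warning
+ * mechanism is driver-specific):
+ *
+ *	if (pf_ver.major != I40E_VIRTCHNL_VERSION_MAJOR)
+ *		return -EINVAL;		VF cannot operate
+ *	if (pf_ver.minor != I40E_VIRTCHNL_VERSION_MINOR)
+ *		dev_warn(...);		VF can operate, warn only
+ */
+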
+/* I40E_VIRTCHNL_OP_RESET_VF
+ * VF sends this request to PF with no parameters
+ * PF does NOT respond! The VF driver must delay, then poll the VFGEN_RSTAT
+ * register until reset completion is indicated. The admin queue must be
+ * reinitialized after this operation.
+ *
+ * When reset is complete, PF must ensure that all queues in all VSIs associated
+ * with the VF are stopped, all queue configurations in the HMC are set to 0,
+ * and all MAC and VLAN filters (except the default MAC address) on all VSIs
+ * are cleared.
+ */
+
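+/* Sketch of the VF-side wait (illustrative; the exact delay and the
+ * rstat_mask for the reset-state field are up to the VF driver):
+ *
+ *	send(I40E_VIRTCHNL_OP_RESET_VF);
+ *	do {
+ *		msleep(10);
+ *		rstat = rd32(hw, I40E_VFGEN_RSTAT) & rstat_mask;
+ *	} while (rstat == I40E_VFR_INPROGRESS);
+ *	reinitialize the admin queue
+ */
+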
+/* I40E_VIRTCHNL_OP_GET_VF_RESOURCES
+ * VF sends this request to PF with no parameters
+ * PF responds with an indirect message containing
+ * i40e_virtchnl_vf_resource and one or more
+ * i40e_virtchnl_vsi_resource structures.
+ */
+
+struct i40e_virtchnl_vsi_resource {
+ u16 vsi_id;
+ u16 num_queue_pairs;
+ enum i40e_vsi_type vsi_type;
+ u16 qset_handle;
+ u8 default_mac_addr[I40E_ETH_LENGTH_OF_ADDRESS];
+};
+/* VF offload flags */
+#define I40E_VIRTCHNL_VF_OFFLOAD_L2 0x00000001
+#define I40E_VIRTCHNL_VF_OFFLOAD_FCOE 0x00000004
+#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000
+
+struct i40e_virtchnl_vf_resource {
+ u16 num_vsis;
+ u16 num_queue_pairs;
+ u16 max_vectors;
+ u16 max_mtu;
+
+ u32 vf_offload_flags;
+ u32 max_fcoe_contexts;
+ u32 max_fcoe_filters;
+
+ struct i40e_virtchnl_vsi_resource vsi_res[1];
+};
+
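+/* Note: vsi_res acts as a variable-length trailing array; the PF sizes
+ * the reply buffer accordingly (as done in i40e_vc_get_vf_resources_msg
+ * below):
+ *
+ *	len = sizeof(struct i40e_virtchnl_vf_resource) +
+ *	      sizeof(struct i40e_virtchnl_vsi_resource) * num_vsis;
+ */
+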
+/* I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE
+ * VF sends this message to set up parameters for one TX queue.
+ * External data buffer contains one instance of i40e_virtchnl_txq_info.
+ * PF configures requested queue and returns a status code.
+ */
+
+/* Tx queue config info */
+struct i40e_virtchnl_txq_info {
+ u16 vsi_id;
+ u16 queue_id;
+ u16 ring_len; /* number of descriptors, multiple of 8 */
+ u16 headwb_enabled;
+ u64 dma_ring_addr;
+ u64 dma_headwb_addr;
+};
+
+/* I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE
+ * VF sends this message to set up parameters for one RX queue.
+ * External data buffer contains one instance of i40e_virtchnl_rxq_info.
+ * PF configures requested queue and returns a status code.
+ */
+
+/* Rx queue config info */
+struct i40e_virtchnl_rxq_info {
+ u16 vsi_id;
+ u16 queue_id;
+ u32 ring_len; /* number of descriptors, multiple of 32 */
+ u16 hdr_size;
+ u16 splithdr_enabled;
+ u32 databuffer_size;
+ u32 max_pkt_size;
+ u64 dma_ring_addr;
+ enum i40e_hmc_obj_rx_hsplit_0 rx_split_pos;
+};
+
+/* I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES
+ * VF sends this message to set parameters for all active TX and RX queues
+ * associated with the specified VSI.
+ * PF configures queues and returns status.
+ * If the number of queues specified is greater than the number of queues
+ * associated with the VSI, an error is returned and no queues are configured.
+ */
+struct i40e_virtchnl_queue_pair_info {
+ /* NOTE: vsi_id and queue_id should be identical for both queues. */
+ struct i40e_virtchnl_txq_info txq;
+ struct i40e_virtchnl_rxq_info rxq;
+};
+
+struct i40e_virtchnl_vsi_queue_config_info {
+ u16 vsi_id;
+ u16 num_queue_pairs;
+ struct i40e_virtchnl_queue_pair_info qpair[1];
+};
+
+/* I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP
+ * VF uses this message to map vectors to queues.
+ * The rxq_map and txq_map fields are bitmaps used to indicate which queues
+ * are to be associated with the specified vector.
+ * The "other" causes are always mapped to vector 0.
+ * PF configures interrupt mapping and returns status.
+ */
+struct i40e_virtchnl_vector_map {
+ u16 vsi_id;
+ u16 vector_id;
+ u16 rxq_map;
+ u16 txq_map;
+ u16 rxitr_idx;
+ u16 txitr_idx;
+};
+
+struct i40e_virtchnl_irq_map_info {
+ u16 num_vectors;
+ struct i40e_virtchnl_vector_map vecmap[1];
+};
+
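+/* Example: map RX queues 0 and 1 and TX queue 0 of VSI 3 to vector 2
+ * (illustrative values only):
+ *
+ *	struct i40e_virtchnl_vector_map map = {
+ *		.vsi_id = 3,
+ *		.vector_id = 2,
+ *		.rxq_map = 0x3,
+ *		.txq_map = 0x1,
+ *	};
+ */
+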
+/* I40E_VIRTCHNL_OP_ENABLE_QUEUES
+ * I40E_VIRTCHNL_OP_DISABLE_QUEUES
+ * VF sends these messages to enable or disable TX/RX queue pairs.
+ * The queues fields are bitmaps indicating which queues to act upon.
+ * (Currently, we only support 16 queues per VF, but we make the field
+ * u32 to allow for expansion.)
+ * PF performs requested action and returns status.
+ */
+struct i40e_virtchnl_queue_select {
+ u16 vsi_id;
+ u16 pad;
+ u32 rx_queues;
+ u32 tx_queues;
+};
+
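+/* Example: enable queue pairs 0-3 of VSI 3 (illustrative values only):
+ *
+ *	struct i40e_virtchnl_queue_select vqs = {
+ *		.vsi_id = 3,
+ *		.rx_queues = 0xf,
+ *		.tx_queues = 0xf,
+ *	};
+ */
+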
+/* I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS
+ * VF sends this message in order to add one or more unicast or multicast
+ * address filters for the specified VSI.
+ * PF adds the filters and returns status.
+ */
+
+/* I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS
+ * VF sends this message in order to remove one or more unicast or multicast
+ * filters for the specified VSI.
+ * PF removes the filters and returns status.
+ */
+
+struct i40e_virtchnl_ether_addr {
+ u8 addr[I40E_ETH_LENGTH_OF_ADDRESS];
+ u8 pad[2];
+};
+
+struct i40e_virtchnl_ether_addr_list {
+ u16 vsi_id;
+ u16 num_elements;
+ struct i40e_virtchnl_ether_addr list[1];
+};
+
+/* I40E_VIRTCHNL_OP_ADD_VLAN
+ * VF sends this message to add one or more VLAN tag filters for receives.
+ * PF adds the filters and returns status.
+ * If a port VLAN is configured by the PF, this operation will return an
+ * error to the VF.
+ */
+
+/* I40E_VIRTCHNL_OP_DEL_VLAN
+ * VF sends this message to remove one or more VLAN tag filters for receives.
+ * PF removes the filters and returns status.
+ * If a port VLAN is configured by the PF, this operation will return an
+ * error to the VF.
+ */
+
+struct i40e_virtchnl_vlan_filter_list {
+ u16 vsi_id;
+ u16 num_elements;
+ u16 vlan_id[1];
+};
+
+/* I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE
+ * VF sends VSI id and flags.
+ * PF returns status code in retval.
+ * Note: we assume that broadcast accept mode is always enabled.
+ */
+struct i40e_virtchnl_promisc_info {
+ u16 vsi_id;
+ u16 flags;
+};
+
+#define I40E_FLAG_VF_UNICAST_PROMISC 0x00000001
+#define I40E_FLAG_VF_MULTICAST_PROMISC 0x00000002
+
+/* I40E_VIRTCHNL_OP_GET_STATS
+ * VF sends this message to request stats for the selected VSI. VF uses
+ * the i40e_virtchnl_queue_select struct to specify the VSI. The queue_id
+ * field is ignored by the PF.
+ *
+ * PF replies with struct i40e_eth_stats in an external buffer.
+ */
+
+/* I40E_VIRTCHNL_OP_EVENT
+ * PF sends this message to inform the VF driver of events that may affect it.
+ * No direct response is expected from the VF, though it may generate other
+ * messages in response to this one.
+ */
+enum i40e_virtchnl_event_codes {
+ I40E_VIRTCHNL_EVENT_UNKNOWN = 0,
+ I40E_VIRTCHNL_EVENT_LINK_CHANGE,
+ I40E_VIRTCHNL_EVENT_RESET_IMPENDING,
+ I40E_VIRTCHNL_EVENT_PF_DRIVER_CLOSE,
+};
+#define I40E_PF_EVENT_SEVERITY_INFO 0
+#define I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM 255
+
+struct i40e_virtchnl_pf_event {
+ enum i40e_virtchnl_event_codes event;
+ union {
+ struct {
+ enum i40e_aq_link_speed link_speed;
+ bool link_status;
+ } link_event;
+ } event_data;
+
+ int severity;
+};
+
+/* The following are TBD, not necessary for LAN functionality.
+ * I40E_VIRTCHNL_OP_FCOE
+ */
+
+/* VF reset states - these are written into the RSTAT register:
+ * I40E_VFGEN_RSTAT1 on the PF
+ * I40E_VFGEN_RSTAT on the VF
+ * When the PF initiates a reset, it writes 0
+ * When the reset is complete, it writes 1
+ * When the PF detects that the VF has recovered, it writes 2
+ * VF checks this register periodically to determine if a reset has occurred,
+ * then polls it to know when the reset is complete.
+ * If either the PF or VF reads the register while the hardware
+ * is in a reset state, it will return 0xDEADBEEF, which, when masked,
+ * will result in 3.
+ */
+enum i40e_vfr_states {
+ I40E_VFR_INPROGRESS = 0,
+ I40E_VFR_COMPLETED,
+ I40E_VFR_VFACTIVE,
+ I40E_VFR_UNKNOWN,
+};
+
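+/* Worked example of the masked-read note above: the low bits of
+ * 0xDEADBEEF are all ones, so masking the read down to the reset-state
+ * field (presumably two bits, given the four states above) yields 3,
+ * i.e. I40E_VFR_UNKNOWN.
+ */
+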
+#endif /* _I40E_VIRTCHNL_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
new file mode 100644
index 0000000..8967e58
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -0,0 +1,2335 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#include "i40e.h"
+
+/***********************misc routines*****************************/
+
+/**
+ * i40e_vc_isvalid_vsi_id
+ * @vf: pointer to the vf info
+ * @vsi_id: vf relative vsi id
+ *
+ * check for a valid vsi id
+ **/
+static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u8 vsi_id)
+{
+ struct i40e_pf *pf = vf->pf;
+
+ return pf->vsi[vsi_id]->vf_id == vf->vf_id;
+}
+
+/**
+ * i40e_vc_isvalid_queue_id
+ * @vf: pointer to the vf info
+ * @vsi_id: vsi id
+ * @qid: vsi relative queue id
+ *
+ * check for a valid queue id
+ **/
+static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u8 vsi_id,
+ u8 qid)
+{
+ struct i40e_pf *pf = vf->pf;
+
+ return qid < pf->vsi[vsi_id]->num_queue_pairs;
+}
+
+/**
+ * i40e_vc_isvalid_vector_id
+ * @vf: pointer to the vf info
+ * @vector_id: vf relative vector id
+ *
+ * check for a valid vector id
+ **/
+static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u8 vector_id)
+{
+ struct i40e_pf *pf = vf->pf;
+
+ return vector_id < pf->hw.func_caps.num_msix_vectors_vf;
+}
+
+/***********************vf resource mgmt routines*****************/
+
+/**
+ * i40e_vc_get_pf_queue_id
+ * @vf: pointer to the vf info
+ * @vsi_idx: index of VSI in PF struct
+ * @vsi_queue_id: vsi relative queue id
+ *
+ * return pf relative queue id
+ **/
+static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u8 vsi_idx,
+ u8 vsi_queue_id)
+{
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_vsi *vsi = pf->vsi[vsi_idx];
+ u16 pf_queue_id = I40E_QUEUE_END_OF_LIST;
+
+ if (le16_to_cpu(vsi->info.mapping_flags) &
+ I40E_AQ_VSI_QUE_MAP_NONCONTIG)
+ pf_queue_id =
+ le16_to_cpu(vsi->info.queue_mapping[vsi_queue_id]);
+ else
+ pf_queue_id = le16_to_cpu(vsi->info.queue_mapping[0]) +
+ vsi_queue_id;
+
+ return pf_queue_id;
+}
+
+/**
+ * i40e_ctrl_vsi_tx_queue
+ * @vf: pointer to the vf info
+ * @vsi_idx: index of VSI in PF struct
+ * @vsi_queue_id: vsi relative queue index
+ * @ctrl: control flags
+ *
+ * enable/disable/enable check/disable check
+ **/
+static int i40e_ctrl_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_idx,
+ u16 vsi_queue_id,
+ enum i40e_queue_ctrl ctrl)
+{
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_hw *hw = &pf->hw;
+ bool writeback = false;
+ u16 pf_queue_id;
+ int ret = 0;
+ u32 reg;
+
+ pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id);
+ reg = rd32(hw, I40E_QTX_ENA(pf_queue_id));
+
+ switch (ctrl) {
+ case I40E_QUEUE_CTRL_ENABLE:
+ reg |= I40E_QTX_ENA_QENA_REQ_MASK;
+ writeback = true;
+ break;
+ case I40E_QUEUE_CTRL_ENABLECHECK:
+ ret = (reg & I40E_QTX_ENA_QENA_STAT_MASK) ? 0 : -EPERM;
+ break;
+ case I40E_QUEUE_CTRL_DISABLE:
+ reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
+ writeback = true;
+ break;
+ case I40E_QUEUE_CTRL_DISABLECHECK:
+ ret = (reg & I40E_QTX_ENA_QENA_STAT_MASK) ? -EPERM : 0;
+ break;
+ case I40E_QUEUE_CTRL_FASTDISABLE:
+ reg |= I40E_QTX_ENA_FAST_QDIS_MASK;
+ writeback = true;
+ break;
+ case I40E_QUEUE_CTRL_FASTDISABLECHECK:
+ ret = (reg & I40E_QTX_ENA_QENA_STAT_MASK) ? -EPERM : 0;
+ if (!ret) {
+ reg &= ~I40E_QTX_ENA_FAST_QDIS_MASK;
+ writeback = true;
+ }
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ if (writeback) {
+ wr32(hw, I40E_QTX_ENA(pf_queue_id), reg);
+ i40e_flush(hw);
+ }
+
+ return ret;
+}
+
+/**
+ * i40e_ctrl_vsi_rx_queue
+ * @vf: pointer to the vf info
+ * @vsi_idx: index of VSI in PF struct
+ * @vsi_queue_id: vsi relative queue index
+ * @ctrl: control flags
+ *
+ * enable/disable/enable check/disable check
+ **/
+static int i40e_ctrl_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_idx,
+ u16 vsi_queue_id,
+ enum i40e_queue_ctrl ctrl)
+{
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_hw *hw = &pf->hw;
+ bool writeback = false;
+ u16 pf_queue_id;
+ int ret = 0;
+ u32 reg;
+
+ pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id);
+ reg = rd32(hw, I40E_QRX_ENA(pf_queue_id));
+
+ switch (ctrl) {
+ case I40E_QUEUE_CTRL_ENABLE:
+ reg |= I40E_QRX_ENA_QENA_REQ_MASK;
+ writeback = true;
+ break;
+ case I40E_QUEUE_CTRL_ENABLECHECK:
+ ret = (reg & I40E_QRX_ENA_QENA_STAT_MASK) ? 0 : -EPERM;
+ break;
+ case I40E_QUEUE_CTRL_DISABLE:
+ reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
+ writeback = true;
+ break;
+ case I40E_QUEUE_CTRL_DISABLECHECK:
+ ret = (reg & I40E_QRX_ENA_QENA_STAT_MASK) ? -EPERM : 0;
+ break;
+ case I40E_QUEUE_CTRL_FASTDISABLE:
+ reg |= I40E_QRX_ENA_FAST_QDIS_MASK;
+ writeback = true;
+ break;
+ case I40E_QUEUE_CTRL_FASTDISABLECHECK:
+ ret = (reg & I40E_QRX_ENA_QENA_STAT_MASK) ? -EPERM : 0;
+ if (!ret) {
+ reg &= ~I40E_QRX_ENA_FAST_QDIS_MASK;
+ writeback = true;
+ }
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ if (writeback) {
+ wr32(hw, I40E_QRX_ENA(pf_queue_id), reg);
+ i40e_flush(hw);
+ }
+
+ return ret;
+}
+
+/**
+ * i40e_config_irq_link_list
+ * @vf: pointer to the vf info
+ * @vsi_idx: index of VSI in PF struct
+ * @vecmap: irq map info
+ *
+ * configure irq link list from the map
+ **/
+static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_idx,
+ struct i40e_virtchnl_vector_map *vecmap)
+{
+ unsigned long linklistmap = 0, tempmap;
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_hw *hw = &pf->hw;
+ u16 vsi_queue_id, pf_queue_id;
+ enum i40e_queue_type qtype;
+ u16 next_q, vector_id;
+ u32 reg, reg_idx;
+ u16 itr_idx = 0;
+
+ vector_id = vecmap->vector_id;
+ /* setup the head */
+ if (0 == vector_id)
+ reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
+ else
+ reg_idx = I40E_VPINT_LNKLSTN(
+ ((pf->hw.func_caps.num_msix_vectors_vf - 1)
+ * vf->vf_id) + (vector_id - 1));
+
+ if (vecmap->rxq_map == 0 && vecmap->txq_map == 0) {
+ /* Special case - No queues mapped on this vector */
+ wr32(hw, reg_idx, I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK);
+ goto irq_list_done;
+ }
+ tempmap = vecmap->rxq_map;
+ vsi_queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
+ while (vsi_queue_id < I40E_MAX_VSI_QP) {
+ linklistmap |= (1 <<
+ (I40E_VIRTCHNL_SUPPORTED_QTYPES *
+ vsi_queue_id));
+ vsi_queue_id =
+ find_next_bit(&tempmap, I40E_MAX_VSI_QP, vsi_queue_id + 1);
+ }
+
+ tempmap = vecmap->txq_map;
+ vsi_queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
+ while (vsi_queue_id < I40E_MAX_VSI_QP) {
+ linklistmap |= (1 <<
+ (I40E_VIRTCHNL_SUPPORTED_QTYPES * vsi_queue_id
+ + 1));
+ vsi_queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
+ vsi_queue_id + 1);
+ }
+
+ next_q = find_first_bit(&linklistmap,
+ (I40E_MAX_VSI_QP *
+ I40E_VIRTCHNL_SUPPORTED_QTYPES));
+ vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
+ qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
+ pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id);
+ reg = ((qtype << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) | pf_queue_id);
+
+ wr32(hw, reg_idx, reg);
+
+ while (next_q < (I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES)) {
+ switch (qtype) {
+ case I40E_QUEUE_TYPE_RX:
+ reg_idx = I40E_QINT_RQCTL(pf_queue_id);
+ itr_idx = vecmap->rxitr_idx;
+ break;
+ case I40E_QUEUE_TYPE_TX:
+ reg_idx = I40E_QINT_TQCTL(pf_queue_id);
+ itr_idx = vecmap->txitr_idx;
+ break;
+ default:
+ break;
+ }
+
+ next_q = find_next_bit(&linklistmap,
+ (I40E_MAX_VSI_QP *
+ I40E_VIRTCHNL_SUPPORTED_QTYPES),
+ next_q + 1);
+ if (next_q < (I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES)) {
+ vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
+ qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
+ pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx,
+ vsi_queue_id);
+ } else {
+ pf_queue_id = I40E_QUEUE_END_OF_LIST;
+ qtype = 0;
+ }
+
+ /* format for the RQCTL & TQCTL regs is same */
+ reg = (vector_id) |
+ (qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
+ (pf_queue_id << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
+ (1 << I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
+ (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT);
+ wr32(hw, reg_idx, reg);
+ }
+
+irq_list_done:
+ i40e_flush(hw);
+}
+
+/**
+ * i40e_config_vsi_tx_queue
+ * @vf: pointer to the vf info
+ * @vsi_idx: index of VSI in PF struct
+ * @vsi_queue_id: vsi relative queue index
+ * @info: config. info
+ *
+ * configure tx queue
+ **/
+static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_idx,
+ u16 vsi_queue_id,
+ struct i40e_virtchnl_txq_info *info)
+{
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_hw *hw = &pf->hw;
+ struct i40e_hmc_obj_txq tx_ctx;
+ u16 pf_queue_id;
+ u32 qtx_ctl;
+ int ret = 0;
+
+ pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id);
+
+ /* clear the context structure first */
+ memset(&tx_ctx, 0, sizeof(struct i40e_hmc_obj_txq));
+
+ /* only set the required fields */
+ tx_ctx.base = info->dma_ring_addr / 128;
+ tx_ctx.qlen = info->ring_len;
+ tx_ctx.rdylist = le16_to_cpu(pf->vsi[vsi_idx]->info.qs_handle[0]);
+ tx_ctx.rdylist_act = 0;
+
+ /* clear the context in the HMC */
+ ret = i40e_clear_lan_tx_queue_context(hw, pf_queue_id);
+ if (ret) {
+ dev_err(&pf->pdev->dev,
+ "Failed to clear VF LAN Tx queue context %d, error: %d\n",
+ pf_queue_id, ret);
+ ret = -ENOENT;
+ goto error_context;
+ }
+
+ /* set the context in the HMC */
+ ret = i40e_set_lan_tx_queue_context(hw, pf_queue_id, &tx_ctx);
+ if (ret) {
+ dev_err(&pf->pdev->dev,
+ "Failed to set VF LAN Tx queue context %d error: %d\n",
+ pf_queue_id, ret);
+ ret = -ENOENT;
+ goto error_context;
+ }
+
+ /* associate this queue with the PCI VF function */
+ qtx_ctl = I40E_QTX_CTL_VF_QUEUE;
+ qtx_ctl |= ((hw->hmc.hmc_fn_id << I40E_QTX_CTL_PF_INDX_SHIFT)
+ & I40E_QTX_CTL_PF_INDX_MASK);
+ qtx_ctl |= (((vf->vf_id + hw->func_caps.vf_base_id)
+ << I40E_QTX_CTL_VFVM_INDX_SHIFT)
+ & I40E_QTX_CTL_VFVM_INDX_MASK);
+ wr32(hw, I40E_QTX_CTL(pf_queue_id), qtx_ctl);
+ i40e_flush(hw);
+
+error_context:
+ return ret;
+}
+
+/**
+ * i40e_config_vsi_rx_queue
+ * @vf: pointer to the vf info
+ * @vsi_idx: index of VSI in PF struct
+ * @vsi_queue_id: vsi relative queue index
+ * @info: config. info
+ *
+ * configure rx queue
+ **/
+static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_idx,
+ u16 vsi_queue_id,
+ struct i40e_virtchnl_rxq_info *info)
+{
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_hw *hw = &pf->hw;
+ struct i40e_hmc_obj_rxq rx_ctx;
+ u16 pf_queue_id;
+ int ret = 0;
+
+ pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id);
+
+ /* clear the context structure first */
+ memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));
+
+ /* only set the required fields */
+ rx_ctx.base = info->dma_ring_addr / 128;
+ rx_ctx.qlen = info->ring_len;
+
+ if (info->splithdr_enabled) {
+ rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2 |
+ I40E_RX_SPLIT_IP |
+ I40E_RX_SPLIT_TCP_UDP |
+ I40E_RX_SPLIT_SCTP;
+ /* header length validation */
+ if (info->hdr_size > ((2 * 1024) - 64)) {
+ ret = -EINVAL;
+ goto error_param;
+ }
+ rx_ctx.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;
+
+ /* set splitalways mode 10b */
+ rx_ctx.dtype = 0x2;
+ }
+
+ /* databuffer length validation */
+ if (info->databuffer_size > ((16 * 1024) - 128)) {
+ ret = -EINVAL;
+ goto error_param;
+ }
+ rx_ctx.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;
+
+ /* max pkt. length validation */
+ if (info->max_pkt_size >= (16 * 1024) || info->max_pkt_size < 64) {
+ ret = -EINVAL;
+ goto error_param;
+ }
+ rx_ctx.rxmax = info->max_pkt_size;
+
+ /* enable 32bytes desc always */
+ rx_ctx.dsize = 1;
+
+ /* default values */
+ rx_ctx.tphrdesc_ena = 1;
+ rx_ctx.tphwdesc_ena = 1;
+ rx_ctx.tphdata_ena = 1;
+ rx_ctx.tphhead_ena = 1;
+ rx_ctx.lrxqthresh = 2;
+ rx_ctx.crcstrip = 1;
+
+ /* clear the context in the HMC */
+ ret = i40e_clear_lan_rx_queue_context(hw, pf_queue_id);
+ if (ret) {
+ dev_err(&pf->pdev->dev,
+ "Failed to clear VF LAN Rx queue context %d, error: %d\n",
+ pf_queue_id, ret);
+ ret = -ENOENT;
+ goto error_param;
+ }
+
+ /* set the context in the HMC */
+ ret = i40e_set_lan_rx_queue_context(hw, pf_queue_id, &rx_ctx);
+ if (ret) {
+ dev_err(&pf->pdev->dev,
+ "Failed to set VF LAN Rx queue context %d error: %d\n",
+ pf_queue_id, ret);
+ ret = -ENOENT;
+ goto error_param;
+ }
+
+error_param:
+ return ret;
+}
+
+/**
+ * i40e_alloc_vsi_res
+ * @vf: pointer to the vf info
+ * @type: type of VSI to allocate
+ *
+ * alloc vf vsi context & resources
+ **/
+static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
+{
+ struct i40e_mac_filter *f = NULL;
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_hw *hw = &pf->hw;
+ struct i40e_vsi *vsi;
+ int ret = 0;
+
+ vsi = i40e_vsi_setup(pf, type, pf->vsi[pf->lan_vsi]->seid, vf->vf_id);
+
+ if (!vsi) {
+ dev_err(&pf->pdev->dev,
+ "add vsi failed for vf %d, aq_err %d\n",
+ vf->vf_id, pf->hw.aq.asq_last_status);
+ ret = -ENOENT;
+ goto error_alloc_vsi_res;
+ }
+ if (type == I40E_VSI_SRIOV) {
+ vf->lan_vsi_index = vsi->idx;
+ vf->lan_vsi_id = vsi->id;
+ dev_info(&pf->pdev->dev,
+ "LAN VSI index %d, VSI id %d\n",
+ vsi->idx, vsi->id);
+ f = i40e_add_filter(vsi, vf->default_lan_addr.addr,
+ 0, true, false);
+ }
+ if (!f) {
+ dev_err(&pf->pdev->dev, "Unable to add ucast filter\n");
+ ret = -ENOMEM;
+ goto error_alloc_vsi_res;
+ }
+
+ /* program mac filter */
+ ret = i40e_sync_vsi_filters(vsi);
+ if (ret) {
+ dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
+ goto error_alloc_vsi_res;
+ }
+
+ /* accept bcast pkts. by default */
+ ret = i40e_aq_set_vsi_broadcast(hw, vsi->seid, true, NULL);
+ if (ret) {
+ dev_err(&pf->pdev->dev,
+ "set vsi bcast failed for vf %d, vsi %d, aq_err %d\n",
+ vf->vf_id, vsi->idx, pf->hw.aq.asq_last_status);
+ ret = -EINVAL;
+ }
+
+error_alloc_vsi_res:
+ return ret;
+}
+
+/**
+ * i40e_reset_vf
+ * @vf: pointer to the vf structure
+ * @flr: VFLR was issued or not
+ *
+ * reset the vf
+ **/
+int i40e_reset_vf(struct i40e_vf *vf, bool flr)
+{
+ int ret = -ENOENT;
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_hw *hw = &pf->hw;
+ u32 reg, reg_idx, msix_vf;
+ bool rsd = false;
+ u16 pf_queue_id;
+ int i, j;
+
+ /* warn the VF */
+ wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_INPROGRESS);
+
+ clear_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);
+
+ /* PF triggers VFR only when VF requests, in case of
+ * VFLR, HW triggers VFR
+ */
+ if (!flr) {
+ /* reset vf using VPGEN_VFRTRIG reg */
+ reg = I40E_VPGEN_VFRTRIG_VFSWR_MASK;
+ wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
+ i40e_flush(hw);
+ }
+
+ /* poll VPGEN_VFRSTAT reg to make sure
+ * that reset is complete
+ */
+ for (i = 0; i < 4; i++) {
+ /* vf reset requires driver to first reset the
+ * vf & then poll the status register to make sure
+ * that the requested op was completed
+ * successfully
+ */
+ udelay(10);
+ reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
+ if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) {
+ rsd = true;
+ break;
+ }
+ }
+
+ if (!rsd)
+ dev_err(&pf->pdev->dev, "VF reset check timeout %d\n",
+ vf->vf_id);
+
+ /* fast disable qps */
+ for (j = 0; j < pf->vsi[vf->lan_vsi_index]->num_queue_pairs; j++) {
+ ret = i40e_ctrl_vsi_tx_queue(vf, vf->lan_vsi_index, j,
+ I40E_QUEUE_CTRL_FASTDISABLE);
+ ret = i40e_ctrl_vsi_rx_queue(vf, vf->lan_vsi_index, j,
+ I40E_QUEUE_CTRL_FASTDISABLE);
+ }
+
+ /* Queue enable/disable requires driver to
+ * first reset the vf & then poll the status register
+ * to make sure that the requested op was completed
+ * successfully
+ */
+ udelay(10);
+ for (j = 0; j < pf->vsi[vf->lan_vsi_index]->num_queue_pairs; j++) {
+ ret = i40e_ctrl_vsi_tx_queue(vf, vf->lan_vsi_index, j,
+ I40E_QUEUE_CTRL_FASTDISABLECHECK);
+ if (ret)
+ dev_info(&pf->pdev->dev,
+ "Queue control check failed on Tx queue %d of VSI %d VF %d\n",
+ j, vf->lan_vsi_index, vf->vf_id);
+ ret = i40e_ctrl_vsi_rx_queue(vf, vf->lan_vsi_index, j,
+ I40E_QUEUE_CTRL_FASTDISABLECHECK);
+ if (ret)
+ dev_info(&pf->pdev->dev,
+ "Queue control check failed on Rx queue %d of VSI %d VF %d\n",
+ j, vf->lan_vsi_index, vf->vf_id);
+ }
+
+ /* clear the irq settings */
+ msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
+ for (i = 0; i < msix_vf; i++) {
+ /* format is same for both registers */
+ if (0 == i)
+ reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
+ else
+ reg_idx = I40E_VPINT_LNKLSTN(((msix_vf - 1) *
+ (vf->vf_id))
+ + (i - 1));
+ reg = (I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
+ I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
+ wr32(hw, reg_idx, reg);
+ i40e_flush(hw);
+ }
+ /* disable interrupts so the VF starts in a known state */
+ for (i = 0; i < msix_vf; i++) {
+ /* format is same for both registers */
+ if (0 == i)
+ reg_idx = I40E_VFINT_DYN_CTL0(vf->vf_id);
+ else
+ reg_idx = I40E_VFINT_DYN_CTLN(((msix_vf - 1) *
+ (vf->vf_id))
+ + (i - 1));
+ wr32(hw, reg_idx, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
+ i40e_flush(hw);
+ }
+
+ /* set the defaults for the rqctl & tqctl registers */
+ reg = (I40E_QINT_RQCTL_NEXTQ_INDX_MASK | I40E_QINT_RQCTL_ITR_INDX_MASK |
+ I40E_QINT_RQCTL_NEXTQ_TYPE_MASK);
+ for (j = 0; j < pf->vsi[vf->lan_vsi_index]->num_queue_pairs; j++) {
+ pf_queue_id = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index, j);
+ wr32(hw, I40E_QINT_RQCTL(pf_queue_id), reg);
+ wr32(hw, I40E_QINT_TQCTL(pf_queue_id), reg);
+ }
+
+ /* clear the reset bit in the VPGEN_VFRTRIG reg */
+ reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
+ reg &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
+ wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
+ /* tell the VF the reset is done */
+ wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_COMPLETED);
+ i40e_flush(hw);
+
+ return ret;
+}
+
+/**
+ * i40e_enable_vf_mappings
+ * @vf: pointer to the vf info
+ *
+ * enable vf mappings
+ **/
+static void i40e_enable_vf_mappings(struct i40e_vf *vf)
+{
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_hw *hw = &pf->hw;
+ u32 reg, total_queue_pairs = 0;
+ int j;
+
+ /* Tell the hardware we're using noncontiguous mapping. HW requires
+ * that VF queues be mapped using this method, even when they are
+ * contiguous in real life
+ */
+ wr32(hw, I40E_VSILAN_QBASE(vf->lan_vsi_id),
+ I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);
+
+ /* enable VF vplan_qtable mappings */
+ reg = I40E_VPLAN_MAPENA_TXRX_ENA_MASK;
+ wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), reg);
+
+ /* map PF queues to VF queues */
+ for (j = 0; j < pf->vsi[vf->lan_vsi_index]->num_queue_pairs; j++) {
+ u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index, j);
+ reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK);
+ wr32(hw, I40E_VPLAN_QTABLE(total_queue_pairs, vf->vf_id), reg);
+ total_queue_pairs++;
+ }
+
+ /* map PF queues to VSI */
+ for (j = 0; j < 7; j++) {
+ if (j * 2 >= pf->vsi[vf->lan_vsi_index]->num_queue_pairs) {
+ reg = 0x07FF07FF; /* unused */
+ } else {
+ u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index,
+ j * 2);
+ reg = qid;
+ qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index,
+ (j * 2) + 1);
+ reg |= qid << 16;
+ }
+ wr32(hw, I40E_VSILAN_QTABLE(j, vf->lan_vsi_id), reg);
+ }
+
+ i40e_flush(hw);
+}
+
+/**
+ * i40e_disable_vf_mappings
+ * @vf: pointer to the vf info
+ *
+ * disable vf mappings
+ **/
+static void i40e_disable_vf_mappings(struct i40e_vf *vf)
+{
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_hw *hw = &pf->hw;
+ int i;
+
+ /* disable qp mappings */
+ wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), 0);
+ for (i = 0; i < I40E_MAX_VSI_QP; i++)
+ wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_id),
+ I40E_QUEUE_END_OF_LIST);
+ i40e_flush(hw);
+}
+
+/**
+ * i40e_free_vf_res
+ * @vf: pointer to the vf info
+ *
+ * free vf resources
+ **/
+static void i40e_free_vf_res(struct i40e_vf *vf)
+{
+ struct i40e_pf *pf = vf->pf;
+
+ /* free vsi & disconnect it from the parent uplink */
+ if (vf->lan_vsi_index) {
+ i40e_vsi_release(pf->vsi[vf->lan_vsi_index]);
+ vf->lan_vsi_index = 0;
+ vf->lan_vsi_id = 0;
+ }
+ /* reset some of the state variables keeping
+ * track of the resources
+ */
+ vf->num_queue_pairs = 0;
+ vf->vf_states = 0;
+}
+
+/**
+ * i40e_alloc_vf_res
+ * @vf: pointer to the vf info
+ *
+ * allocate vf resources
+ **/
+static int i40e_alloc_vf_res(struct i40e_vf *vf)
+{
+ struct i40e_pf *pf = vf->pf;
+ int total_queue_pairs = 0;
+ int ret;
+
+ /* allocate hw vsi context & associated resources */
+ ret = i40e_alloc_vsi_res(vf, I40E_VSI_SRIOV);
+ if (ret)
+ goto error_alloc;
+ total_queue_pairs += pf->vsi[vf->lan_vsi_index]->num_queue_pairs;
+ set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
+
+ /* store the total qps number for the runtime
+ * vf req validation
+ */
+ vf->num_queue_pairs = total_queue_pairs;
+
+ /* vf is now completely initialized */
+ set_bit(I40E_VF_STAT_INIT, &vf->vf_states);
+
+error_alloc:
+ if (ret)
+ i40e_free_vf_res(vf);
+
+ return ret;
+}
+
+/**
+ * i40e_vfs_are_assigned
+ * @pf: pointer to the pf structure
+ *
+ * Determine if any VFs are assigned to VMs
+ **/
+static bool i40e_vfs_are_assigned(struct i40e_pf *pf)
+{
+ struct pci_dev *pdev = pf->pdev;
+ struct pci_dev *vfdev;
+
+ /* loop through all the VFs to see if we own any that are assigned */
+ vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, I40E_VF_DEVICE_ID, NULL);
+ while (vfdev) {
+ /* if we don't own it we don't care */
+ if (vfdev->is_virtfn && pci_physfn(vfdev) == pdev) {
+ /* if it is assigned we cannot release it */
+ if (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
+ return true;
+ }
+
+ vfdev = pci_get_device(PCI_VENDOR_ID_INTEL,
+ I40E_VF_DEVICE_ID,
+ vfdev);
+ }
+
+ return false;
+}
+
+/**
+ * i40e_free_vfs
+ * @pf: pointer to the pf structure
+ *
+ * free vf resources
+ **/
+void i40e_free_vfs(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ int i;
+
+ if (!pf->vf)
+ return;
+
+ /* Disable interrupt 0 so we don't try to handle the VFLR. */
+ wr32(hw, I40E_PFINT_DYN_CTL0, 0);
+ i40e_flush(hw);
+
+ /* free up vf resources */
+ for (i = 0; i < pf->num_alloc_vfs; i++) {
+ if (test_bit(I40E_VF_STAT_INIT, &pf->vf[i].vf_states))
+ i40e_free_vf_res(&pf->vf[i]);
+ /* disable qp mappings */
+ i40e_disable_vf_mappings(&pf->vf[i]);
+ }
+
+ kfree(pf->vf);
+ pf->vf = NULL;
+ pf->num_alloc_vfs = 0;
+
+ if (!i40e_vfs_are_assigned(pf))
+ pci_disable_sriov(pf->pdev);
+ else
+ dev_warn(&pf->pdev->dev,
+ "unable to disable SR-IOV because VFs are assigned.\n");
+
+ /* Re-enable interrupt 0. */
+ wr32(hw, I40E_PFINT_DYN_CTL0,
+ I40E_PFINT_DYN_CTL0_INTENA_MASK |
+ I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
+ (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT));
+ i40e_flush(hw);
+}
+
+#ifdef CONFIG_PCI_IOV
+/**
+ * i40e_alloc_vfs
+ * @pf: pointer to the pf structure
+ * @num_alloc_vfs: number of vfs to allocate
+ *
+ * allocate vf resources
+ **/
+static int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
+{
+ struct i40e_vf *vfs;
+ int i, ret = 0;
+
+ ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
+ if (ret) {
+ dev_err(&pf->pdev->dev,
+ "pci_enable_sriov failed with error %d!\n", ret);
+ pf->num_alloc_vfs = 0;
+ goto err_iov;
+ }
+
+ /* allocate memory */
+ vfs = kzalloc(num_alloc_vfs * sizeof(struct i40e_vf), GFP_KERNEL);
+ if (!vfs) {
+ ret = -ENOMEM;
+ goto err_alloc;
+ }
+
+ /* apply default profile */
+ for (i = 0; i < num_alloc_vfs; i++) {
+ vfs[i].pf = pf;
+ vfs[i].parent_type = I40E_SWITCH_ELEMENT_TYPE_VEB;
+ vfs[i].vf_id = i;
+
+ /* assign default capabilities */
+ set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
+
+ ret = i40e_alloc_vf_res(&vfs[i]);
+ i40e_reset_vf(&vfs[i], true);
+ if (ret)
+ break;
+
+ /* enable vf vplan_qtable mappings */
+ i40e_enable_vf_mappings(&vfs[i]);
+ }
+ pf->vf = vfs;
+ pf->num_alloc_vfs = num_alloc_vfs;
+
+err_alloc:
+ if (ret)
+ i40e_free_vfs(pf);
+err_iov:
+ return ret;
+}
+
+#endif
+/**
+ * i40e_pci_sriov_enable
+ * @pdev: pointer to a pci_dev structure
+ * @num_vfs: number of vfs to allocate
+ *
+ * Enable or change the number of VFs
+ **/
+static int i40e_pci_sriov_enable(struct pci_dev *pdev, int num_vfs)
+{
+#ifdef CONFIG_PCI_IOV
+ struct i40e_pf *pf = pci_get_drvdata(pdev);
+ int pre_existing_vfs = pci_num_vf(pdev);
+ int err = 0;
+
+ dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs);
+ if (pre_existing_vfs && pre_existing_vfs != num_vfs)
+ i40e_free_vfs(pf);
+ else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
+ goto out;
+
+ if (num_vfs > pf->num_req_vfs) {
+ err = -EPERM;
+ goto err_out;
+ }
+
+ err = i40e_alloc_vfs(pf, num_vfs);
+ if (err) {
+ dev_warn(&pdev->dev, "Failed to enable SR-IOV: %d\n", err);
+ goto err_out;
+ }
+
+out:
+ return num_vfs;
+
+err_out:
+ return err;
+#endif
+ return 0;
+}
+
+/**
+ * i40e_pci_sriov_configure
+ * @pdev: pointer to a pci_dev structure
+ * @num_vfs: number of vfs to allocate
+ *
+ * Enable or change the number of VFs. Called when the user updates the number
+ * of VFs in sysfs.
+ **/
+int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
+{
+ struct i40e_pf *pf = pci_get_drvdata(pdev);
+
+ if (num_vfs)
+ return i40e_pci_sriov_enable(pdev, num_vfs);
+
+ i40e_free_vfs(pf);
+ return 0;
+}
+
+/***********************virtual channel routines******************/
+
+/**
+ * i40e_vc_send_msg_to_vf
+ * @vf: pointer to the vf info
+ * @v_opcode: virtual channel opcode
+ * @v_retval: virtual channel return value
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * send msg to vf
+ **/
+static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
+ u32 v_retval, u8 *msg, u16 msglen)
+{
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_hw *hw = &pf->hw;
+ i40e_status aq_ret;
+
+ /* single place to detect unsuccessful return values */
+ if (v_retval) {
+ vf->num_invalid_msgs++;
+ dev_err(&pf->pdev->dev, "Failed opcode %d Error: %d\n",
+ v_opcode, v_retval);
+ if (vf->num_invalid_msgs >
+ I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED) {
+ dev_err(&pf->pdev->dev,
+ "Number of invalid messages exceeded for VF %d\n",
+ vf->vf_id);
+ dev_err(&pf->pdev->dev, "Use PF Control I/F to enable the VF\n");
+ set_bit(I40E_VF_STAT_DISABLED, &vf->vf_states);
+ }
+ } else {
+ vf->num_valid_msgs++;
+ }
+
+ aq_ret = i40e_aq_send_msg_to_vf(hw, vf->vf_id, v_opcode, v_retval,
+ msg, msglen, NULL);
+ if (aq_ret) {
+ dev_err(&pf->pdev->dev,
+ "Unable to send the message to VF %d aq_err %d\n",
+ vf->vf_id, pf->hw.aq.asq_last_status);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/**
+ * i40e_vc_send_resp_to_vf
+ * @vf: pointer to the vf info
+ * @opcode: operation code
+ * @retval: return value
+ *
+ * send resp msg to vf
+ **/
+static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf,
+ enum i40e_virtchnl_ops opcode,
+ i40e_status retval)
+{
+ return i40e_vc_send_msg_to_vf(vf, opcode, retval, NULL, 0);
+}
+
+/**
+ * i40e_vc_get_version_msg
+ * @vf: pointer to the vf info
+ *
+ * called from the vf to request the API version used by the PF
+ **/
+static int i40e_vc_get_version_msg(struct i40e_vf *vf)
+{
+ struct i40e_virtchnl_version_info info = {
+ I40E_VIRTCHNL_VERSION_MAJOR, I40E_VIRTCHNL_VERSION_MINOR
+ };
+
+ return i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_VERSION,
+ I40E_SUCCESS, (u8 *)&info,
+ sizeof(struct
+ i40e_virtchnl_version_info));
+}
+
+/**
+ * i40e_vc_get_vf_resources_msg
+ * @vf: pointer to the vf info
+ *
+ * called from the vf to request its resources
+ **/
+static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf)
+{
+ struct i40e_virtchnl_vf_resource *vfres = NULL;
+ struct i40e_pf *pf = vf->pf;
+ i40e_status aq_ret = 0;
+ struct i40e_vsi *vsi;
+ int i = 0, len = 0;
+ int num_vsis = 1;
+ int ret;
+
+ if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto err;
+ }
+
+ len = (sizeof(struct i40e_virtchnl_vf_resource) +
+ sizeof(struct i40e_virtchnl_vsi_resource) * num_vsis);
+
+ vfres = kzalloc(len, GFP_KERNEL);
+ if (!vfres) {
+ aq_ret = I40E_ERR_NO_MEMORY;
+ len = 0;
+ goto err;
+ }
+
+ vfres->vf_offload_flags = I40E_VIRTCHNL_VF_OFFLOAD_L2;
+ vsi = pf->vsi[vf->lan_vsi_index];
+ if (!vsi->info.pvid)
+ vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_VLAN;
+
+ vfres->num_vsis = num_vsis;
+ vfres->num_queue_pairs = vf->num_queue_pairs;
+ vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
+ if (vf->lan_vsi_index) {
+ vfres->vsi_res[i].vsi_id = vf->lan_vsi_index;
+ vfres->vsi_res[i].vsi_type = I40E_VSI_SRIOV;
+ vfres->vsi_res[i].num_queue_pairs =
+ pf->vsi[vf->lan_vsi_index]->num_queue_pairs;
+ memcpy(vfres->vsi_res[i].default_mac_addr,
+ vf->default_lan_addr.addr, ETH_ALEN);
+ i++;
+ }
+ set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);
+
+err:
+ /* send the response back to the vf */
+ ret = i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
+ aq_ret, (u8 *)vfres, len);
+
+ kfree(vfres);
+ return ret;
+}
+
+/**
+ * i40e_vc_reset_vf_msg
+ * @vf: pointer to the vf info
+ *
+ * called from the vf to reset itself; unlike other virtchnl messages,
+ * the pf driver doesn't send a response back to the vf
+ **/
+static int i40e_vc_reset_vf_msg(struct i40e_vf *vf)
+{
+ if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states))
+ return -ENOENT;
+
+ return i40e_reset_vf(vf, false);
+}
+
+/**
+ * i40e_vc_config_promiscuous_mode_msg
+ * @vf: pointer to the vf info
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * called from the vf to configure the promiscuous mode of
+ * vf vsis
+ **/
+static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf,
+ u8 *msg, u16 msglen)
+{
+ struct i40e_virtchnl_promisc_info *info =
+ (struct i40e_virtchnl_promisc_info *)msg;
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_hw *hw = &pf->hw;
+ bool allmulti = false;
+ bool promisc = false;
+ i40e_status aq_ret;
+
+ if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
+ !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
+ !i40e_vc_isvalid_vsi_id(vf, info->vsi_id) ||
+ (pf->vsi[info->vsi_id]->type != I40E_VSI_FCOE)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (info->flags & I40E_FLAG_VF_UNICAST_PROMISC)
+ promisc = true;
+ aq_ret = i40e_aq_set_vsi_unicast_promiscuous(hw, info->vsi_id,
+ promisc, NULL);
+ if (aq_ret)
+ goto error_param;
+
+ if (info->flags & I40E_FLAG_VF_MULTICAST_PROMISC)
+ allmulti = true;
+ aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, info->vsi_id,
+ allmulti, NULL);
+
+error_param:
+ /* send the response to the vf */
+ return i40e_vc_send_resp_to_vf(vf,
+ I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
+ aq_ret);
+}
+
+/**
+ * i40e_vc_config_queues_msg
+ * @vf: pointer to the vf info
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * called from the vf to configure the rx/tx
+ * queues
+ **/
+static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
+{
+ struct i40e_virtchnl_vsi_queue_config_info *qci =
+ (struct i40e_virtchnl_vsi_queue_config_info *)msg;
+ struct i40e_virtchnl_queue_pair_info *qpi;
+ u16 vsi_id, vsi_queue_id;
+ i40e_status aq_ret = 0;
+ int i;
+
+ if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+
+ vsi_id = qci->vsi_id;
+ if (!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+ for (i = 0; i < qci->num_queue_pairs; i++) {
+ qpi = &qci->qpair[i];
+ vsi_queue_id = qpi->txq.queue_id;
+ if ((qpi->txq.vsi_id != vsi_id) ||
+ (qpi->rxq.vsi_id != vsi_id) ||
+ (qpi->rxq.queue_id != vsi_queue_id) ||
+ !i40e_vc_isvalid_queue_id(vf, vsi_id, vsi_queue_id)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (i40e_config_vsi_rx_queue(vf, vsi_id, vsi_queue_id,
+ &qpi->rxq) ||
+ i40e_config_vsi_tx_queue(vf, vsi_id, vsi_queue_id,
+ &qpi->txq)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+ }
+
+error_param:
+ /* send the response to the vf */
+ return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
+ aq_ret);
+}
+
+/**
+ * i40e_vc_config_irq_map_msg
+ * @vf: pointer to the vf info
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * called from the vf to configure the irq to
+ * queue map
+ **/
+static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
+{
+ struct i40e_virtchnl_irq_map_info *irqmap_info =
+ (struct i40e_virtchnl_irq_map_info *)msg;
+ struct i40e_virtchnl_vector_map *map;
+ u16 vsi_id, vsi_queue_id, vector_id;
+ i40e_status aq_ret = 0;
+ unsigned long tempmap;
+ int i;
+
+ if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+
+ for (i = 0; i < irqmap_info->num_vectors; i++) {
+ map = &irqmap_info->vecmap[i];
+
+ vector_id = map->vector_id;
+ vsi_id = map->vsi_id;
+ /* validate msg params */
+ if (!i40e_vc_isvalid_vector_id(vf, vector_id) ||
+ !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+
+ /* lookout for the invalid queue index */
+ tempmap = map->rxq_map;
+ vsi_queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
+ while (vsi_queue_id < I40E_MAX_VSI_QP) {
+ if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
+ vsi_queue_id)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+ vsi_queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
+ vsi_queue_id + 1);
+ }
+
+ tempmap = map->txq_map;
+ vsi_queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
+ while (vsi_queue_id < I40E_MAX_VSI_QP) {
+ if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
+ vsi_queue_id)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+ vsi_queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
+ vsi_queue_id + 1);
+ }
+
+ i40e_config_irq_link_list(vf, vsi_id, map);
+ }
+error_param:
+ /* send the response to the vf */
+ return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
+ aq_ret);
+}
+
+/**
+ * i40e_vc_enable_queues_msg
+ * @vf: pointer to the vf info
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * called from the vf to enable all or specific queue(s)
+ **/
+static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
+{
+ struct i40e_virtchnl_queue_select *vqs =
+ (struct i40e_virtchnl_queue_select *)msg;
+ struct i40e_pf *pf = vf->pf;
+ u16 vsi_id = vqs->vsi_id;
+ i40e_status aq_ret = 0;
+ unsigned long tempmap;
+ u16 queue_id;
+
+ if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+
+ if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+
+ tempmap = vqs->rx_queues;
+ queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
+ while (queue_id < I40E_MAX_VSI_QP) {
+ if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+ i40e_ctrl_vsi_rx_queue(vf, vsi_id, queue_id,
+ I40E_QUEUE_CTRL_ENABLE);
+
+ queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
+ queue_id + 1);
+ }
+
+ tempmap = vqs->tx_queues;
+ queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
+ while (queue_id < I40E_MAX_VSI_QP) {
+ if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+ i40e_ctrl_vsi_tx_queue(vf, vsi_id, queue_id,
+ I40E_QUEUE_CTRL_ENABLE);
+
+ queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
+ queue_id + 1);
+ }
+
+ /* Poll the status register to make sure that the
+ * requested op was completed successfully
+ */
+ udelay(10);
+
+ tempmap = vqs->rx_queues;
+ queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
+ while (queue_id < I40E_MAX_VSI_QP) {
+ if (i40e_ctrl_vsi_rx_queue(vf, vsi_id, queue_id,
+ I40E_QUEUE_CTRL_ENABLECHECK)) {
+ dev_err(&pf->pdev->dev,
+ "Queue control check failed on RX queue %d of VSI %d VF %d\n",
+ queue_id, vsi_id, vf->vf_id);
+ }
+ queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
+ queue_id + 1);
+ }
+
+ tempmap = vqs->tx_queues;
+ queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
+ while (queue_id < I40E_MAX_VSI_QP) {
+ if (i40e_ctrl_vsi_tx_queue(vf, vsi_id, queue_id,
+ I40E_QUEUE_CTRL_ENABLECHECK)) {
+ dev_err(&pf->pdev->dev,
+ "Queue control check failed on TX queue %d of VSI %d VF %d\n",
+ queue_id, vsi_id, vf->vf_id);
+ }
+ queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
+ queue_id + 1);
+ }
+
+error_param:
+ /* send the response to the vf */
+ return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
+ aq_ret);
+}
+
+/**
+ * i40e_vc_disable_queues_msg
+ * @vf: pointer to the vf info
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * called from the vf to disable all or specific queue(s)
+ **/
+static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
+{
+ struct i40e_virtchnl_queue_select *vqs =
+ (struct i40e_virtchnl_queue_select *)msg;
+ struct i40e_pf *pf = vf->pf;
+ u16 vsi_id = vqs->vsi_id;
+ i40e_status aq_ret = 0;
+ unsigned long tempmap;
+ u16 queue_id;
+
+ if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+
+ if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+
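+ /* walk the requested rx queues, validating then disabling each */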
+ tempmap = vqs->rx_queues;
+ queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
+ while (queue_id < I40E_MAX_VSI_QP) {
+ if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+ i40e_ctrl_vsi_rx_queue(vf, vsi_id, queue_id,
+ I40E_QUEUE_CTRL_DISABLE);
+
+ queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
+ queue_id + 1);
+ }
+
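+ /* and likewise for the tx queues */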
+ tempmap = vqs->tx_queues;
+ queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
+ while (queue_id < I40E_MAX_VSI_QP) {
+ if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+ i40e_ctrl_vsi_tx_queue(vf, vsi_id, queue_id,
+ I40E_QUEUE_CTRL_DISABLE);
+
+ queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
+ queue_id + 1);
+ }
+
+ /* Poll the status register to make sure that the
+ * requested op was completed successfully
+ */
+ udelay(10);
+
+ tempmap = vqs->rx_queues;
+ queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
+ while (queue_id < I40E_MAX_VSI_QP) {
+ if (i40e_ctrl_vsi_rx_queue(vf, vsi_id, queue_id,
+ I40E_QUEUE_CTRL_DISABLECHECK)) {
+ dev_err(&pf->pdev->dev,
+ "Queue control check failed on RX queue %d of VSI %d VF %d\n",
+ queue_id, vsi_id, vf->vf_id);
+ }
+ queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
+ queue_id + 1);
+ }
+
+ tempmap = vqs->tx_queues;
+ queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
+ while (queue_id < I40E_MAX_VSI_QP) {
+ if (i40e_ctrl_vsi_tx_queue(vf, vsi_id, queue_id,
+ I40E_QUEUE_CTRL_DISABLECHECK)) {
+ dev_err(&pf->pdev->dev,
+ "Queue control check failed on TX queue %d of VSI %d VF %d\n",
+ queue_id, vsi_id, vf->vf_id);
+ }
+ queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
+ queue_id + 1);
+ }
+
+error_param:
+ /* send the response to the vf */
+ return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
+ aq_ret);
+}
+
+/**
+ * i40e_vc_get_stats_msg
+ * @vf: pointer to the vf info
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * called from the vf to get vsi stats
+ **/
+static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
+{
+ struct i40e_virtchnl_queue_select *vqs =
+ (struct i40e_virtchnl_queue_select *)msg;
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_eth_stats stats;
+ i40e_status aq_ret = 0;
+ struct i40e_vsi *vsi;
+
+ memset(&stats, 0, sizeof(struct i40e_eth_stats));
+
+ if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+
+ vsi = pf->vsi[vqs->vsi_id];
+ if (!vsi) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
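+ /* refresh the stats from hw, then copy them into the reply */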
+ i40e_update_eth_stats(vsi);
+ memcpy(&stats, &vsi->eth_stats, sizeof(struct i40e_eth_stats));
+
+error_param:
+ /* send the response back to the vf */
+ return i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_STATS, aq_ret,
+ (u8 *)&stats, sizeof(stats));
+}
+
+/**
+ * i40e_vc_add_mac_addr_msg
+ * @vf: pointer to the vf info
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * add guest mac address filter
+ **/
+static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
+{
+ struct i40e_virtchnl_ether_addr_list *al =
+ (struct i40e_virtchnl_ether_addr_list *)msg;
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_vsi *vsi = NULL;
+ u16 vsi_id = al->vsi_id;
+ i40e_status aq_ret = 0;
+ int i;
+
+ if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
+ !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
+ !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+
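+ /* screen the whole list first; broadcast and zero addrs are invalid */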
+ for (i = 0; i < al->num_elements; i++) {
+ if (is_broadcast_ether_addr(al->list[i].addr) ||
+ is_zero_ether_addr(al->list[i].addr)) {
+ dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n",
+ al->list[i].addr);
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+ }
+ vsi = pf->vsi[vsi_id];
+
+ /* add new addresses to the list */
+ for (i = 0; i < al->num_elements; i++) {
+ struct i40e_mac_filter *f;
+
+ f = i40e_find_mac(vsi, al->list[i].addr, true, false);
+ if (f) {
+ if (i40e_is_vsi_in_vlan(vsi))
+ f = i40e_put_mac_in_vlan(vsi, al->list[i].addr,
+ true, false);
+ else
+ f = i40e_add_filter(vsi, al->list[i].addr, -1,
+ true, false);
+ }
+
+ if (!f) {
+ dev_err(&pf->pdev->dev,
+ "Unable to add VF MAC filter\n");
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+ }
+
+ /* program the updated filter list */
+ if (i40e_sync_vsi_filters(vsi))
+ dev_err(&pf->pdev->dev, "Unable to program VF MAC filters\n");
+
+error_param:
+ /* send the response to the vf */
+ return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
+ aq_ret);
+}
+
+/**
+ * i40e_vc_del_mac_addr_msg
+ * @vf: pointer to the vf info
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * remove guest mac address filter
+ **/
+static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
+{
+ struct i40e_virtchnl_ether_addr_list *al =
+ (struct i40e_virtchnl_ether_addr_list *)msg;
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_vsi *vsi = NULL;
+ u16 vsi_id = al->vsi_id;
+ i40e_status aq_ret = 0;
+ int i;
+
+ if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
+ !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
+ !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+ vsi = pf->vsi[vsi_id];
+
+ /* delete addresses from the list */
+ for (i = 0; i < al->num_elements; i++)
+ i40e_del_filter(vsi, al->list[i].addr,
+ I40E_VLAN_ANY, true, false);
+
+ /* program the updated filter list */
+ if (i40e_sync_vsi_filters(vsi))
+ dev_err(&pf->pdev->dev, "Unable to program VF MAC filters\n");
+
+error_param:
+ /* send the response to the vf */
+ return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
+ aq_ret);
+}
+
+/**
+ * i40e_vc_add_vlan_msg
+ * @vf: pointer to the vf info
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * program guest vlan id
+ **/
+static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
+{
+ struct i40e_virtchnl_vlan_filter_list *vfl =
+ (struct i40e_virtchnl_vlan_filter_list *)msg;
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_vsi *vsi = NULL;
+ u16 vsi_id = vfl->vsi_id;
+ i40e_status aq_ret = 0;
+ int i;
+
+ if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
+ !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
+ !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+
+ for (i = 0; i < vfl->num_elements; i++) {
+ if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
+ aq_ret = I40E_ERR_PARAM;
+ dev_err(&pf->pdev->dev,
+ "invalid VF VLAN id %d\n", vfl->vlan_id[i]);
+ goto error_param;
+ }
+ }
+ vsi = pf->vsi[vsi_id];
+ if (vsi->info.pvid) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+
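+ /* turn on vlan stripping before programming the new filters */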
+ i40e_vlan_stripping_enable(vsi);
+ for (i = 0; i < vfl->num_elements; i++) {
+ /* add new VLAN filter */
+ int ret = i40e_vsi_add_vlan(vsi, vfl->vlan_id[i]);
+ if (ret)
+ dev_err(&pf->pdev->dev,
+ "Unable to add VF vlan filter %d, error %d\n",
+ vfl->vlan_id[i], ret);
+ }
+
+error_param:
+ /* send the response to the vf */
+ return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ADD_VLAN, aq_ret);
+}
+
+/**
+ * i40e_vc_remove_vlan_msg
+ * @vf: pointer to the vf info
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * remove programmed guest vlan id
+ **/
+static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
+{
+ struct i40e_virtchnl_vlan_filter_list *vfl =
+ (struct i40e_virtchnl_vlan_filter_list *)msg;
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_vsi *vsi = NULL;
+ u16 vsi_id = vfl->vsi_id;
+ i40e_status aq_ret = 0;
+ int i;
+
+ if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
+ !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
+ !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+
+ for (i = 0; i < vfl->num_elements; i++) {
+ if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+ }
+
+ vsi = pf->vsi[vsi_id];
+ if (vsi->info.pvid) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+
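+ /* delete each filter; log failures but keep going */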
+ for (i = 0; i < vfl->num_elements; i++) {
+ int ret = i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]);
+ if (ret)
+ dev_err(&pf->pdev->dev,
+ "Unable to delete VF vlan filter %d, error %d\n",
+ vfl->vlan_id[i], ret);
+ }
+
+error_param:
+ /* send the response to the vf */
+ return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DEL_VLAN, aq_ret);
+}
+
+/**
+ * i40e_vc_fcoe_msg
+ * @vf: pointer to the vf info
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * called from the vf for the fcoe msgs
+ **/
+static int i40e_vc_fcoe_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
+{
+ i40e_status aq_ret = 0;
+
+ if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
+ !test_bit(I40E_VF_STAT_FCOEENA, &vf->vf_states)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
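+ /* nothing implemented yet; reply with NOT_IMPLEMENTED */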
+ aq_ret = I40E_ERR_NOT_IMPLEMENTED;
+
+error_param:
+ /* send the response to the vf */
+ return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_FCOE, aq_ret);
+}
+
+/**
+ * i40e_vc_validate_vf_msg
+ * @vf: pointer to the vf info
+ * @v_opcode: message opcode
+ * @v_retval: message return value
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * validate msg
+ **/
+static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode,
+ u32 v_retval, u8 *msg, u16 msglen)
+{
+ bool err_msg_format = false;
+ int valid_len;
+
+ /* Check if VF is disabled. */
+ if (test_bit(I40E_VF_STAT_DISABLED, &vf->vf_states))
+ return I40E_ERR_PARAM;
+
+ /* Validate message length. */
+ switch (v_opcode) {
+ case I40E_VIRTCHNL_OP_VERSION:
+ valid_len = sizeof(struct i40e_virtchnl_version_info);
+ break;
+ case I40E_VIRTCHNL_OP_RESET_VF:
+ case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
+ valid_len = 0;
+ break;
+ case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
+ valid_len = sizeof(struct i40e_virtchnl_txq_info);
+ break;
+ case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE:
+ valid_len = sizeof(struct i40e_virtchnl_rxq_info);
+ break;
+ case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
+ valid_len = sizeof(struct i40e_virtchnl_vsi_queue_config_info);
+ if (msglen >= valid_len) {
+ struct i40e_virtchnl_vsi_queue_config_info *vqc =
+ (struct i40e_virtchnl_vsi_queue_config_info *)msg;
+ valid_len += (vqc->num_queue_pairs *
+ sizeof(struct
+ i40e_virtchnl_queue_pair_info));
+ if (vqc->num_queue_pairs == 0)
+ err_msg_format = true;
+ }
+ break;
+ case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
+ valid_len = sizeof(struct i40e_virtchnl_irq_map_info);
+ if (msglen >= valid_len) {
+ struct i40e_virtchnl_irq_map_info *vimi =
+ (struct i40e_virtchnl_irq_map_info *)msg;
+ valid_len += (vimi->num_vectors *
+ sizeof(struct i40e_virtchnl_vector_map));
+ if (vimi->num_vectors == 0)
+ err_msg_format = true;
+ }
+ break;
+ case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
+ case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
+ valid_len = sizeof(struct i40e_virtchnl_queue_select);
+ break;
+ case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
+ case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
+ valid_len = sizeof(struct i40e_virtchnl_ether_addr_list);
+ if (msglen >= valid_len) {
+ struct i40e_virtchnl_ether_addr_list *veal =
+ (struct i40e_virtchnl_ether_addr_list *)msg;
+ valid_len += veal->num_elements *
+ sizeof(struct i40e_virtchnl_ether_addr);
+ if (veal->num_elements == 0)
+ err_msg_format = true;
+ }
+ break;
+ case I40E_VIRTCHNL_OP_ADD_VLAN:
+ case I40E_VIRTCHNL_OP_DEL_VLAN:
+ valid_len = sizeof(struct i40e_virtchnl_vlan_filter_list);
+ if (msglen >= valid_len) {
+ struct i40e_virtchnl_vlan_filter_list *vfl =
+ (struct i40e_virtchnl_vlan_filter_list *)msg;
+ valid_len += vfl->num_elements * sizeof(u16);
+ if (vfl->num_elements == 0)
+ err_msg_format = true;
+ }
+ break;
+ case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
+ valid_len = sizeof(struct i40e_virtchnl_promisc_info);
+ break;
+ case I40E_VIRTCHNL_OP_GET_STATS:
+ valid_len = sizeof(struct i40e_virtchnl_queue_select);
+ break;
+ /* These are always errors coming from the VF. */
+ case I40E_VIRTCHNL_OP_EVENT:
+ case I40E_VIRTCHNL_OP_UNKNOWN:
+ default:
+ return -EPERM;
+ }
+ /* few more checks */
+ if ((valid_len != msglen) || (err_msg_format)) {
+ i40e_vc_send_resp_to_vf(vf, v_opcode, I40E_ERR_PARAM);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * i40e_vc_process_vf_msg
+ * @pf: pointer to the pf structure
+ * @vf_id: source vf id
+ * @v_opcode: message opcode
+ * @v_retval: message return value
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * called from the common aeq/arq handler to
+ * process request from vf
+ **/
+int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
+ u32 v_retval, u8 *msg, u16 msglen)
+{
+ struct i40e_vf *vf = &(pf->vf[vf_id]);
+ struct i40e_hw *hw = &pf->hw;
+ int ret;
+
+ pf->vf_aq_requests++;
+ /* perform basic checks on the msg */
+ ret = i40e_vc_validate_vf_msg(vf, v_opcode, v_retval, msg, msglen);
+
+ if (ret) {
+ dev_err(&pf->pdev->dev, "invalid message from vf %d\n", vf_id);
+ return ret;
+ }
+ wr32(hw, I40E_VFGEN_RSTAT1(vf_id), I40E_VFR_VFACTIVE);
+ switch (v_opcode) {
+ case I40E_VIRTCHNL_OP_VERSION:
+ ret = i40e_vc_get_version_msg(vf);
+ break;
+ case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
+ ret = i40e_vc_get_vf_resources_msg(vf);
+ break;
+ case I40E_VIRTCHNL_OP_RESET_VF:
+ ret = i40e_vc_reset_vf_msg(vf);
+ break;
+ case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
+ ret = i40e_vc_config_promiscuous_mode_msg(vf, msg, msglen);
+ break;
+ case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
+ ret = i40e_vc_config_queues_msg(vf, msg, msglen);
+ break;
+ case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
+ ret = i40e_vc_config_irq_map_msg(vf, msg, msglen);
+ break;
+ case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
+ ret = i40e_vc_enable_queues_msg(vf, msg, msglen);
+ break;
+ case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
+ ret = i40e_vc_disable_queues_msg(vf, msg, msglen);
+ break;
+ case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
+ ret = i40e_vc_add_mac_addr_msg(vf, msg, msglen);
+ break;
+ case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
+ ret = i40e_vc_del_mac_addr_msg(vf, msg, msglen);
+ break;
+ case I40E_VIRTCHNL_OP_ADD_VLAN:
+ ret = i40e_vc_add_vlan_msg(vf, msg, msglen);
+ break;
+ case I40E_VIRTCHNL_OP_DEL_VLAN:
+ ret = i40e_vc_remove_vlan_msg(vf, msg, msglen);
+ break;
+ case I40E_VIRTCHNL_OP_GET_STATS:
+ ret = i40e_vc_get_stats_msg(vf, msg, msglen);
+ break;
+ case I40E_VIRTCHNL_OP_FCOE:
+ ret = i40e_vc_fcoe_msg(vf, msg, msglen);
+ break;
+ case I40E_VIRTCHNL_OP_UNKNOWN:
+ default:
+ dev_err(&pf->pdev->dev,
+ "Unsupported opcode %d from vf %d\n", v_opcode, vf_id);
+ ret = i40e_vc_send_resp_to_vf(vf, v_opcode,
+ I40E_ERR_NOT_IMPLEMENTED);
+ break;
+ }
+
+ return ret;
+}
+
+/**
+ * i40e_vc_process_vflr_event
+ * @pf: pointer to the pf structure
+ *
+ * called from the vflr irq handler to
+ * free up vf resources and state variables
+ **/
+int i40e_vc_process_vflr_event(struct i40e_pf *pf)
+{
+ u32 reg, reg_idx, bit_idx, vf_id;
+ struct i40e_hw *hw = &pf->hw;
+ struct i40e_vf *vf;
+
+ if (!test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state))
+ return 0;
+
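+ /* clear the pending flag first, then scan every vf for an flr */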
+ clear_bit(__I40E_VFLR_EVENT_PENDING, &pf->state);
+ for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) {
+ reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
+ bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
+ vf = &pf->vf[vf_id];
+ /* read GLGEN_VFLRSTAT register to find out the flr vfs */
+ reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx));
+ if (reg & (1 << bit_idx)) {
+ /* clear the bit in GLGEN_VFLRSTAT */
+ wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), (1 << bit_idx));
+
+ if (i40e_reset_vf(vf, true))
+ dev_err(&pf->pdev->dev,
+ "Unable to reset the VF %d\n", vf_id);
+ /* free up vf resources to destroy vsi state */
+ i40e_free_vf_res(vf);
+
+ /* allocate new vf resources with the default state */
+ if (i40e_alloc_vf_res(vf))
+ dev_err(&pf->pdev->dev,
+ "Unable to allocate VF resources %d\n",
+ vf_id);
+
+ i40e_enable_vf_mappings(vf);
+ }
+ }
+
+ /* re-enable vflr interrupt cause */
+ reg = rd32(hw, I40E_PFINT_ICR0_ENA);
+ reg |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
+ wr32(hw, I40E_PFINT_ICR0_ENA, reg);
+ i40e_flush(hw);
+
+ return 0;
+}
+
+/**
+ * i40e_vc_vf_broadcast
+ * @pf: pointer to the pf structure
+ * @opcode: operation code
+ * @retval: return value
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * send a message to all VFs on a given PF
+ **/
+static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
+ enum i40e_virtchnl_ops v_opcode,
+ i40e_status v_retval, u8 *msg,
+ u16 msglen)
+{
+ struct i40e_hw *hw = &pf->hw;
+ struct i40e_vf *vf = pf->vf;
+ int i;
+
+ for (i = 0; i < pf->num_alloc_vfs; i++) {
+ /* Ignore return value on purpose - a given VF may fail, but
+ * we need to keep going and send to all of them
+ */
+ i40e_aq_send_msg_to_vf(hw, vf->vf_id, v_opcode, v_retval,
+ msg, msglen, NULL);
+ vf++;
+ }
+}
+
+/**
+ * i40e_vc_notify_link_state
+ * @pf: pointer to the pf structure
+ *
+ * send a link status message to all VFs on a given PF
+ **/
+void i40e_vc_notify_link_state(struct i40e_pf *pf)
+{
+ struct i40e_virtchnl_pf_event pfe;
+
+ pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE;
+ pfe.severity = I40E_PF_EVENT_SEVERITY_INFO;
+ pfe.event_data.link_event.link_status =
+ pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP;
+ pfe.event_data.link_event.link_speed = pf->hw.phy.link_info.link_speed;
+
+ i40e_vc_vf_broadcast(pf, I40E_VIRTCHNL_OP_EVENT, I40E_SUCCESS,
+ (u8 *)&pfe, sizeof(struct i40e_virtchnl_pf_event));
+}
+
+/**
+ * i40e_vc_notify_reset
+ * @pf: pointer to the pf structure
+ *
+ * indicate a pending reset to all VFs on a given PF
+ **/
+void i40e_vc_notify_reset(struct i40e_pf *pf)
+{
+ struct i40e_virtchnl_pf_event pfe;
+
+ pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING;
+ pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM;
+ i40e_vc_vf_broadcast(pf, I40E_VIRTCHNL_OP_EVENT, I40E_SUCCESS,
+ (u8 *)&pfe, sizeof(struct i40e_virtchnl_pf_event));
+}
+
+/**
+ * i40e_vc_notify_vf_reset
+ * @vf: pointer to the vf structure
+ *
+ * indicate a pending reset to the given VF
+ **/
+void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
+{
+ struct i40e_virtchnl_pf_event pfe;
+
+ pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING;
+ pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM;
+ i40e_aq_send_msg_to_vf(&vf->pf->hw, vf->vf_id, I40E_VIRTCHNL_OP_EVENT,
+ I40E_SUCCESS, (u8 *)&pfe,
+ sizeof(struct i40e_virtchnl_pf_event), NULL);
+}
+
+/**
+ * i40e_ndo_set_vf_mac
+ * @netdev: network interface device structure
+ * @vf_id: vf identifier
+ * @mac: mac address
+ *
+ * program vf mac address
+ **/
+int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_mac_filter *f;
+ struct i40e_vf *vf;
+ int ret = 0;
+
+ /* validate the request */
+ if (vf_id >= pf->num_alloc_vfs) {
+ dev_err(&pf->pdev->dev,
+ "Invalid VF Identifier %d\n", vf_id);
+ ret = -EINVAL;
+ goto error_param;
+ }
+
+ vf = &(pf->vf[vf_id]);
+ vsi = pf->vsi[vf->lan_vsi_index];
+ if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
+ dev_err(&pf->pdev->dev,
+ "Uninitialized VF %d\n", vf_id);
+ ret = -EINVAL;
+ goto error_param;
+ }
+
+ if (!is_valid_ether_addr(mac)) {
+ dev_err(&pf->pdev->dev,
+ "Invalid VF ethernet address\n");
+ ret = -EINVAL;
+ goto error_param;
+ }
+
+ /* delete the temporary mac address */
+ i40e_del_filter(vsi, vf->default_lan_addr.addr, 0, true, false);
+
+ /* add the new mac address */
+ f = i40e_add_filter(vsi, mac, 0, true, false);
+ if (!f) {
+ dev_err(&pf->pdev->dev,
+ "Unable to add VF ucast filter\n");
+ ret = -ENOMEM;
+ goto error_param;
+ }
+
+ dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n", mac, vf_id);
+ /* program mac filter */
+ if (i40e_sync_vsi_filters(vsi)) {
+ dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
+ ret = -EIO;
+ goto error_param;
+ }
+ memcpy(vf->default_lan_addr.addr, mac, ETH_ALEN);
+ dev_info(&pf->pdev->dev, "Reload the VF driver to make this change effective.\n");
+ ret = 0;
+
+error_param:
+ return ret;
+}
+
+/**
+ * i40e_ndo_set_vf_port_vlan
+ * @netdev: network interface device structure
+ * @vf_id: vf identifier
+ * @vlan_id: vlan identifier
+ * @qos: priority setting
+ *
+ * program vf vlan id and/or qos
+ **/
+int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
+ int vf_id, u16 vlan_id, u8 qos)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+ struct i40e_vsi *vsi;
+ struct i40e_vf *vf;
+ int ret = 0;
+
+ /* validate the request */
+ if (vf_id >= pf->num_alloc_vfs) {
+ dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
+ ret = -EINVAL;
+ goto error_pvid;
+ }
+
+ if ((vlan_id > I40E_MAX_VLANID) || (qos > 7)) {
+ dev_err(&pf->pdev->dev, "Invalid VF Parameters\n");
+ ret = -EINVAL;
+ goto error_pvid;
+ }
+
+ vf = &(pf->vf[vf_id]);
+ vsi = pf->vsi[vf->lan_vsi_index];
+ if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
+ dev_err(&pf->pdev->dev, "Uninitialized VF %d\n", vf_id);
+ ret = -EINVAL;
+ goto error_pvid;
+ }
+
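+ /* remove any old port vlan before programming the new one */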
+ if (vsi->info.pvid) {
+ /* kill old VLAN */
+ ret = i40e_vsi_kill_vlan(vsi, (le16_to_cpu(vsi->info.pvid) &
+ VLAN_VID_MASK));
+ if (ret) {
+ dev_info(&vsi->back->pdev->dev,
+ "remove VLAN failed, ret=%d, aq_err=%d\n",
+ ret, pf->hw.aq.asq_last_status);
+ }
+ }
+ if (vlan_id || qos)
+ ret = i40e_vsi_add_pvid(vsi,
+ vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT));
+ else
+ i40e_vlan_stripping_disable(vsi);
+
+ if (vlan_id) {
+ dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n",
+ vlan_id, qos, vf_id);
+
+ /* add new VLAN filter */
+ ret = i40e_vsi_add_vlan(vsi, vlan_id);
+ if (ret) {
+ dev_info(&vsi->back->pdev->dev,
+ "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
+ vsi->back->hw.aq.asq_last_status);
+ goto error_pvid;
+ }
+ }
+
+ if (ret) {
+ dev_err(&pf->pdev->dev, "Unable to update VF vsi context\n");
+ goto error_pvid;
+ }
+ ret = 0;
+
+error_pvid:
+ return ret;
+}
+
+/**
+ * i40e_ndo_set_vf_bw
+ * @netdev: network interface device structure
+ * @vf_id: vf identifier
+ * @tx_rate: tx rate
+ *
+ * configure vf tx rate
+ **/
+int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int tx_rate)
+{
+ return -EOPNOTSUPP;
+}
+
+/**
+ * i40e_ndo_get_vf_config
+ * @netdev: network interface device structure
+ * @vf_id: vf identifier
+ * @ivi: vf configuration structure
+ *
+ * return vf configuration
+ **/
+int i40e_ndo_get_vf_config(struct net_device *netdev,
+ int vf_id, struct ifla_vf_info *ivi)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_mac_filter *f, *ftmp;
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_vf *vf;
+ int ret = 0;
+
+ /* validate the request */
+ if (vf_id >= pf->num_alloc_vfs) {
+ dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
+ ret = -EINVAL;
+ goto error_param;
+ }
+
+ vf = &(pf->vf[vf_id]);
+ /* first vsi is always the LAN vsi */
+ vsi = pf->vsi[vf->lan_vsi_index];
+ if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
+ dev_err(&pf->pdev->dev, "Uninitialized VF %d\n", vf_id);
+ ret = -EINVAL;
+ goto error_param;
+ }
+
+ ivi->vf = vf_id;
+
+ /* first entry of the list is the default ethernet address */
+ list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
+ memcpy(&ivi->mac, f->macaddr, I40E_ETH_LENGTH_OF_ADDRESS);
+ break;
+ }
+
+ ivi->tx_rate = 0;
+ ivi->vlan = le16_to_cpu(vsi->info.pvid) & I40E_VLAN_MASK;
+ ivi->qos = (le16_to_cpu(vsi->info.pvid) & I40E_PRIORITY_MASK) >>
+ I40E_VLAN_PRIORITY_SHIFT;
+ ret = 0;
+
+error_param:
+ return ret;
+}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
new file mode 100644
index 0000000..360382c
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -0,0 +1,120 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_VIRTCHNL_PF_H_
+#define _I40E_VIRTCHNL_PF_H_
+
+#include "i40e.h"
+
+#define I40E_MAX_MACVLAN_FILTERS 256
+#define I40E_MAX_VLAN_FILTERS 256
+#define I40E_MAX_VLANID 4095
+
+#define I40E_VIRTCHNL_SUPPORTED_QTYPES 2
+
+#define I40E_DEFAULT_NUM_MDD_EVENTS_ALLOWED 3
+#define I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED 10
+
+#define I40E_VLAN_PRIORITY_SHIFT 12
+#define I40E_VLAN_MASK 0xFFF
+#define I40E_PRIORITY_MASK 0x7000
+
+/* Various queue ctrls */
+enum i40e_queue_ctrl {
+ I40E_QUEUE_CTRL_UNKNOWN = 0,
+ I40E_QUEUE_CTRL_ENABLE,
+ I40E_QUEUE_CTRL_ENABLECHECK,
+ I40E_QUEUE_CTRL_DISABLE,
+ I40E_QUEUE_CTRL_DISABLECHECK,
+ I40E_QUEUE_CTRL_FASTDISABLE,
+ I40E_QUEUE_CTRL_FASTDISABLECHECK,
+};
+
+/* VF states */
+enum i40e_vf_states {
+ I40E_VF_STAT_INIT = 0,
+ I40E_VF_STAT_ACTIVE,
+ I40E_VF_STAT_FCOEENA,
+ I40E_VF_STAT_DISABLED,
+};
+
+/* VF capabilities */
+enum i40e_vf_capabilities {
+ I40E_VIRTCHNL_VF_CAP_PRIVILEGE = 0,
+ I40E_VIRTCHNL_VF_CAP_L2,
+};
+
+/* VF information structure */
+struct i40e_vf {
+ struct i40e_pf *pf;
+
+ /* vf id in the pf space */
+ u16 vf_id;
+ /* all vf vsis connect to the same parent */
+ enum i40e_switch_element_types parent_type;
+
+ /* vf Port Extender (PE) stag if used */
+ u16 stag;
+
+ struct i40e_virtchnl_ether_addr default_lan_addr;
+ struct i40e_virtchnl_ether_addr default_fcoe_addr;
+
+ /* VSI indices - actual VSI pointers are maintained in the PF structure.
+ * When assigned, these will be non-zero, because VSI 0 is always
+ * the main LAN VSI for the PF.
+ */
+ u8 lan_vsi_index; /* index into PF struct */
+ u8 lan_vsi_id; /* ID as used by firmware */
+
+ u8 num_queue_pairs; /* num of qps assigned to vf vsis */
+ u64 num_mdd_events; /* num of mdd events detected */
+ u64 num_invalid_msgs; /* num of malformed or invalid msgs detected */
+ u64 num_valid_msgs; /* num of valid msgs detected */
+
+ unsigned long vf_caps; /* vf's adv. capabilities */
+ unsigned long vf_states; /* vf's runtime states */
+};
+
+void i40e_free_vfs(struct i40e_pf *pf);
+int i40e_pci_sriov_configure(struct pci_dev *dev, int num_vfs);
+int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
+ u32 v_retval, u8 *msg, u16 msglen);
+int i40e_vc_process_vflr_event(struct i40e_pf *pf);
+int i40e_reset_vf(struct i40e_vf *vf, bool flr);
+void i40e_vc_notify_vf_reset(struct i40e_vf *vf);
+
+/* vf configuration related iplink handlers */
+int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac);
+int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
+ int vf_id, u16 vlan_id, u8 qos);
+int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int tx_rate);
+int i40e_ndo_get_vf_config(struct net_device *netdev,
+ int vf_id, struct ifla_vf_info *ivi);
+void i40e_vc_notify_link_state(struct i40e_pf *pf);
+void i40e_vc_notify_reset(struct i40e_pf *pf);
+
+#endif /* _I40E_VIRTCHNL_PF_H_ */
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index 79b5835..47c2d10 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -719,6 +719,10 @@ static s32 igb_get_phy_id_82575(struct e1000_hw *hw)
u32 ctrl_ext;
u32 mdic;
+ /* Extra read required for some PHYs on i354 */
+ if (hw->mac.type == e1000_i354)
+ igb_get_phy_id(hw);
+
/* For SGMII PHYs, we try the list of possible addresses until
* we find one that works. For non-SGMII PHYs
* (e.g. integrated copper PHYs), an address of 1 should
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c
index f0dfd41..298f0ed 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.c
@@ -712,6 +712,7 @@ static s32 igb_set_fc_watermarks(struct e1000_hw *hw)
static s32 igb_set_default_fc(struct e1000_hw *hw)
{
s32 ret_val = 0;
+ u16 lan_offset;
u16 nvm_data;
/* Read and store word 0x0F of the EEPROM. This word contains bits
@@ -722,7 +723,14 @@ static s32 igb_set_default_fc(struct e1000_hw *hw)
* control setting, then the variable hw->fc will
* be initialized based on a value in the EEPROM.
*/
- ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG, 1, &nvm_data);
+ if (hw->mac.type == e1000_i350) {
+ lan_offset = NVM_82580_LAN_FUNC_OFFSET(hw->bus.func);
+ ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG
+ + lan_offset, 1, &nvm_data);
+ } else {
+ ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG,
+ 1, &nvm_data);
+ }
if (ret_val) {
hw_dbg("NVM Read Error\n");
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 0e1b973..e8649ab 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -160,6 +160,13 @@ static int ixgbe_get_settings(struct net_device *netdev,
bool autoneg = false;
bool link_up;
+ /* SFP type is needed for get_link_capabilities */
+ if (hw->phy.media_type & (ixgbe_media_type_fiber |
+ ixgbe_media_type_fiber_qsfp)) {
+ if (hw->phy.sfp_type == ixgbe_sfp_type_not_present)
+ hw->phy.ops.identify_sfp(hw);
+ }
+
hw->mac.ops.get_link_capabilities(hw, &supported_link, &autoneg);
/* set the supported link speeds */
@@ -186,6 +193,11 @@ static int ixgbe_get_settings(struct net_device *netdev,
ecmd->advertising |= ADVERTISED_1000baseT_Full;
if (supported_link & IXGBE_LINK_SPEED_100_FULL)
ecmd->advertising |= ADVERTISED_100baseT_Full;
+
+ if (hw->phy.multispeed_fiber && !autoneg) {
+ if (supported_link & IXGBE_LINK_SPEED_10GB_FULL)
+ ecmd->advertising = ADVERTISED_10000baseT_Full;
+ }
}
if (autoneg) {
@@ -314,6 +326,14 @@ static int ixgbe_set_settings(struct net_device *netdev,
if (ecmd->advertising & ~ecmd->supported)
return -EINVAL;
+ /* only allow one speed at a time if no autoneg */
+ if (!ecmd->autoneg && hw->phy.multispeed_fiber) {
+ if (ecmd->advertising ==
+ (ADVERTISED_10000baseT_Full |
+ ADVERTISED_1000baseT_Full))
+ return -EINVAL;
+ }
+
old = hw->phy.autoneg_advertised;
advertised = 0;
if (ecmd->advertising & ADVERTISED_10000baseT_Full)
@@ -1805,6 +1825,10 @@ static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter)
unsigned int size = 1024;
netdev_tx_t tx_ret_val;
struct sk_buff *skb;
+ u32 flags_orig = adapter->flags;
+
+ /* DCB can modify the frames on Tx */
+ adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
/* allocate test skb */
skb = alloc_skb(size, GFP_KERNEL);
@@ -1857,6 +1881,7 @@ static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter)
/* free the original skb */
kfree_skb(skb);
+ adapter->flags = flags_orig;
return ret_val;
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 7aba452..0ade0cd 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -3571,7 +3571,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
int i;
- u32 rxctrl;
+ u32 rxctrl, rfctl;
/* disable receives while setting up the descriptors */
rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
@@ -3580,6 +3580,13 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
ixgbe_setup_psrtype(adapter);
ixgbe_setup_rdrxctl(adapter);
+ /* RSC Setup */
+ rfctl = IXGBE_READ_REG(hw, IXGBE_RFCTL);
+ rfctl &= ~IXGBE_RFCTL_RSC_DIS;
+ if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))
+ rfctl |= IXGBE_RFCTL_RSC_DIS;
+ IXGBE_WRITE_REG(hw, IXGBE_RFCTL, rfctl);
+
/* Program registers for the distribution of queues */
ixgbe_setup_mrqc(adapter);
@@ -5993,8 +6000,16 @@ static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter)
adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
speed = hw->phy.autoneg_advertised;
- if ((!speed) && (hw->mac.ops.get_link_capabilities))
+ if ((!speed) && (hw->mac.ops.get_link_capabilities)) {
hw->mac.ops.get_link_capabilities(hw, &speed, &autoneg);
+
+ /* setup the highest link when no autoneg */
+ if (!autoneg) {
+ if (speed & IXGBE_LINK_SPEED_10GB_FULL)
+ speed = IXGBE_LINK_SPEED_10GB_FULL;
+ }
+ }
+
if (hw->mac.ops.setup_link)
hw->mac.ops.setup_link(hw, speed, true);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 6442cf8..10775cb 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -1861,6 +1861,7 @@ enum {
#define IXGBE_RFCTL_ISCSI_DIS 0x00000001
#define IXGBE_RFCTL_ISCSI_DWC_MASK 0x0000003E
#define IXGBE_RFCTL_ISCSI_DWC_SHIFT 1
+#define IXGBE_RFCTL_RSC_DIS 0x00000020
#define IXGBE_RFCTL_NFSW_DIS 0x00000040
#define IXGBE_RFCTL_NFSR_DIS 0x00000080
#define IXGBE_RFCTL_NFS_VER_MASK 0x00000300
diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c
index 270e65f..a36fa80 100644
--- a/drivers/net/ethernet/korina.c
+++ b/drivers/net/ethernet/korina.c
@@ -996,14 +996,14 @@ static int korina_open(struct net_device *dev)
* that handles the Done Finished
* Ovr and Und Events */
ret = request_irq(lp->rx_irq, korina_rx_dma_interrupt,
- IRQF_DISABLED, "Korina ethernet Rx", dev);
+ 0, "Korina ethernet Rx", dev);
if (ret < 0) {
printk(KERN_ERR "%s: unable to get Rx DMA IRQ %d\n",
dev->name, lp->rx_irq);
goto err_release;
}
ret = request_irq(lp->tx_irq, korina_tx_dma_interrupt,
- IRQF_DISABLED, "Korina ethernet Tx", dev);
+ 0, "Korina ethernet Tx", dev);
if (ret < 0) {
printk(KERN_ERR "%s: unable to get Tx DMA IRQ %d\n",
dev->name, lp->tx_irq);
@@ -1012,7 +1012,7 @@ static int korina_open(struct net_device *dev)
/* Install handler for overrun error. */
ret = request_irq(lp->ovr_irq, korina_ovr_interrupt,
- IRQF_DISABLED, "Ethernet Overflow", dev);
+ 0, "Ethernet Overflow", dev);
if (ret < 0) {
printk(KERN_ERR "%s: unable to get OVR IRQ %d\n",
dev->name, lp->ovr_irq);
@@ -1021,7 +1021,7 @@ static int korina_open(struct net_device *dev)
/* Install handler for underflow error. */
ret = request_irq(lp->und_irq, korina_und_interrupt,
- IRQF_DISABLED, "Ethernet Underflow", dev);
+ 0, "Ethernet Underflow", dev);
if (ret < 0) {
printk(KERN_ERR "%s: unable to get UND IRQ %d\n",
dev->name, lp->und_irq);
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
index bfdb0686..6a6c1f7 100644
--- a/drivers/net/ethernet/lantiq_etop.c
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -282,8 +282,7 @@ ltq_etop_hw_init(struct net_device *dev)
if (IS_TX(i)) {
ltq_dma_alloc_tx(&ch->dma);
- request_irq(irq, ltq_etop_dma_irq, IRQF_DISABLED,
- "etop_tx", priv);
+ request_irq(irq, ltq_etop_dma_irq, 0, "etop_tx", priv);
} else if (IS_RX(i)) {
ltq_dma_alloc_rx(&ch->dma);
for (ch->dma.desc = 0; ch->dma.desc < LTQ_DESC_NUM;
@@ -291,8 +290,7 @@ ltq_etop_hw_init(struct net_device *dev)
if (ltq_etop_alloc_skb(ch))
return -ENOMEM;
ch->dma.desc = 0;
- request_irq(irq, ltq_etop_dma_irq, IRQF_DISABLED,
- "etop_rx", priv);
+ request_irq(irq, ltq_etop_dma_irq, 0, "etop_rx", priv);
}
ch->dma.irq = irq;
}
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index 4ae0c74..fff6246 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -1123,8 +1123,7 @@ static int pxa168_eth_open(struct net_device *dev)
struct pxa168_eth_private *pep = netdev_priv(dev);
int err;
- err = request_irq(dev->irq, pxa168_eth_int_handler,
- IRQF_DISABLED, dev->name, dev);
+ err = request_irq(dev->irq, pxa168_eth_int_handler, 0, dev->name, dev);
if (err) {
dev_err(&dev->dev, "can't assign irq\n");
return -EAGAIN;
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index ef94a59..1a9c4f6 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -3092,6 +3092,9 @@ static struct sk_buff *skge_rx_get(struct net_device *dev,
if (!nskb)
goto resubmit;
+ skb = e->skb;
+ prefetch(skb->data);
+
if (skge_rx_setup(skge, e, nskb, skge->rx_buf_size) < 0) {
dev_kfree_skb(nskb);
goto resubmit;
@@ -3101,8 +3104,6 @@ static struct sk_buff *skge_rx_get(struct net_device *dev,
dma_unmap_addr(e, mapaddr),
dma_unmap_len(e, maplen),
PCI_DMA_FROMDEVICE);
- skb = e->skb;
- prefetch(skb->data);
}
skb_put(skb, len);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index a28cd80..0c75098 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -53,9 +53,11 @@ static int mlx4_en_moderation_update(struct mlx4_en_priv *priv)
for (i = 0; i < priv->tx_ring_num; i++) {
priv->tx_cq[i].moder_cnt = priv->tx_frames;
priv->tx_cq[i].moder_time = priv->tx_usecs;
- err = mlx4_en_set_cq_moder(priv, &priv->tx_cq[i]);
- if (err)
- return err;
+ if (priv->port_up) {
+ err = mlx4_en_set_cq_moder(priv, &priv->tx_cq[i]);
+ if (err)
+ return err;
+ }
}
if (priv->adaptive_rx_coal)
@@ -65,9 +67,11 @@ static int mlx4_en_moderation_update(struct mlx4_en_priv *priv)
priv->rx_cq[i].moder_cnt = priv->rx_frames;
priv->rx_cq[i].moder_time = priv->rx_usecs;
priv->last_moder_time[i] = MLX4_EN_AUTO_CONF;
- err = mlx4_en_set_cq_moder(priv, &priv->rx_cq[i]);
- if (err)
- return err;
+ if (priv->port_up) {
+ err = mlx4_en_set_cq_moder(priv, &priv->rx_cq[i]);
+ if (err)
+ return err;
+ }
}
return err;
diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c
index 0fba153..075f4e2 100644
--- a/drivers/net/ethernet/micrel/ks8851_mll.c
+++ b/drivers/net/ethernet/micrel/ks8851_mll.c
@@ -915,7 +915,7 @@ static int ks_net_open(struct net_device *netdev)
struct ks_net *ks = netdev_priv(netdev);
int err;
-#define KS_INT_FLAGS (IRQF_DISABLED|IRQF_TRIGGER_LOW)
+#define KS_INT_FLAGS IRQF_TRIGGER_LOW
/* lock the card, even if we may not actually do anything
* else at the moment.
*/
diff --git a/drivers/net/ethernet/natsemi/jazzsonic.c b/drivers/net/ethernet/natsemi/jazzsonic.c
index c20766c..79257f7 100644
--- a/drivers/net/ethernet/natsemi/jazzsonic.c
+++ b/drivers/net/ethernet/natsemi/jazzsonic.c
@@ -83,8 +83,7 @@ static int jazzsonic_open(struct net_device* dev)
{
int retval;
- retval = request_irq(dev->irq, sonic_interrupt, IRQF_DISABLED,
- "sonic", dev);
+ retval = request_irq(dev->irq, sonic_interrupt, 0, "sonic", dev);
if (retval) {
printk(KERN_ERR "%s: unable to get IRQ %d.\n",
dev->name, dev->irq);
diff --git a/drivers/net/ethernet/natsemi/xtsonic.c b/drivers/net/ethernet/natsemi/xtsonic.c
index c2e0256..4da172a 100644
--- a/drivers/net/ethernet/natsemi/xtsonic.c
+++ b/drivers/net/ethernet/natsemi/xtsonic.c
@@ -95,8 +95,7 @@ static int xtsonic_open(struct net_device *dev)
{
int retval;
- retval = request_irq(dev->irq, sonic_interrupt, IRQF_DISABLED,
- "sonic", dev);
+ retval = request_irq(dev->irq, sonic_interrupt, 0, "sonic", dev);
if (retval) {
printk(KERN_ERR "%s: unable to get IRQ %d.\n",
dev->name, dev->irq);
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c
index c498181..5b65356 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac.c
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.c
@@ -1219,7 +1219,7 @@ static int pasemi_mac_open(struct net_device *dev)
snprintf(mac->tx_irq_name, sizeof(mac->tx_irq_name), "%s tx",
dev->name);
- ret = request_irq(mac->tx->chan.irq, pasemi_mac_tx_intr, IRQF_DISABLED,
+ ret = request_irq(mac->tx->chan.irq, pasemi_mac_tx_intr, 0,
mac->tx_irq_name, mac->tx);
if (ret) {
dev_err(&mac->pdev->dev, "request_irq of irq %d failed: %d\n",
@@ -1230,7 +1230,7 @@ static int pasemi_mac_open(struct net_device *dev)
snprintf(mac->rx_irq_name, sizeof(mac->rx_irq_name), "%s rx",
dev->name);
- ret = request_irq(mac->rx->chan.irq, pasemi_mac_rx_intr, IRQF_DISABLED,
+ ret = request_irq(mac->rx->chan.irq, pasemi_mac_rx_intr, 0,
mac->rx_irq_name, mac->rx);
if (ret) {
dev_err(&mac->pdev->dev, "request_irq of irq %d failed: %d\n",
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 88349b8..81bf836 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -430,7 +430,7 @@ struct qlcnic_hardware_context {
u8 diag_test;
u8 num_msix;
u8 nic_mode;
- char diag_cnt;
+ int diag_cnt;
u16 max_uc_count;
u16 port_type;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
index 652cc13..392b9bd 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
@@ -1561,6 +1561,7 @@ static int qlcnic_sriov_vf_reinit_driver(struct qlcnic_adapter *adapter)
{
int err;
+ adapter->need_fw_reset = 0;
qlcnic_83xx_reinit_mbx_work(adapter->ahw->mailbox);
qlcnic_83xx_enable_mbx_interrupt(adapter);
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 6f87f2c..3397cee 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -4231,6 +4231,7 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_23:
case RTL_GIGA_MAC_VER_24:
case RTL_GIGA_MAC_VER_34:
+ case RTL_GIGA_MAC_VER_35:
RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
break;
case RTL_GIGA_MAC_VER_40:
diff --git a/drivers/net/ethernet/sfc/Kconfig b/drivers/net/ethernet/sfc/Kconfig
index 8b71525..0889212 100644
--- a/drivers/net/ethernet/sfc/Kconfig
+++ b/drivers/net/ethernet/sfc/Kconfig
@@ -7,7 +7,7 @@ config SFC
select I2C_ALGOBIT
select PTP_1588_CLOCK
---help---
- This driver supports 10-gigabit Ethernet cards based on
+ This driver supports 10/40-gigabit Ethernet cards based on
the Solarflare SFC4000, SFC9000-family and SFC9100-family
controllers.
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 5f42313..9f18ae9 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -94,7 +94,7 @@ static unsigned int efx_ef10_mem_map_size(struct efx_nic *efx)
return resource_size(&efx->pci_dev->resource[EFX_MEM_BAR]);
}
-static int efx_ef10_init_capabilities(struct efx_nic *efx)
+static int efx_ef10_init_datapath_caps(struct efx_nic *efx)
{
MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CAPABILITIES_OUT_LEN);
struct efx_ef10_nic_data *nic_data = efx->nic_data;
@@ -107,16 +107,27 @@ static int efx_ef10_init_capabilities(struct efx_nic *efx)
outbuf, sizeof(outbuf), &outlen);
if (rc)
return rc;
+ if (outlen < sizeof(outbuf)) {
+ netif_err(efx, drv, efx->net_dev,
+ "unable to read datapath firmware capabilities\n");
+ return -EIO;
+ }
- if (outlen >= sizeof(outbuf)) {
- nic_data->datapath_caps =
- MCDI_DWORD(outbuf, GET_CAPABILITIES_OUT_FLAGS1);
- if (!(nic_data->datapath_caps &
- (1 << MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN))) {
- netif_err(efx, drv, efx->net_dev,
- "Capabilities don't indicate TSO support.\n");
- return -ENODEV;
- }
+ nic_data->datapath_caps =
+ MCDI_DWORD(outbuf, GET_CAPABILITIES_OUT_FLAGS1);
+
+ if (!(nic_data->datapath_caps &
+ (1 << MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN))) {
+ netif_err(efx, drv, efx->net_dev,
+ "current firmware does not support TSO\n");
+ return -ENODEV;
+ }
+
+ if (!(nic_data->datapath_caps &
+ (1 << MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_LBN))) {
+ netif_err(efx, probe, efx->net_dev,
+ "current firmware does not support an RX prefix\n");
+ return -ENODEV;
}
return 0;
@@ -217,21 +228,13 @@ static int efx_ef10_probe(struct efx_nic *efx)
if (rc)
goto fail3;
- rc = efx_ef10_init_capabilities(efx);
+ rc = efx_ef10_init_datapath_caps(efx);
if (rc < 0)
goto fail3;
efx->rx_packet_len_offset =
ES_DZ_RX_PREFIX_PKTLEN_OFST - ES_DZ_RX_PREFIX_SIZE;
- if (!(nic_data->datapath_caps &
- (1 << MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_LBN))) {
- netif_err(efx, probe, efx->net_dev,
- "current firmware does not support an RX prefix\n");
- rc = -ENODEV;
- goto fail3;
- }
-
rc = efx_mcdi_port_get_number(efx);
if (rc < 0)
goto fail3;
@@ -260,8 +263,6 @@ static int efx_ef10_probe(struct efx_nic *efx)
if (rc)
goto fail3;
- efx_ptp_probe(efx);
-
return 0;
fail3:
@@ -342,6 +343,13 @@ static int efx_ef10_init_nic(struct efx_nic *efx)
struct efx_ef10_nic_data *nic_data = efx->nic_data;
int rc;
+ if (nic_data->must_check_datapath_caps) {
+ rc = efx_ef10_init_datapath_caps(efx);
+ if (rc)
+ return rc;
+ nic_data->must_check_datapath_caps = false;
+ }
+
if (nic_data->must_realloc_vis) {
/* We cannot let the number of VIs change now */
rc = efx_ef10_alloc_vis(efx, nic_data->n_allocated_vis,
@@ -710,6 +718,14 @@ static int efx_ef10_mcdi_poll_reboot(struct efx_nic *efx)
nic_data->must_restore_filters = true;
nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
+ /* The datapath firmware might have been changed */
+ nic_data->must_check_datapath_caps = true;
+
+ /* MAC statistics have been cleared on the NIC; clear the local
+ * statistic that we update with efx_update_diff_stat().
+ */
+ nic_data->stats[EF10_STAT_rx_bad_bytes] = 0;
+
return -EIO;
}
diff --git a/drivers/net/ethernet/sfc/mcdi_port.c b/drivers/net/ethernet/sfc/mcdi_port.c
index 8d33da6..7b6be61 100644
--- a/drivers/net/ethernet/sfc/mcdi_port.c
+++ b/drivers/net/ethernet/sfc/mcdi_port.c
@@ -556,6 +556,7 @@ static int efx_mcdi_phy_set_settings(struct efx_nic *efx, struct ethtool_cmd *ec
case 100: caps = 1 << MC_CMD_PHY_CAP_100FDX_LBN; break;
case 1000: caps = 1 << MC_CMD_PHY_CAP_1000FDX_LBN; break;
case 10000: caps = 1 << MC_CMD_PHY_CAP_10000FDX_LBN; break;
+ case 40000: caps = 1 << MC_CMD_PHY_CAP_40000FDX_LBN; break;
default: return -EINVAL;
}
} else {
@@ -841,6 +842,7 @@ static unsigned int efx_mcdi_event_link_speed[] = {
[MCDI_EVENT_LINKCHANGE_SPEED_100M] = 100,
[MCDI_EVENT_LINKCHANGE_SPEED_1G] = 1000,
[MCDI_EVENT_LINKCHANGE_SPEED_10G] = 10000,
+ [MCDI_EVENT_LINKCHANGE_SPEED_40G] = 40000,
};
void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev)
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index 4b1e188..fda29d3 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -400,6 +400,8 @@ enum {
* @rx_rss_context: Firmware handle for our RSS context
* @stats: Hardware statistics
* @workaround_35388: Flag: firmware supports workaround for bug 35388
+ * @must_check_datapath_caps: Flag: @datapath_caps needs to be revalidated
+ * after MC reboot
* @datapath_caps: Capabilities of datapath firmware (FLAGS1 field of
* %MC_CMD_GET_CAPABILITIES response)
*/
@@ -413,6 +415,7 @@ struct efx_ef10_nic_data {
u32 rx_rss_context;
u64 stats[EF10_STAT_COUNT];
bool workaround_35388;
+ bool must_check_datapath_caps;
u32 datapath_caps;
};
diff --git a/drivers/net/ethernet/smsc/smc91x.h b/drivers/net/ethernet/smsc/smc91x.h
index 370e13d..5730fe2 100644
--- a/drivers/net/ethernet/smsc/smc91x.h
+++ b/drivers/net/ethernet/smsc/smc91x.h
@@ -271,7 +271,7 @@ static inline void mcf_outsw(void *a, unsigned char *p, int l)
#define SMC_insw(a, r, p, l) mcf_insw(a + r, p, l)
#define SMC_outsw(a, r, p, l) mcf_outsw(a + r, p, l)
-#define SMC_IRQ_FLAGS (IRQF_DISABLED)
+#define SMC_IRQ_FLAGS 0
#else
diff --git a/drivers/net/ethernet/smsc/smsc9420.c b/drivers/net/ethernet/smsc/smsc9420.c
index ffa5c4a..5f9e79f 100644
--- a/drivers/net/ethernet/smsc/smsc9420.c
+++ b/drivers/net/ethernet/smsc/smsc9420.c
@@ -1356,8 +1356,7 @@ static int smsc9420_open(struct net_device *dev)
smsc9420_reg_write(pd, INT_STAT, 0xFFFFFFFF);
smsc9420_pci_flush_write(pd);
- result = request_irq(irq, smsc9420_isr, IRQF_SHARED | IRQF_DISABLED,
- DRV_NAME, pd);
+ result = request_irq(irq, smsc9420_isr, IRQF_SHARED, DRV_NAME, pd);
if (result) {
smsc_warn(IFUP, "Unable to use IRQ = %d", irq);
result = -ENODEV;
diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c
index 949076f..13e6fff 100644
--- a/drivers/net/ethernet/tile/tilegx.c
+++ b/drivers/net/ethernet/tile/tilegx.c
@@ -1734,7 +1734,8 @@ static void tso_headers_prepare(struct sk_buff *skb, unsigned char *headers,
unsigned int data_len = skb->len - sh_len;
unsigned char *data = skb->data;
unsigned int ih_off, th_off, p_len;
- unsigned int isum_seed, tsum_seed, id, seq;
+ unsigned int isum_seed, tsum_seed, seq;
+ unsigned int uninitialized_var(id);
int is_ipv6;
long f_id = -1; /* id of the current fragment */
long f_size = skb_headlen(skb) - sh_len; /* current fragment size */
@@ -1781,7 +1782,7 @@ static void tso_headers_prepare(struct sk_buff *skb, unsigned char *headers,
} else {
ih = (struct iphdr *)(buf + ih_off);
ih->tot_len = htons(sh_len + p_len - ih_off);
- ih->id = htons(id);
+ ih->id = htons(id++);
ih->check = csum_long(isum_seed + ih->tot_len +
ih->id) ^ 0xffff;
}
@@ -1818,7 +1819,6 @@ static void tso_headers_prepare(struct sk_buff *skb, unsigned char *headers,
slot++;
}
- id++;
seq += p_len;
/* The last segment may be less than gso_size. */
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
index 9c805e0..f7f2ef4 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
@@ -1726,7 +1726,7 @@ static int ps3_gelic_driver_probe(struct ps3_system_bus_device *dev)
goto fail_alloc_irq;
}
result = request_irq(card->irq, gelic_card_interrupt,
- IRQF_DISABLED, netdev->name, card);
+ 0, netdev->name, card);
if (result) {
dev_info(ctodev(card), "%s:request_irq failed (%d)\n",
diff --git a/drivers/net/irda/donauboe.c b/drivers/net/irda/donauboe.c
index 510b9c8..31bcb98 100644
--- a/drivers/net/irda/donauboe.c
+++ b/drivers/net/irda/donauboe.c
@@ -1488,7 +1488,7 @@ static void
toshoboe_close (struct pci_dev *pci_dev)
{
int i;
- struct toshoboe_cb *self = (struct toshoboe_cb*)pci_get_drvdata(pci_dev);
+ struct toshoboe_cb *self = pci_get_drvdata(pci_dev);
IRDA_DEBUG (4, "%s()\n", __func__);
@@ -1696,7 +1696,7 @@ freeself:
static int
toshoboe_gotosleep (struct pci_dev *pci_dev, pm_message_t crap)
{
- struct toshoboe_cb *self = (struct toshoboe_cb*)pci_get_drvdata(pci_dev);
+ struct toshoboe_cb *self = pci_get_drvdata(pci_dev);
unsigned long flags;
int i = 10;
@@ -1725,7 +1725,7 @@ toshoboe_gotosleep (struct pci_dev *pci_dev, pm_message_t crap)
static int
toshoboe_wakeup (struct pci_dev *pci_dev)
{
- struct toshoboe_cb *self = (struct toshoboe_cb*)pci_get_drvdata(pci_dev);
+ struct toshoboe_cb *self = pci_get_drvdata(pci_dev);
unsigned long flags;
IRDA_DEBUG (4, "%s()\n", __func__);
diff --git a/drivers/net/irda/mcs7780.c b/drivers/net/irda/mcs7780.c
index f07c340..3f138ca 100644
--- a/drivers/net/irda/mcs7780.c
+++ b/drivers/net/irda/mcs7780.c
@@ -191,8 +191,8 @@ static inline int mcs_setup_transceiver_vishay(struct mcs_cb *mcs)
goto error;
ret = 0;
- error:
- return ret;
+error:
+ return ret;
}
/* Setup a communication between mcs7780 and agilent chip. */
@@ -501,8 +501,11 @@ static inline int mcs_setup_urbs(struct mcs_cb *mcs)
return 0;
mcs->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!mcs->rx_urb)
+ if (!mcs->rx_urb) {
+ usb_free_urb(mcs->tx_urb);
+ mcs->tx_urb = NULL;
return 0;
+ }
return 1;
}
@@ -643,9 +646,9 @@ static int mcs_speed_change(struct mcs_cb *mcs)
ret = mcs_set_reg(mcs, MCS_MODE_REG, rval);
mcs->speed = mcs->new_speed;
- error:
- mcs->new_speed = 0;
- return ret;
+error:
+ mcs->new_speed = 0;
+ return ret;
}
/* Ioctl calls not supported at this time. Can be an area of future work. */
@@ -738,17 +741,20 @@ static int mcs_net_open(struct net_device *netdev)
ret = mcs_receive_start(mcs);
if (ret)
- goto error3;
+ goto error4;
netif_start_queue(netdev);
return 0;
- error3:
- irlap_close(mcs->irlap);
- error2:
- kfree_skb(mcs->rx_buff.skb);
- error1:
- return ret;
+error4:
+ usb_free_urb(mcs->rx_urb);
+ usb_free_urb(mcs->tx_urb);
+error3:
+ irlap_close(mcs->irlap);
+error2:
+ kfree_skb(mcs->rx_buff.skb);
+error1:
+ return ret;
}
/* Receive callback function. */
@@ -946,11 +952,11 @@ static int mcs_probe(struct usb_interface *intf,
usb_set_intfdata(intf, mcs);
return 0;
- error2:
- free_netdev(ndev);
+error2:
+ free_netdev(ndev);
- error1:
- return ret;
+error1:
+ return ret;
}
/* The current device is removed, the USB layer tells us to shut down. */
diff --git a/drivers/net/irda/vlsi_ir.c b/drivers/net/irda/vlsi_ir.c
index 5f47584..c5bd58b 100644
--- a/drivers/net/irda/vlsi_ir.c
+++ b/drivers/net/irda/vlsi_ir.c
@@ -543,7 +543,7 @@ static int vlsi_process_rx(struct vlsi_ring *r, struct ring_descr *rd)
int crclen, len = 0;
struct sk_buff *skb;
int ret = 0;
- struct net_device *ndev = (struct net_device *)pci_get_drvdata(r->pdev);
+ struct net_device *ndev = pci_get_drvdata(r->pdev);
vlsi_irda_dev_t *idev = netdev_priv(ndev);
pci_dma_sync_single_for_cpu(r->pdev, rd_get_addr(rd), r->len, r->dir);
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index fcbf680..a17d85a 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -146,6 +146,7 @@ static int loopback_dev_init(struct net_device *dev)
static void loopback_dev_free(struct net_device *dev)
{
+ dev_net(dev)->loopback_dev = NULL;
free_percpu(dev->lstats);
free_netdev(dev);
}
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 64dfaa3..9bf46bd 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -118,8 +118,6 @@ static int macvlan_broadcast_one(struct sk_buff *skb,
const struct ethhdr *eth, bool local)
{
struct net_device *dev = vlan->dev;
- if (!skb)
- return NET_RX_DROP;
if (local)
return vlan->forward(dev, skb);
@@ -171,9 +169,13 @@ static void macvlan_broadcast(struct sk_buff *skb,
hash = mc_hash(vlan, eth->h_dest);
if (!test_bit(hash, vlan->mc_filter))
continue;
+
+ err = NET_RX_DROP;
nskb = skb_clone(skb, GFP_ATOMIC);
- err = macvlan_broadcast_one(nskb, vlan, eth,
- mode == MACVLAN_MODE_BRIDGE);
+ if (likely(nskb))
+ err = macvlan_broadcast_one(
+ nskb, vlan, eth,
+ mode == MACVLAN_MODE_BRIDGE);
macvlan_count_rx(vlan, skb->len + ETH_HLEN,
err == NET_RX_SUCCESS, 1);
}
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index dcb2134..adeee61 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -684,15 +684,12 @@ restart:
case NETDEV_RELEASE:
case NETDEV_JOIN:
case NETDEV_UNREGISTER:
- /*
- * rtnl_lock already held
+ /* rtnl_lock already held
* we might sleep in __netpoll_cleanup()
*/
spin_unlock_irqrestore(&target_list_lock, flags);
- mutex_lock(&nt->mutex);
__netpoll_cleanup(&nt->np);
- mutex_unlock(&nt->mutex);
spin_lock_irqsave(&target_list_lock, flags);
dev_put(nt->np.dev);
diff --git a/drivers/net/phy/cicada.c b/drivers/net/phy/cicada.c
index db472ff..313a037 100644
--- a/drivers/net/phy/cicada.c
+++ b/drivers/net/phy/cicada.c
@@ -30,9 +30,9 @@
#include <linux/ethtool.h>
#include <linux/phy.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include <asm/irq.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
/* Cicada Extended Control Register 1 */
#define MII_CIS8201_EXT_CON1 0x17
diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
index 6fa5ae0..0180531 100644
--- a/drivers/net/ppp/pptp.c
+++ b/drivers/net/ppp/pptp.c
@@ -281,7 +281,7 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
nf_reset(skb);
skb->ip_summed = CHECKSUM_NONE;
- ip_select_ident(iph, &rt->dst, NULL);
+ ip_select_ident(skb, &rt->dst, NULL);
ip_send_check(iph);
ip_local_out(skb);
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index a639de8..807815f 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1641,11 +1641,11 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
INIT_LIST_HEAD(&tun->disabled);
err = tun_attach(tun, file, false);
if (err < 0)
- goto err_free_dev;
+ goto err_free_flow;
err = register_netdevice(tun->dev);
if (err < 0)
- goto err_free_dev;
+ goto err_detach;
if (device_create_file(&tun->dev->dev, &dev_attr_tun_flags) ||
device_create_file(&tun->dev->dev, &dev_attr_owner) ||
@@ -1689,7 +1689,12 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
strcpy(ifr->ifr_name, tun->dev->name);
return 0;
- err_free_dev:
+err_detach:
+ tun_detach_all(dev);
+err_free_flow:
+ tun_flow_uninit(tun);
+ security_tun_dev_free_security(tun->security);
+err_free_dev:
free_netdev(dev);
return err;
}
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 03ad4dc..2023f3e 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -33,7 +33,7 @@
#include <linux/usb/usbnet.h>
-#if defined(CONFIG_USB_NET_RNDIS_HOST) || defined(CONFIG_USB_NET_RNDIS_HOST_MODULE)
+#if IS_ENABLED(CONFIG_USB_NET_RNDIS_HOST)
static int is_rndis(struct usb_interface_descriptor *desc)
{
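
IS_ENABLED(CONFIG_FOO) is true when the option is built in or modular, replacing the longer defined(CONFIG_FOO) || defined(CONFIG_FOO_MODULE) spelling. A userspace re-creation of the preprocessor trick behind it, simplified from include/linux/kconfig.h (the CONFIG define below just pretends the option was set to =m):

    #include <stdio.h>

    #define CONFIG_USB_NET_RNDIS_HOST_MODULE 1   /* pretend option is =m */

    #define __ARG_PLACEHOLDER_1 0,
    #define __take_second_arg(__ignored, val, ...) val
    #define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
    #define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
    #define __is_defined(x) ___is_defined(x)
    #define IS_ENABLED(option) \
            (__is_defined(option) || __is_defined(option##_MODULE))

    int main(void)
    {
            /* prints 1: the _MODULE variant is defined */
            printf("%d\n", IS_ENABLED(CONFIG_USB_NET_RNDIS_HOST));
            return 0;
    }

When an option is defined to 1, the token paste produces __ARG_PLACEHOLDER_1, which expands to "0," and shifts a 1 into the second-argument slot; an undefined option leaves junk in the first slot and the macro evaluates to 0.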
@@ -69,8 +69,7 @@ static const u8 mbm_guid[16] = {
0xa6, 0x07, 0xc0, 0xff, 0xcb, 0x7e, 0x39, 0x2a,
};
-/*
- * probes control interface, claims data interface, collects the bulk
+/* probes control interface, claims data interface, collects the bulk
* endpoints, activates data interface (if needed), maybe sets MTU.
* all pure cdc, except for certain firmware workarounds, and knowing
* that rndis uses one different rule.
@@ -88,7 +87,7 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
struct usb_cdc_mdlm_desc *desc = NULL;
struct usb_cdc_mdlm_detail_desc *detail = NULL;
- if (sizeof dev->data < sizeof *info)
+ if (sizeof(dev->data) < sizeof(*info))
return -EDOM;
/* expect strict spec conformance for the descriptors, but
@@ -126,10 +125,10 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
is_activesync(&intf->cur_altsetting->desc) ||
is_wireless_rndis(&intf->cur_altsetting->desc));
- memset(info, 0, sizeof *info);
+ memset(info, 0, sizeof(*info));
info->control = intf;
while (len > 3) {
- if (buf [1] != USB_DT_CS_INTERFACE)
+ if (buf[1] != USB_DT_CS_INTERFACE)
goto next_desc;
/* use bDescriptorSubType to identify the CDC descriptors.
@@ -139,14 +138,14 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
* in favor of a complicated OID-based RPC scheme doing what
* CDC Ethernet achieves with a simple descriptor.
*/
- switch (buf [2]) {
+ switch (buf[2]) {
case USB_CDC_HEADER_TYPE:
if (info->header) {
dev_dbg(&intf->dev, "extra CDC header\n");
goto bad_desc;
}
info->header = (void *) buf;
- if (info->header->bLength != sizeof *info->header) {
+ if (info->header->bLength != sizeof(*info->header)) {
dev_dbg(&intf->dev, "CDC header len %u\n",
info->header->bLength);
goto bad_desc;
@@ -175,7 +174,7 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
goto bad_desc;
}
info->u = (void *) buf;
- if (info->u->bLength != sizeof *info->u) {
+ if (info->u->bLength != sizeof(*info->u)) {
dev_dbg(&intf->dev, "CDC union len %u\n",
info->u->bLength);
goto bad_desc;
@@ -233,7 +232,7 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
goto bad_desc;
}
info->ether = (void *) buf;
- if (info->ether->bLength != sizeof *info->ether) {
+ if (info->ether->bLength != sizeof(*info->ether)) {
dev_dbg(&intf->dev, "CDC ether len %u\n",
info->ether->bLength);
goto bad_desc;
@@ -274,8 +273,8 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
break;
}
next_desc:
- len -= buf [0]; /* bLength */
- buf += buf [0];
+ len -= buf[0]; /* bLength */
+ buf += buf[0];
}
/* Microsoft ActiveSync based and some regular RNDIS devices lack the
@@ -379,9 +378,7 @@ void usbnet_cdc_unbind(struct usbnet *dev, struct usb_interface *intf)
}
EXPORT_SYMBOL_GPL(usbnet_cdc_unbind);
-/*-------------------------------------------------------------------------
- *
- * Communications Device Class, Ethernet Control model
+/* Communications Device Class, Ethernet Control model
*
* Takes two interfaces. The DATA interface is inactive till an altsetting
* is selected. Configuration data includes class descriptors. There's
@@ -389,8 +386,7 @@ EXPORT_SYMBOL_GPL(usbnet_cdc_unbind);
*
* This should interop with whatever the 2.4 "CDCEther.c" driver
* (by Brad Hards) talked with, with more functionality.
- *
- *-------------------------------------------------------------------------*/
+ */
static void dumpspeed(struct usbnet *dev, __le32 *speeds)
{
@@ -404,7 +400,7 @@ void usbnet_cdc_status(struct usbnet *dev, struct urb *urb)
{
struct usb_cdc_notification *event;
- if (urb->actual_length < sizeof *event)
+ if (urb->actual_length < sizeof(*event))
return;
/* SPEED_CHANGE can get split into two 8-byte packets */
@@ -423,7 +419,7 @@ void usbnet_cdc_status(struct usbnet *dev, struct urb *urb)
case USB_CDC_NOTIFY_SPEED_CHANGE: /* tx/rx rates */
netif_dbg(dev, timer, dev->net, "CDC: speed change (len %d)\n",
urb->actual_length);
- if (urb->actual_length != (sizeof *event + 8))
+ if (urb->actual_length != (sizeof(*event) + 8))
set_bit(EVENT_STS_SPLIT, &dev->flags);
else
dumpspeed(dev, (__le32 *) &event[1]);
@@ -469,7 +465,6 @@ EXPORT_SYMBOL_GPL(usbnet_cdc_bind);
static const struct driver_info cdc_info = {
.description = "CDC Ethernet Device",
.flags = FLAG_ETHER | FLAG_POINTTOPOINT,
- // .check_connect = cdc_check_connect,
.bind = usbnet_cdc_bind,
.unbind = usbnet_cdc_unbind,
.status = usbnet_cdc_status,
@@ -493,9 +488,8 @@ static const struct driver_info wwan_info = {
#define DELL_VENDOR_ID 0x413C
#define REALTEK_VENDOR_ID 0x0bda
-static const struct usb_device_id products [] = {
-/*
- * BLACKLIST !!
+static const struct usb_device_id products[] = {
+/* BLACKLIST !!
*
* First blacklist any products that are egregiously nonconformant
* with the CDC Ethernet specs. Minor braindamage we cope with; when
@@ -542,7 +536,7 @@ static const struct usb_device_id products [] = {
.driver_info = 0,
}, {
.match_flags = USB_DEVICE_ID_MATCH_INT_INFO
- | USB_DEVICE_ID_MATCH_DEVICE,
+ | USB_DEVICE_ID_MATCH_DEVICE,
.idVendor = 0x04DD,
.idProduct = 0x8007, /* C-700 */
ZAURUS_MASTER_INTERFACE,
@@ -659,8 +653,7 @@ static const struct usb_device_id products [] = {
.driver_info = 0,
},
-/*
- * WHITELIST!!!
+/* WHITELIST!!!
*
* CDC Ether uses two interfaces, not necessarily consecutive.
* We match the main interface, ignoring the optional device
@@ -672,60 +665,40 @@ static const struct usb_device_id products [] = {
*/
{
/* ZTE (Vodafone) K3805-Z */
- .match_flags = USB_DEVICE_ID_MATCH_VENDOR
- | USB_DEVICE_ID_MATCH_PRODUCT
- | USB_DEVICE_ID_MATCH_INT_INFO,
- .idVendor = ZTE_VENDOR_ID,
- .idProduct = 0x1003,
- .bInterfaceClass = USB_CLASS_COMM,
- .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET,
- .bInterfaceProtocol = USB_CDC_PROTO_NONE,
+ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1003, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET,
+ USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&wwan_info,
}, {
/* ZTE (Vodafone) K3806-Z */
- .match_flags = USB_DEVICE_ID_MATCH_VENDOR
- | USB_DEVICE_ID_MATCH_PRODUCT
- | USB_DEVICE_ID_MATCH_INT_INFO,
- .idVendor = ZTE_VENDOR_ID,
- .idProduct = 0x1015,
- .bInterfaceClass = USB_CLASS_COMM,
- .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET,
- .bInterfaceProtocol = USB_CDC_PROTO_NONE,
+ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1015, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET,
+ USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&wwan_info,
}, {
/* ZTE (Vodafone) K4510-Z */
- .match_flags = USB_DEVICE_ID_MATCH_VENDOR
- | USB_DEVICE_ID_MATCH_PRODUCT
- | USB_DEVICE_ID_MATCH_INT_INFO,
- .idVendor = ZTE_VENDOR_ID,
- .idProduct = 0x1173,
- .bInterfaceClass = USB_CLASS_COMM,
- .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET,
- .bInterfaceProtocol = USB_CDC_PROTO_NONE,
+ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1173, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET,
+ USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&wwan_info,
}, {
/* ZTE (Vodafone) K3770-Z */
- .match_flags = USB_DEVICE_ID_MATCH_VENDOR
- | USB_DEVICE_ID_MATCH_PRODUCT
- | USB_DEVICE_ID_MATCH_INT_INFO,
- .idVendor = ZTE_VENDOR_ID,
- .idProduct = 0x1177,
- .bInterfaceClass = USB_CLASS_COMM,
- .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET,
- .bInterfaceProtocol = USB_CDC_PROTO_NONE,
+ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1177, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET,
+ USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&wwan_info,
}, {
/* ZTE (Vodafone) K3772-Z */
- .match_flags = USB_DEVICE_ID_MATCH_VENDOR
- | USB_DEVICE_ID_MATCH_PRODUCT
- | USB_DEVICE_ID_MATCH_INT_INFO,
- .idVendor = ZTE_VENDOR_ID,
- .idProduct = 0x1181,
- .bInterfaceClass = USB_CLASS_COMM,
- .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET,
- .bInterfaceProtocol = USB_CDC_PROTO_NONE,
+ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1181, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET,
+ USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&wwan_info,
}, {
+ /* Telit modules */
+ USB_VENDOR_AND_INTERFACE_INFO(0x1bc7, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+ .driver_info = (kernel_ulong_t) &wwan_info,
+}, {
USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET,
USB_CDC_PROTO_NONE),
.driver_info = (unsigned long) &cdc_info,
@@ -736,15 +709,11 @@ static const struct usb_device_id products [] = {
}, {
/* Various Huawei modems with a network port like the UMG1831 */
- .match_flags = USB_DEVICE_ID_MATCH_VENDOR
- | USB_DEVICE_ID_MATCH_INT_INFO,
- .idVendor = HUAWEI_VENDOR_ID,
- .bInterfaceClass = USB_CLASS_COMM,
- .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET,
- .bInterfaceProtocol = 255,
+ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET, 255),
.driver_info = (unsigned long)&wwan_info,
},
- { }, // END
+ { }, /* END */
};
MODULE_DEVICE_TABLE(usb, products);
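
The table conversions above are behavior-preserving: USB_DEVICE_AND_INTERFACE_INFO() and USB_VENDOR_AND_INTERFACE_INFO() expand to the same match_flags and field initializers that were open-coded (USB_DEVICE_ID_MATCH_DEVICE is USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_PRODUCT). Paraphrased from include/linux/usb.h:

    #define USB_DEVICE_AND_INTERFACE_INFO(vend, prod, cl, sc, pr) \
            .match_flags = USB_DEVICE_ID_MATCH_INT_INFO \
                    | USB_DEVICE_ID_MATCH_DEVICE, \
            .idVendor = (vend), \
            .idProduct = (prod), \
            .bInterfaceClass = (cl), \
            .bInterfaceSubClass = (sc), \
            .bInterfaceProtocol = (pr)

    #define USB_VENDOR_AND_INTERFACE_INFO(vend, cl, sc, pr) \
            .match_flags = USB_DEVICE_ID_MATCH_INT_INFO \
                    | USB_DEVICE_ID_MATCH_VENDOR, \
            .idVendor = (vend), \
            .bInterfaceClass = (cl), \
            .bInterfaceSubClass = (sc), \
            .bInterfaceProtocol = (pr)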
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 3a81315..6312332 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -518,6 +518,135 @@ static const struct usb_device_id products[] = {
/* 3. Combined interface devices matching on interface number */
{QMI_FIXED_INTF(0x0408, 0xea42, 4)}, /* Yota / Megafon M100-1 */
+ {QMI_FIXED_INTF(0x05c6, 0x7000, 0)},
+ {QMI_FIXED_INTF(0x05c6, 0x7001, 1)},
+ {QMI_FIXED_INTF(0x05c6, 0x7002, 1)},
+ {QMI_FIXED_INTF(0x05c6, 0x7101, 1)},
+ {QMI_FIXED_INTF(0x05c6, 0x7101, 2)},
+ {QMI_FIXED_INTF(0x05c6, 0x7101, 3)},
+ {QMI_FIXED_INTF(0x05c6, 0x7102, 1)},
+ {QMI_FIXED_INTF(0x05c6, 0x7102, 2)},
+ {QMI_FIXED_INTF(0x05c6, 0x7102, 3)},
+ {QMI_FIXED_INTF(0x05c6, 0x8000, 7)},
+ {QMI_FIXED_INTF(0x05c6, 0x8001, 6)},
+ {QMI_FIXED_INTF(0x05c6, 0x9000, 4)},
+ {QMI_FIXED_INTF(0x05c6, 0x9003, 4)},
+ {QMI_FIXED_INTF(0x05c6, 0x9005, 2)},
+ {QMI_FIXED_INTF(0x05c6, 0x900a, 4)},
+ {QMI_FIXED_INTF(0x05c6, 0x900b, 2)},
+ {QMI_FIXED_INTF(0x05c6, 0x900c, 4)},
+ {QMI_FIXED_INTF(0x05c6, 0x900c, 5)},
+ {QMI_FIXED_INTF(0x05c6, 0x900c, 6)},
+ {QMI_FIXED_INTF(0x05c6, 0x900d, 5)},
+ {QMI_FIXED_INTF(0x05c6, 0x900f, 3)},
+ {QMI_FIXED_INTF(0x05c6, 0x900f, 4)},
+ {QMI_FIXED_INTF(0x05c6, 0x900f, 5)},
+ {QMI_FIXED_INTF(0x05c6, 0x9010, 4)},
+ {QMI_FIXED_INTF(0x05c6, 0x9010, 5)},
+ {QMI_FIXED_INTF(0x05c6, 0x9011, 3)},
+ {QMI_FIXED_INTF(0x05c6, 0x9011, 4)},
+ {QMI_FIXED_INTF(0x05c6, 0x9021, 1)},
+ {QMI_FIXED_INTF(0x05c6, 0x9022, 2)},
+ {QMI_FIXED_INTF(0x05c6, 0x9025, 4)}, /* Alcatel-sbell ASB TL131 TDD LTE (China Mobile) */
+ {QMI_FIXED_INTF(0x05c6, 0x9026, 3)},
+ {QMI_FIXED_INTF(0x05c6, 0x902e, 5)},
+ {QMI_FIXED_INTF(0x05c6, 0x9031, 5)},
+ {QMI_FIXED_INTF(0x05c6, 0x9032, 4)},
+ {QMI_FIXED_INTF(0x05c6, 0x9033, 3)},
+ {QMI_FIXED_INTF(0x05c6, 0x9033, 4)},
+ {QMI_FIXED_INTF(0x05c6, 0x9033, 5)},
+ {QMI_FIXED_INTF(0x05c6, 0x9033, 6)},
+ {QMI_FIXED_INTF(0x05c6, 0x9034, 3)},
+ {QMI_FIXED_INTF(0x05c6, 0x9034, 4)},
+ {QMI_FIXED_INTF(0x05c6, 0x9034, 5)},
+ {QMI_FIXED_INTF(0x05c6, 0x9034, 6)},
+ {QMI_FIXED_INTF(0x05c6, 0x9034, 7)},
+ {QMI_FIXED_INTF(0x05c6, 0x9035, 4)},
+ {QMI_FIXED_INTF(0x05c6, 0x9036, 3)},
+ {QMI_FIXED_INTF(0x05c6, 0x9037, 5)},
+ {QMI_FIXED_INTF(0x05c6, 0x9038, 4)},
+ {QMI_FIXED_INTF(0x05c6, 0x903b, 7)},
+ {QMI_FIXED_INTF(0x05c6, 0x903c, 6)},
+ {QMI_FIXED_INTF(0x05c6, 0x903d, 6)},
+ {QMI_FIXED_INTF(0x05c6, 0x903e, 5)},
+ {QMI_FIXED_INTF(0x05c6, 0x9043, 3)},
+ {QMI_FIXED_INTF(0x05c6, 0x9046, 3)},
+ {QMI_FIXED_INTF(0x05c6, 0x9046, 4)},
+ {QMI_FIXED_INTF(0x05c6, 0x9046, 5)},
+ {QMI_FIXED_INTF(0x05c6, 0x9047, 2)},
+ {QMI_FIXED_INTF(0x05c6, 0x9047, 3)},
+ {QMI_FIXED_INTF(0x05c6, 0x9047, 4)},
+ {QMI_FIXED_INTF(0x05c6, 0x9048, 4)},
+ {QMI_FIXED_INTF(0x05c6, 0x9048, 5)},
+ {QMI_FIXED_INTF(0x05c6, 0x9048, 6)},
+ {QMI_FIXED_INTF(0x05c6, 0x9048, 7)},
+ {QMI_FIXED_INTF(0x05c6, 0x9048, 8)},
+ {QMI_FIXED_INTF(0x05c6, 0x904c, 5)},
+ {QMI_FIXED_INTF(0x05c6, 0x904c, 6)},
+ {QMI_FIXED_INTF(0x05c6, 0x904c, 7)},
+ {QMI_FIXED_INTF(0x05c6, 0x904c, 8)},
+ {QMI_FIXED_INTF(0x05c6, 0x9050, 3)},
+ {QMI_FIXED_INTF(0x05c6, 0x9052, 4)},
+ {QMI_FIXED_INTF(0x05c6, 0x9053, 6)},
+ {QMI_FIXED_INTF(0x05c6, 0x9053, 7)},
+ {QMI_FIXED_INTF(0x05c6, 0x9054, 5)},
+ {QMI_FIXED_INTF(0x05c6, 0x9054, 6)},
+ {QMI_FIXED_INTF(0x05c6, 0x9055, 3)},
+ {QMI_FIXED_INTF(0x05c6, 0x9055, 4)},
+ {QMI_FIXED_INTF(0x05c6, 0x9055, 5)},
+ {QMI_FIXED_INTF(0x05c6, 0x9055, 6)},
+ {QMI_FIXED_INTF(0x05c6, 0x9055, 7)},
+ {QMI_FIXED_INTF(0x05c6, 0x9056, 3)},
+ {QMI_FIXED_INTF(0x05c6, 0x9062, 2)},
+ {QMI_FIXED_INTF(0x05c6, 0x9062, 3)},
+ {QMI_FIXED_INTF(0x05c6, 0x9062, 4)},
+ {QMI_FIXED_INTF(0x05c6, 0x9062, 5)},
+ {QMI_FIXED_INTF(0x05c6, 0x9062, 6)},
+ {QMI_FIXED_INTF(0x05c6, 0x9062, 7)},
+ {QMI_FIXED_INTF(0x05c6, 0x9062, 8)},
+ {QMI_FIXED_INTF(0x05c6, 0x9062, 9)},
+ {QMI_FIXED_INTF(0x05c6, 0x9064, 3)},
+ {QMI_FIXED_INTF(0x05c6, 0x9065, 6)},
+ {QMI_FIXED_INTF(0x05c6, 0x9065, 7)},
+ {QMI_FIXED_INTF(0x05c6, 0x9066, 5)},
+ {QMI_FIXED_INTF(0x05c6, 0x9066, 6)},
+ {QMI_FIXED_INTF(0x05c6, 0x9067, 1)},
+ {QMI_FIXED_INTF(0x05c6, 0x9068, 2)},
+ {QMI_FIXED_INTF(0x05c6, 0x9068, 3)},
+ {QMI_FIXED_INTF(0x05c6, 0x9068, 4)},
+ {QMI_FIXED_INTF(0x05c6, 0x9068, 5)},
+ {QMI_FIXED_INTF(0x05c6, 0x9068, 6)},
+ {QMI_FIXED_INTF(0x05c6, 0x9068, 7)},
+ {QMI_FIXED_INTF(0x05c6, 0x9069, 5)},
+ {QMI_FIXED_INTF(0x05c6, 0x9069, 6)},
+ {QMI_FIXED_INTF(0x05c6, 0x9069, 7)},
+ {QMI_FIXED_INTF(0x05c6, 0x9069, 8)},
+ {QMI_FIXED_INTF(0x05c6, 0x9070, 4)},
+ {QMI_FIXED_INTF(0x05c6, 0x9070, 5)},
+ {QMI_FIXED_INTF(0x05c6, 0x9075, 5)},
+ {QMI_FIXED_INTF(0x05c6, 0x9076, 4)},
+ {QMI_FIXED_INTF(0x05c6, 0x9076, 5)},
+ {QMI_FIXED_INTF(0x05c6, 0x9076, 6)},
+ {QMI_FIXED_INTF(0x05c6, 0x9076, 7)},
+ {QMI_FIXED_INTF(0x05c6, 0x9076, 8)},
+ {QMI_FIXED_INTF(0x05c6, 0x9077, 3)},
+ {QMI_FIXED_INTF(0x05c6, 0x9077, 4)},
+ {QMI_FIXED_INTF(0x05c6, 0x9077, 5)},
+ {QMI_FIXED_INTF(0x05c6, 0x9077, 6)},
+ {QMI_FIXED_INTF(0x05c6, 0x9078, 3)},
+ {QMI_FIXED_INTF(0x05c6, 0x9079, 4)},
+ {QMI_FIXED_INTF(0x05c6, 0x9079, 5)},
+ {QMI_FIXED_INTF(0x05c6, 0x9079, 6)},
+ {QMI_FIXED_INTF(0x05c6, 0x9079, 7)},
+ {QMI_FIXED_INTF(0x05c6, 0x9079, 8)},
+ {QMI_FIXED_INTF(0x05c6, 0x9080, 5)},
+ {QMI_FIXED_INTF(0x05c6, 0x9080, 6)},
+ {QMI_FIXED_INTF(0x05c6, 0x9080, 7)},
+ {QMI_FIXED_INTF(0x05c6, 0x9080, 8)},
+ {QMI_FIXED_INTF(0x05c6, 0x9083, 3)},
+ {QMI_FIXED_INTF(0x05c6, 0x9084, 4)},
+ {QMI_FIXED_INTF(0x05c6, 0x920d, 0)},
+ {QMI_FIXED_INTF(0x05c6, 0x920d, 5)},
{QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */
{QMI_FIXED_INTF(0x12d1, 0x14ac, 1)}, /* Huawei E1820 */
{QMI_FIXED_INTF(0x19d2, 0x0002, 1)},
@@ -612,7 +741,6 @@ static const struct usb_device_id products[] = {
{QMI_GOBI_DEVICE(0x413c, 0x8186)}, /* Dell Gobi 2000 Modem device (N0218, VU936) */
{QMI_GOBI_DEVICE(0x413c, 0x8194)}, /* Dell Gobi 3000 Composite */
{QMI_GOBI_DEVICE(0x05c6, 0x920b)}, /* Generic Gobi 2000 Modem device */
- {QMI_GOBI_DEVICE(0x05c6, 0x920d)}, /* Gobi 3000 Composite */
{QMI_GOBI_DEVICE(0x05c6, 0x9225)}, /* Sony Gobi 2000 Modem device (N0279, VU730) */
{QMI_GOBI_DEVICE(0x05c6, 0x9245)}, /* Samsung Gobi 2000 Modem device (VL176) */
{QMI_GOBI_DEVICE(0x03f0, 0x251d)}, /* HP Gobi 2000 Modem device (VP412) */
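
QMI_FIXED_INTF() binds the driver to one specific interface number of a composite device, which is why the broader QMI_GOBI_DEVICE entry for 0x05c6:0x920d can be dropped once its two QMI ports (interfaces 0 and 5) are listed explicitly in the fixed-interface section above. The macro, paraphrased from qmi_wwan.c:

    #define QMI_FIXED_INTF(vend, prod, num) \
            USB_DEVICE_INTERFACE_NUMBER(vend, prod, num), \
            .driver_info = (unsigned long)&qmi_wwan_info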
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index bf64b41..d1292fe 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -564,7 +564,7 @@ static void vxlan_notify_add_rx_port(struct sock *sk)
struct net_device *dev;
struct net *net = sock_net(sk);
sa_family_t sa_family = sk->sk_family;
- u16 port = htons(inet_sk(sk)->inet_sport);
+ __be16 port = inet_sk(sk)->inet_sport;
rcu_read_lock();
for_each_netdev_rcu(net, dev) {
@@ -581,7 +581,7 @@ static void vxlan_notify_del_rx_port(struct sock *sk)
struct net_device *dev;
struct net *net = sock_net(sk);
sa_family_t sa_family = sk->sk_family;
- u16 port = htons(inet_sk(sk)->inet_sport);
+ __be16 port = inet_sk(sk)->inet_sport;
rcu_read_lock();
for_each_netdev_rcu(net, dev) {
@@ -2021,7 +2021,8 @@ static struct device_type vxlan_type = {
};
/* Calls the ndo_add_vxlan_port of the caller in order to
- * supply the listening VXLAN udp ports.
+ * supply the listening VXLAN udp ports. Callers are expected
+ * to implement ndo_add_vxlan_port.
*/
void vxlan_get_rx_port(struct net_device *dev)
{
@@ -2029,16 +2030,13 @@ void vxlan_get_rx_port(struct net_device *dev)
struct net *net = dev_net(dev);
struct vxlan_net *vn = net_generic(net, vxlan_net_id);
sa_family_t sa_family;
- u16 port;
- int i;
-
- if (!dev || !dev->netdev_ops || !dev->netdev_ops->ndo_add_vxlan_port)
- return;
+ __be16 port;
+ unsigned int i;
spin_lock(&vn->sock_lock);
for (i = 0; i < PORT_HASH_SIZE; ++i) {
- hlist_for_each_entry_rcu(vs, vs_head(net, i), hlist) {
- port = htons(inet_sk(vs->sock->sk)->inet_sport);
+ hlist_for_each_entry_rcu(vs, &vn->sock_list[i], hlist) {
+ port = inet_sk(vs->sock->sk)->inet_sport;
sa_family = vs->sock->sk->sk_family;
dev->netdev_ops->ndo_add_vxlan_port(dev, sa_family,
port);
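
inet_sport is already stored in network byte order, so the old htons() call swapped it back to host order on little-endian machines before handing it to ndo_add_vxlan_port(); switching the local variable to __be16 both fixes the value and lets sparse flag any future double swap. A userspace demonstration of the bug:

    #include <stdio.h>
    #include <stdint.h>
    #include <arpa/inet.h>

    int main(void)
    {
            uint16_t sport = htons(4789);  /* big-endian, as inet_sport is stored  */
            uint16_t bad   = htons(sport); /* old code: swaps back on LE hosts     */

            printf("correct port: %u\n", ntohs(sport)); /* 4789                   */
            printf("buggy port:   %u\n", ntohs(bad));   /* 46354 on little-endian */
            return 0;
    }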
@@ -2492,15 +2490,19 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
SET_ETHTOOL_OPS(dev, &vxlan_ethtool_ops);
- /* create an fdb entry for default destination */
- err = vxlan_fdb_create(vxlan, all_zeros_mac,
- &vxlan->default_dst.remote_ip,
- NUD_REACHABLE|NUD_PERMANENT,
- NLM_F_EXCL|NLM_F_CREATE,
- vxlan->dst_port, vxlan->default_dst.remote_vni,
- vxlan->default_dst.remote_ifindex, NTF_SELF);
- if (err)
- return err;
+ /* create an fdb entry for a valid default destination */
+ if (!vxlan_addr_any(&vxlan->default_dst.remote_ip)) {
+ err = vxlan_fdb_create(vxlan, all_zeros_mac,
+ &vxlan->default_dst.remote_ip,
+ NUD_REACHABLE|NUD_PERMANENT,
+ NLM_F_EXCL|NLM_F_CREATE,
+ vxlan->dst_port,
+ vxlan->default_dst.remote_vni,
+ vxlan->default_dst.remote_ifindex,
+ NTF_SELF);
+ if (err)
+ return err;
+ }
err = register_netdevice(dev);
if (err) {
diff --git a/drivers/net/wireless/adm8211.c b/drivers/net/wireless/adm8211.c
index f9a24e5..cfce83e 100644
--- a/drivers/net/wireless/adm8211.c
+++ b/drivers/net/wireless/adm8211.c
@@ -1924,7 +1924,6 @@ static int adm8211_probe(struct pci_dev *pdev,
pci_iounmap(pdev, priv->map);
err_free_dev:
- pci_set_drvdata(pdev, NULL);
ieee80211_free_hw(dev);
err_free_reg:
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index 7fe1964..edf4b57 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -5570,7 +5570,6 @@ static void airo_pci_remove(struct pci_dev *pdev)
airo_print_info(dev->name, "Unregistering...");
stop_airo_card(dev, 1);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
}
static int airo_pci_suspend(struct pci_dev *pdev, pm_message_t state)
diff --git a/drivers/net/wireless/ath/Kconfig b/drivers/net/wireless/ath/Kconfig
index 1abf1d4..ba81d62 100644
--- a/drivers/net/wireless/ath/Kconfig
+++ b/drivers/net/wireless/ath/Kconfig
@@ -32,5 +32,6 @@ source "drivers/net/wireless/ath/ath6kl/Kconfig"
source "drivers/net/wireless/ath/ar5523/Kconfig"
source "drivers/net/wireless/ath/wil6210/Kconfig"
source "drivers/net/wireless/ath/ath10k/Kconfig"
+source "drivers/net/wireless/ath/wcn36xx/Kconfig"
endif
diff --git a/drivers/net/wireless/ath/Makefile b/drivers/net/wireless/ath/Makefile
index fb05cfd..363b056 100644
--- a/drivers/net/wireless/ath/Makefile
+++ b/drivers/net/wireless/ath/Makefile
@@ -5,6 +5,7 @@ obj-$(CONFIG_ATH6KL) += ath6kl/
obj-$(CONFIG_AR5523) += ar5523/
obj-$(CONFIG_WIL6210) += wil6210/
obj-$(CONFIG_ATH10K) += ath10k/
+obj-$(CONFIG_WCN36XX) += wcn36xx/
obj-$(CONFIG_ATH_COMMON) += ath.o
diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c
index 17d7fec..280fc3d 100644
--- a/drivers/net/wireless/ath/ar5523/ar5523.c
+++ b/drivers/net/wireless/ath/ar5523/ar5523.c
@@ -1762,6 +1762,7 @@ static struct usb_device_id ar5523_id_table[] = {
AR5523_DEVICE_UX(0x2001, 0x3a00), /* Dlink / DWLAG132 */
AR5523_DEVICE_UG(0x2001, 0x3a02), /* Dlink / DWLG132 */
AR5523_DEVICE_UX(0x2001, 0x3a04), /* Dlink / DWLAG122 */
+ AR5523_DEVICE_UG(0x07d1, 0x3a07), /* D-Link / WUA-2340 rev A1 */
AR5523_DEVICE_UG(0x1690, 0x0712), /* Gigaset / AR5523 */
AR5523_DEVICE_UG(0x1690, 0x0710), /* Gigaset / SMCWUSBTG */
AR5523_DEVICE_UG(0x129b, 0x160c), /* Gigaset / USB stick 108
diff --git a/drivers/net/wireless/ath/ath10k/bmi.c b/drivers/net/wireless/ath/ath10k/bmi.c
index 744da6d..a1f0996 100644
--- a/drivers/net/wireless/ath/ath10k/bmi.c
+++ b/drivers/net/wireless/ath/ath10k/bmi.c
@@ -22,7 +22,8 @@
void ath10k_bmi_start(struct ath10k *ar)
{
- ath10k_dbg(ATH10K_DBG_CORE, "BMI started\n");
+ ath10k_dbg(ATH10K_DBG_BMI, "bmi start\n");
+
ar->bmi.done_sent = false;
}
@@ -32,8 +33,10 @@ int ath10k_bmi_done(struct ath10k *ar)
u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.done);
int ret;
+ ath10k_dbg(ATH10K_DBG_BMI, "bmi done\n");
+
if (ar->bmi.done_sent) {
- ath10k_dbg(ATH10K_DBG_CORE, "%s skipped\n", __func__);
+ ath10k_dbg(ATH10K_DBG_BMI, "bmi skipped\n");
return 0;
}
@@ -46,7 +49,6 @@ int ath10k_bmi_done(struct ath10k *ar)
return ret;
}
- ath10k_dbg(ATH10K_DBG_CORE, "BMI done\n");
return 0;
}
@@ -59,6 +61,8 @@ int ath10k_bmi_get_target_info(struct ath10k *ar,
u32 resplen = sizeof(resp.get_target_info);
int ret;
+ ath10k_dbg(ATH10K_DBG_BMI, "bmi get target info\n");
+
if (ar->bmi.done_sent) {
ath10k_warn("BMI Get Target Info Command disallowed\n");
return -EBUSY;
@@ -80,6 +84,7 @@ int ath10k_bmi_get_target_info(struct ath10k *ar,
target_info->version = __le32_to_cpu(resp.get_target_info.version);
target_info->type = __le32_to_cpu(resp.get_target_info.type);
+
return 0;
}
@@ -92,15 +97,14 @@ int ath10k_bmi_read_memory(struct ath10k *ar,
u32 rxlen;
int ret;
+ ath10k_dbg(ATH10K_DBG_BMI, "bmi read address 0x%x length %d\n",
+ address, length);
+
if (ar->bmi.done_sent) {
ath10k_warn("command disallowed\n");
return -EBUSY;
}
- ath10k_dbg(ATH10K_DBG_CORE,
- "%s: (device: 0x%p, address: 0x%x, length: %d)\n",
- __func__, ar, address, length);
-
while (length) {
rxlen = min_t(u32, length, BMI_MAX_DATA_SIZE);
@@ -133,15 +137,14 @@ int ath10k_bmi_write_memory(struct ath10k *ar,
u32 txlen;
int ret;
+ ath10k_dbg(ATH10K_DBG_BMI, "bmi write address 0x%x length %d\n",
+ address, length);
+
if (ar->bmi.done_sent) {
ath10k_warn("command disallowed\n");
return -EBUSY;
}
- ath10k_dbg(ATH10K_DBG_CORE,
- "%s: (device: 0x%p, address: 0x%x, length: %d)\n",
- __func__, ar, address, length);
-
while (length) {
txlen = min(length, BMI_MAX_DATA_SIZE - hdrlen);
@@ -180,15 +183,14 @@ int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 *param)
u32 resplen = sizeof(resp.execute);
int ret;
+ ath10k_dbg(ATH10K_DBG_BMI, "bmi execute address 0x%x param 0x%x\n",
+ address, *param);
+
if (ar->bmi.done_sent) {
ath10k_warn("command disallowed\n");
return -EBUSY;
}
- ath10k_dbg(ATH10K_DBG_CORE,
- "%s: (device: 0x%p, address: 0x%x, param: %d)\n",
- __func__, ar, address, *param);
-
cmd.id = __cpu_to_le32(BMI_EXECUTE);
cmd.execute.addr = __cpu_to_le32(address);
cmd.execute.param = __cpu_to_le32(*param);
@@ -216,6 +218,9 @@ int ath10k_bmi_lz_data(struct ath10k *ar, const void *buffer, u32 length)
u32 txlen;
int ret;
+ ath10k_dbg(ATH10K_DBG_BMI, "bmi lz data buffer 0x%p length %d\n",
+ buffer, length);
+
if (ar->bmi.done_sent) {
ath10k_warn("command disallowed\n");
return -EBUSY;
@@ -250,6 +255,9 @@ int ath10k_bmi_lz_stream_start(struct ath10k *ar, u32 address)
u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.lz_start);
int ret;
+ ath10k_dbg(ATH10K_DBG_BMI, "bmi lz stream start address 0x%x\n",
+ address);
+
if (ar->bmi.done_sent) {
ath10k_warn("command disallowed\n");
return -EBUSY;
@@ -275,6 +283,10 @@ int ath10k_bmi_fast_download(struct ath10k *ar,
u32 trailer_len = length - head_len;
int ret;
+ ath10k_dbg(ATH10K_DBG_BMI,
+ "bmi fast download address 0x%x buffer 0x%p length %d\n",
+ address, buffer, length);
+
ret = ath10k_bmi_lz_stream_start(ar, address);
if (ret)
return ret;
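
The BMI hunks move every message to a dedicated ATH10K_DBG_BMI level with a greppable lowercase "bmi " prefix, and log before the done_sent check so even rejected commands leave a trace. A minimal sketch of the mask-based logging scheme (ath10k selects levels through a debug_mask module parameter; the names and bits below are illustrative):

    #include <stdio.h>

    enum dbg_mask {                        /* one bit per subsystem */
            DBG_BOOT = 1 << 0,
            DBG_BMI  = 1 << 1,
    };

    static unsigned int debug_mask = DBG_BMI;

    #define my_dbg(mask, fmt, ...) \
            do { \
                    if (debug_mask & (mask)) \
                            printf(fmt, ##__VA_ARGS__); \
            } while (0)

    int main(void)
    {
            my_dbg(DBG_BMI, "bmi read address 0x%x length %d\n", 0x1000, 64);
            my_dbg(DBG_BOOT, "boot suppressed\n");  /* bit not set: no output */
            return 0;
    }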
diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
index f8b969f..834e29e 100644
--- a/drivers/net/wireless/ath/ath10k/ce.c
+++ b/drivers/net/wireless/ath/ath10k/ce.c
@@ -76,36 +76,7 @@ static inline void ath10k_ce_src_ring_write_index_set(struct ath10k *ar,
u32 ce_ctrl_addr,
unsigned int n)
{
- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- void __iomem *indicator_addr;
-
- if (!test_bit(ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND, ar_pci->features)) {
- ath10k_pci_write32(ar, ce_ctrl_addr + SR_WR_INDEX_ADDRESS, n);
- return;
- }
-
- /* workaround for QCA988x_1.0 HW CE */
- indicator_addr = ar_pci->mem + ce_ctrl_addr + DST_WATERMARK_ADDRESS;
-
- if (ce_ctrl_addr == ath10k_ce_base_address(CDC_WAR_DATA_CE)) {
- iowrite32((CDC_WAR_MAGIC_STR | n), indicator_addr);
- } else {
- unsigned long irq_flags;
- local_irq_save(irq_flags);
- iowrite32(1, indicator_addr);
-
- /*
- * PCIE write waits for ACK in IPQ8K, there is no
- * need to read back value.
- */
- (void)ioread32(indicator_addr);
- (void)ioread32(indicator_addr); /* conservative */
-
- ath10k_pci_write32(ar, ce_ctrl_addr + SR_WR_INDEX_ADDRESS, n);
-
- iowrite32(0, indicator_addr);
- local_irq_restore(irq_flags);
- }
+ ath10k_pci_write32(ar, ce_ctrl_addr + SR_WR_INDEX_ADDRESS, n);
}
static inline u32 ath10k_ce_src_ring_write_index_get(struct ath10k *ar,
@@ -285,7 +256,7 @@ static inline void ath10k_ce_engine_int_status_clear(struct ath10k *ar,
* ath10k_ce_sendlist_send.
* The caller takes responsibility for any needed locking.
*/
-static int ath10k_ce_send_nolock(struct ce_state *ce_state,
+static int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
void *per_transfer_context,
u32 buffer,
unsigned int nbytes,
@@ -293,7 +264,7 @@ static int ath10k_ce_send_nolock(struct ce_state *ce_state,
unsigned int flags)
{
struct ath10k *ar = ce_state->ar;
- struct ce_ring_state *src_ring = ce_state->src_ring;
+ struct ath10k_ce_ring *src_ring = ce_state->src_ring;
struct ce_desc *desc, *sdesc;
unsigned int nentries_mask = src_ring->nentries_mask;
unsigned int sw_index = src_ring->sw_index;
@@ -306,7 +277,9 @@ static int ath10k_ce_send_nolock(struct ce_state *ce_state,
ath10k_warn("%s: send more we can (nbytes: %d, max: %d)\n",
__func__, nbytes, ce_state->src_sz_max);
- ath10k_pci_wake(ar);
+ ret = ath10k_pci_wake(ar);
+ if (ret)
+ return ret;
if (unlikely(CE_RING_DELTA(nentries_mask,
write_index, sw_index - 1) <= 0)) {
@@ -346,7 +319,7 @@ exit:
return ret;
}
-int ath10k_ce_send(struct ce_state *ce_state,
+int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,
void *per_transfer_context,
u32 buffer,
unsigned int nbytes,
@@ -365,33 +338,19 @@ int ath10k_ce_send(struct ce_state *ce_state,
return ret;
}
-void ath10k_ce_sendlist_buf_add(struct ce_sendlist *sendlist, u32 buffer,
- unsigned int nbytes, u32 flags)
-{
- unsigned int num_items = sendlist->num_items;
- struct ce_sendlist_item *item;
-
- item = &sendlist->item[num_items];
- item->data = buffer;
- item->u.nbytes = nbytes;
- item->flags = flags;
- sendlist->num_items++;
-}
-
-int ath10k_ce_sendlist_send(struct ce_state *ce_state,
+int ath10k_ce_sendlist_send(struct ath10k_ce_pipe *ce_state,
void *per_transfer_context,
- struct ce_sendlist *sendlist,
- unsigned int transfer_id)
+ unsigned int transfer_id,
+ u32 paddr, unsigned int nbytes,
+ u32 flags)
{
- struct ce_ring_state *src_ring = ce_state->src_ring;
- struct ce_sendlist_item *item;
+ struct ath10k_ce_ring *src_ring = ce_state->src_ring;
struct ath10k *ar = ce_state->ar;
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
unsigned int nentries_mask = src_ring->nentries_mask;
- unsigned int num_items = sendlist->num_items;
unsigned int sw_index;
unsigned int write_index;
- int i, delta, ret = -ENOMEM;
+ int delta, ret = -ENOMEM;
spin_lock_bh(&ar_pci->ce_lock);
@@ -400,30 +359,12 @@ int ath10k_ce_sendlist_send(struct ce_state *ce_state,
delta = CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
- if (delta >= num_items) {
- /*
- * Handle all but the last item uniformly.
- */
- for (i = 0; i < num_items - 1; i++) {
- item = &sendlist->item[i];
- ret = ath10k_ce_send_nolock(ce_state,
- CE_SENDLIST_ITEM_CTXT,
- (u32) item->data,
- item->u.nbytes, transfer_id,
- item->flags |
- CE_SEND_FLAG_GATHER);
- if (ret)
- ath10k_warn("CE send failed for item: %d\n", i);
- }
- /*
- * Provide valid context pointer for final item.
- */
- item = &sendlist->item[i];
+ if (delta >= 1) {
ret = ath10k_ce_send_nolock(ce_state, per_transfer_context,
- (u32) item->data, item->u.nbytes,
- transfer_id, item->flags);
+ paddr, nbytes,
+ transfer_id, flags);
if (ret)
- ath10k_warn("CE send failed for last item: %d\n", i);
+ ath10k_warn("CE send failed: %d\n", ret);
}
spin_unlock_bh(&ar_pci->ce_lock);
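
With the gather loop collapsed to a single-buffer send, the old delta >= num_items test becomes delta >= 1: at least one slot must be free between the producer's write index and one-behind the consumer. CE_RING_DELTA is the usual power-of-two ring arithmetic; a standalone demonstration, with the macro paraphrased from ce.h:

    #include <stdio.h>

    /* slots from 'from' up to (but excluding) 'to', modulo ring size */
    #define RING_DELTA(mask, from, to) (((to) - (from)) & (mask))

    int main(void)
    {
            unsigned int mask = 8 - 1;        /* 8-entry ring */
            unsigned int sw_index = 5;        /* consumer     */
            unsigned int write_index = 3;     /* producer     */

            /* one slot is always kept empty to tell full from empty */
            printf("free: %u\n", RING_DELTA(mask, write_index, sw_index - 1));
            return 0;
    }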
@@ -431,11 +372,11 @@ int ath10k_ce_sendlist_send(struct ce_state *ce_state,
return ret;
}
-int ath10k_ce_recv_buf_enqueue(struct ce_state *ce_state,
+int ath10k_ce_recv_buf_enqueue(struct ath10k_ce_pipe *ce_state,
void *per_recv_context,
u32 buffer)
{
- struct ce_ring_state *dest_ring = ce_state->dest_ring;
+ struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
u32 ctrl_addr = ce_state->ctrl_addr;
struct ath10k *ar = ce_state->ar;
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
@@ -448,7 +389,9 @@ int ath10k_ce_recv_buf_enqueue(struct ce_state *ce_state,
write_index = dest_ring->write_index;
sw_index = dest_ring->sw_index;
- ath10k_pci_wake(ar);
+ ret = ath10k_pci_wake(ar);
+ if (ret)
+ goto out;
if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) > 0) {
struct ce_desc *base = dest_ring->base_addr_owner_space;
@@ -470,6 +413,8 @@ int ath10k_ce_recv_buf_enqueue(struct ce_state *ce_state,
ret = -EIO;
}
ath10k_pci_sleep(ar);
+
+out:
spin_unlock_bh(&ar_pci->ce_lock);
return ret;
@@ -479,14 +424,14 @@ int ath10k_ce_recv_buf_enqueue(struct ce_state *ce_state,
* Guts of ath10k_ce_completed_recv_next.
* The caller takes responsibility for any necessary locking.
*/
-static int ath10k_ce_completed_recv_next_nolock(struct ce_state *ce_state,
+static int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
u32 *bufferp,
unsigned int *nbytesp,
unsigned int *transfer_idp,
unsigned int *flagsp)
{
- struct ce_ring_state *dest_ring = ce_state->dest_ring;
+ struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
unsigned int nentries_mask = dest_ring->nentries_mask;
unsigned int sw_index = dest_ring->sw_index;
@@ -535,7 +480,7 @@ static int ath10k_ce_completed_recv_next_nolock(struct ce_state *ce_state,
return 0;
}
-int ath10k_ce_completed_recv_next(struct ce_state *ce_state,
+int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
u32 *bufferp,
unsigned int *nbytesp,
@@ -556,11 +501,11 @@ int ath10k_ce_completed_recv_next(struct ce_state *ce_state,
return ret;
}
-int ath10k_ce_revoke_recv_next(struct ce_state *ce_state,
+int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
u32 *bufferp)
{
- struct ce_ring_state *dest_ring;
+ struct ath10k_ce_ring *dest_ring;
unsigned int nentries_mask;
unsigned int sw_index;
unsigned int write_index;
@@ -612,19 +557,20 @@ int ath10k_ce_revoke_recv_next(struct ce_state *ce_state,
* Guts of ath10k_ce_completed_send_next.
* The caller takes responsibility for any necessary locking.
*/
-static int ath10k_ce_completed_send_next_nolock(struct ce_state *ce_state,
+static int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
u32 *bufferp,
unsigned int *nbytesp,
unsigned int *transfer_idp)
{
- struct ce_ring_state *src_ring = ce_state->src_ring;
+ struct ath10k_ce_ring *src_ring = ce_state->src_ring;
u32 ctrl_addr = ce_state->ctrl_addr;
struct ath10k *ar = ce_state->ar;
unsigned int nentries_mask = src_ring->nentries_mask;
unsigned int sw_index = src_ring->sw_index;
+ struct ce_desc *sdesc, *sbase;
unsigned int read_index;
- int ret = -EIO;
+ int ret;
if (src_ring->hw_index == sw_index) {
/*
@@ -634,48 +580,54 @@ static int ath10k_ce_completed_send_next_nolock(struct ce_state *ce_state,
* the SW has really caught up to the HW, or if the cached
* value of the HW index has become stale.
*/
- ath10k_pci_wake(ar);
+
+ ret = ath10k_pci_wake(ar);
+ if (ret)
+ return ret;
+
src_ring->hw_index =
ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
src_ring->hw_index &= nentries_mask;
+
ath10k_pci_sleep(ar);
}
+
read_index = src_ring->hw_index;
- if ((read_index != sw_index) && (read_index != 0xffffffff)) {
- struct ce_desc *sbase = src_ring->shadow_base;
- struct ce_desc *sdesc = CE_SRC_RING_TO_DESC(sbase, sw_index);
+ if ((read_index == sw_index) || (read_index == 0xffffffff))
+ return -EIO;
- /* Return data from completed source descriptor */
- *bufferp = __le32_to_cpu(sdesc->addr);
- *nbytesp = __le16_to_cpu(sdesc->nbytes);
- *transfer_idp = MS(__le16_to_cpu(sdesc->flags),
- CE_DESC_FLAGS_META_DATA);
+ sbase = src_ring->shadow_base;
+ sdesc = CE_SRC_RING_TO_DESC(sbase, sw_index);
- if (per_transfer_contextp)
- *per_transfer_contextp =
- src_ring->per_transfer_context[sw_index];
+ /* Return data from completed source descriptor */
+ *bufferp = __le32_to_cpu(sdesc->addr);
+ *nbytesp = __le16_to_cpu(sdesc->nbytes);
+ *transfer_idp = MS(__le16_to_cpu(sdesc->flags),
+ CE_DESC_FLAGS_META_DATA);
- /* sanity */
- src_ring->per_transfer_context[sw_index] = NULL;
+ if (per_transfer_contextp)
+ *per_transfer_contextp =
+ src_ring->per_transfer_context[sw_index];
- /* Update sw_index */
- sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
- src_ring->sw_index = sw_index;
- ret = 0;
- }
+ /* sanity */
+ src_ring->per_transfer_context[sw_index] = NULL;
- return ret;
+ /* Update sw_index */
+ sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
+ src_ring->sw_index = sw_index;
+
+ return 0;
}
/* NB: Modeled after ath10k_ce_completed_send_next */
-int ath10k_ce_cancel_send_next(struct ce_state *ce_state,
+int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
u32 *bufferp,
unsigned int *nbytesp,
unsigned int *transfer_idp)
{
- struct ce_ring_state *src_ring;
+ struct ath10k_ce_ring *src_ring;
unsigned int nentries_mask;
unsigned int sw_index;
unsigned int write_index;
@@ -727,7 +679,7 @@ int ath10k_ce_cancel_send_next(struct ce_state *ce_state,
return ret;
}
-int ath10k_ce_completed_send_next(struct ce_state *ce_state,
+int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
u32 *bufferp,
unsigned int *nbytesp,
@@ -756,53 +708,29 @@ int ath10k_ce_completed_send_next(struct ce_state *ce_state,
void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- struct ce_state *ce_state = ar_pci->ce_id_to_state[ce_id];
+ struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
u32 ctrl_addr = ce_state->ctrl_addr;
- void *transfer_context;
- u32 buf;
- unsigned int nbytes;
- unsigned int id;
- unsigned int flags;
+ int ret;
+
+ ret = ath10k_pci_wake(ar);
+ if (ret)
+ return;
- ath10k_pci_wake(ar);
spin_lock_bh(&ar_pci->ce_lock);
/* Clear the copy-complete interrupts that will be handled here. */
ath10k_ce_engine_int_status_clear(ar, ctrl_addr,
HOST_IS_COPY_COMPLETE_MASK);
- if (ce_state->recv_cb) {
- /*
- * Pop completed recv buffers and call the registered
- * recv callback for each
- */
- while (ath10k_ce_completed_recv_next_nolock(ce_state,
- &transfer_context,
- &buf, &nbytes,
- &id, &flags) == 0) {
- spin_unlock_bh(&ar_pci->ce_lock);
- ce_state->recv_cb(ce_state, transfer_context, buf,
- nbytes, id, flags);
- spin_lock_bh(&ar_pci->ce_lock);
- }
- }
+ spin_unlock_bh(&ar_pci->ce_lock);
- if (ce_state->send_cb) {
- /*
- * Pop completed send buffers and call the registered
- * send callback for each
- */
- while (ath10k_ce_completed_send_next_nolock(ce_state,
- &transfer_context,
- &buf,
- &nbytes,
- &id) == 0) {
- spin_unlock_bh(&ar_pci->ce_lock);
- ce_state->send_cb(ce_state, transfer_context,
- buf, nbytes, id);
- spin_lock_bh(&ar_pci->ce_lock);
- }
- }
+ if (ce_state->recv_cb)
+ ce_state->recv_cb(ce_state);
+
+ if (ce_state->send_cb)
+ ce_state->send_cb(ce_state);
+
+ spin_lock_bh(&ar_pci->ce_lock);
/*
* Misc CE interrupts are not being handled, but still need
@@ -823,10 +751,13 @@ void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id)
void ath10k_ce_per_engine_service_any(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- int ce_id;
+ int ce_id, ret;
u32 intr_summary;
- ath10k_pci_wake(ar);
+ ret = ath10k_pci_wake(ar);
+ if (ret)
+ return;
+
intr_summary = CE_INTERRUPT_SUMMARY(ar);
for (ce_id = 0; intr_summary && (ce_id < ar_pci->ce_count); ce_id++) {
@@ -849,13 +780,16 @@ void ath10k_ce_per_engine_service_any(struct ath10k *ar)
*
* Called with ce_lock held.
*/
-static void ath10k_ce_per_engine_handler_adjust(struct ce_state *ce_state,
+static void ath10k_ce_per_engine_handler_adjust(struct ath10k_ce_pipe *ce_state,
int disable_copy_compl_intr)
{
u32 ctrl_addr = ce_state->ctrl_addr;
struct ath10k *ar = ce_state->ar;
+ int ret;
- ath10k_pci_wake(ar);
+ ret = ath10k_pci_wake(ar);
+ if (ret)
+ return;
if ((!disable_copy_compl_intr) &&
(ce_state->send_cb || ce_state->recv_cb))
@@ -871,11 +805,14 @@ static void ath10k_ce_per_engine_handler_adjust(struct ce_state *ce_state,
void ath10k_ce_disable_interrupts(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- int ce_id;
+ int ce_id, ret;
+
+ ret = ath10k_pci_wake(ar);
+ if (ret)
+ return;
- ath10k_pci_wake(ar);
for (ce_id = 0; ce_id < ar_pci->ce_count; ce_id++) {
- struct ce_state *ce_state = ar_pci->ce_id_to_state[ce_id];
+ struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
u32 ctrl_addr = ce_state->ctrl_addr;
ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr);
@@ -883,12 +820,8 @@ void ath10k_ce_disable_interrupts(struct ath10k *ar)
ath10k_pci_sleep(ar);
}
-void ath10k_ce_send_cb_register(struct ce_state *ce_state,
- void (*send_cb) (struct ce_state *ce_state,
- void *transfer_context,
- u32 buffer,
- unsigned int nbytes,
- unsigned int transfer_id),
+void ath10k_ce_send_cb_register(struct ath10k_ce_pipe *ce_state,
+ void (*send_cb)(struct ath10k_ce_pipe *),
int disable_interrupts)
{
struct ath10k *ar = ce_state->ar;
@@ -900,13 +833,8 @@ void ath10k_ce_send_cb_register(struct ce_state *ce_state,
spin_unlock_bh(&ar_pci->ce_lock);
}
-void ath10k_ce_recv_cb_register(struct ce_state *ce_state,
- void (*recv_cb) (struct ce_state *ce_state,
- void *transfer_context,
- u32 buffer,
- unsigned int nbytes,
- unsigned int transfer_id,
- unsigned int flags))
+void ath10k_ce_recv_cb_register(struct ath10k_ce_pipe *ce_state,
+ void (*recv_cb)(struct ath10k_ce_pipe *))
{
struct ath10k *ar = ce_state->ar;
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
@@ -919,11 +847,11 @@ void ath10k_ce_recv_cb_register(struct ce_state *ce_state,
static int ath10k_ce_init_src_ring(struct ath10k *ar,
unsigned int ce_id,
- struct ce_state *ce_state,
+ struct ath10k_ce_pipe *ce_state,
const struct ce_attr *attr)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- struct ce_ring_state *src_ring;
+ struct ath10k_ce_ring *src_ring;
unsigned int nentries = attr->src_nentries;
unsigned int ce_nbytes;
u32 ctrl_addr = ath10k_ce_base_address(ce_id);
@@ -937,19 +865,18 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,
return 0;
}
- ce_nbytes = sizeof(struct ce_ring_state) + (nentries * sizeof(void *));
+ ce_nbytes = sizeof(struct ath10k_ce_ring) + (nentries * sizeof(void *));
ptr = kzalloc(ce_nbytes, GFP_KERNEL);
if (ptr == NULL)
return -ENOMEM;
- ce_state->src_ring = (struct ce_ring_state *)ptr;
+ ce_state->src_ring = (struct ath10k_ce_ring *)ptr;
src_ring = ce_state->src_ring;
- ptr += sizeof(struct ce_ring_state);
+ ptr += sizeof(struct ath10k_ce_ring);
src_ring->nentries = nentries;
src_ring->nentries_mask = nentries - 1;
- ath10k_pci_wake(ar);
src_ring->sw_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
src_ring->sw_index &= src_ring->nentries_mask;
src_ring->hw_index = src_ring->sw_index;
@@ -957,7 +884,6 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,
src_ring->write_index =
ath10k_ce_src_ring_write_index_get(ar, ctrl_addr);
src_ring->write_index &= src_ring->nentries_mask;
- ath10k_pci_sleep(ar);
src_ring->per_transfer_context = (void **)ptr;
@@ -970,6 +896,12 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,
(nentries * sizeof(struct ce_desc) +
CE_DESC_RING_ALIGN),
&base_addr);
+ if (!src_ring->base_addr_owner_space_unaligned) {
+ kfree(ce_state->src_ring);
+ ce_state->src_ring = NULL;
+ return -ENOMEM;
+ }
+
src_ring->base_addr_ce_space_unaligned = base_addr;
src_ring->base_addr_owner_space = PTR_ALIGN(
@@ -986,12 +918,21 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,
src_ring->shadow_base_unaligned =
kmalloc((nentries * sizeof(struct ce_desc) +
CE_DESC_RING_ALIGN), GFP_KERNEL);
+ if (!src_ring->shadow_base_unaligned) {
+ pci_free_consistent(ar_pci->pdev,
+ (nentries * sizeof(struct ce_desc) +
+ CE_DESC_RING_ALIGN),
+ src_ring->base_addr_owner_space,
+ src_ring->base_addr_ce_space);
+ kfree(ce_state->src_ring);
+ ce_state->src_ring = NULL;
+ return -ENOMEM;
+ }
src_ring->shadow_base = PTR_ALIGN(
src_ring->shadow_base_unaligned,
CE_DESC_RING_ALIGN);
- ath10k_pci_wake(ar);
ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr,
src_ring->base_addr_ce_space);
ath10k_ce_src_ring_size_set(ar, ctrl_addr, nentries);
@@ -999,18 +940,21 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,
ath10k_ce_src_ring_byte_swap_set(ar, ctrl_addr, 0);
ath10k_ce_src_ring_lowmark_set(ar, ctrl_addr, 0);
ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, nentries);
- ath10k_pci_sleep(ar);
+
+ ath10k_dbg(ATH10K_DBG_BOOT,
+ "boot ce src ring id %d entries %d base_addr %p\n",
+ ce_id, nentries, src_ring->base_addr_owner_space);
return 0;
}
static int ath10k_ce_init_dest_ring(struct ath10k *ar,
unsigned int ce_id,
- struct ce_state *ce_state,
+ struct ath10k_ce_pipe *ce_state,
const struct ce_attr *attr)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- struct ce_ring_state *dest_ring;
+ struct ath10k_ce_ring *dest_ring;
unsigned int nentries = attr->dest_nentries;
unsigned int ce_nbytes;
u32 ctrl_addr = ath10k_ce_base_address(ce_id);
@@ -1024,25 +968,23 @@ static int ath10k_ce_init_dest_ring(struct ath10k *ar,
return 0;
}
- ce_nbytes = sizeof(struct ce_ring_state) + (nentries * sizeof(void *));
+ ce_nbytes = sizeof(struct ath10k_ce_ring) + (nentries * sizeof(void *));
ptr = kzalloc(ce_nbytes, GFP_KERNEL);
if (ptr == NULL)
return -ENOMEM;
- ce_state->dest_ring = (struct ce_ring_state *)ptr;
+ ce_state->dest_ring = (struct ath10k_ce_ring *)ptr;
dest_ring = ce_state->dest_ring;
- ptr += sizeof(struct ce_ring_state);
+ ptr += sizeof(struct ath10k_ce_ring);
dest_ring->nentries = nentries;
dest_ring->nentries_mask = nentries - 1;
- ath10k_pci_wake(ar);
dest_ring->sw_index = ath10k_ce_dest_ring_read_index_get(ar, ctrl_addr);
dest_ring->sw_index &= dest_ring->nentries_mask;
dest_ring->write_index =
ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr);
dest_ring->write_index &= dest_ring->nentries_mask;
- ath10k_pci_sleep(ar);
dest_ring->per_transfer_context = (void **)ptr;
@@ -1055,6 +997,12 @@ static int ath10k_ce_init_dest_ring(struct ath10k *ar,
(nentries * sizeof(struct ce_desc) +
CE_DESC_RING_ALIGN),
&base_addr);
+ if (!dest_ring->base_addr_owner_space_unaligned) {
+ kfree(ce_state->dest_ring);
+ ce_state->dest_ring = NULL;
+ return -ENOMEM;
+ }
+
dest_ring->base_addr_ce_space_unaligned = base_addr;
/*
@@ -1071,44 +1019,35 @@ static int ath10k_ce_init_dest_ring(struct ath10k *ar,
dest_ring->base_addr_ce_space_unaligned,
CE_DESC_RING_ALIGN);
- ath10k_pci_wake(ar);
ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr,
dest_ring->base_addr_ce_space);
ath10k_ce_dest_ring_size_set(ar, ctrl_addr, nentries);
ath10k_ce_dest_ring_byte_swap_set(ar, ctrl_addr, 0);
ath10k_ce_dest_ring_lowmark_set(ar, ctrl_addr, 0);
ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, nentries);
- ath10k_pci_sleep(ar);
+
+ ath10k_dbg(ATH10K_DBG_BOOT,
+ "boot ce dest ring id %d entries %d base_addr %p\n",
+ ce_id, nentries, dest_ring->base_addr_owner_space);
return 0;
}
-static struct ce_state *ath10k_ce_init_state(struct ath10k *ar,
+static struct ath10k_ce_pipe *ath10k_ce_init_state(struct ath10k *ar,
unsigned int ce_id,
const struct ce_attr *attr)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- struct ce_state *ce_state = NULL;
+ struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
u32 ctrl_addr = ath10k_ce_base_address(ce_id);
spin_lock_bh(&ar_pci->ce_lock);
- if (!ar_pci->ce_id_to_state[ce_id]) {
- ce_state = kzalloc(sizeof(*ce_state), GFP_ATOMIC);
- if (ce_state == NULL) {
- spin_unlock_bh(&ar_pci->ce_lock);
- return NULL;
- }
-
- ar_pci->ce_id_to_state[ce_id] = ce_state;
- ce_state->ar = ar;
- ce_state->id = ce_id;
- ce_state->ctrl_addr = ctrl_addr;
- ce_state->state = CE_RUNNING;
- /* Save attribute flags */
- ce_state->attr_flags = attr->flags;
- ce_state->src_sz_max = attr->src_sz_max;
- }
+ ce_state->ar = ar;
+ ce_state->id = ce_id;
+ ce_state->ctrl_addr = ctrl_addr;
+ ce_state->attr_flags = attr->flags;
+ ce_state->src_sz_max = attr->src_sz_max;
spin_unlock_bh(&ar_pci->ce_lock);
@@ -1122,12 +1061,17 @@ static struct ce_state *ath10k_ce_init_state(struct ath10k *ar,
* initialization. It may be that only one side or the other is
* initialized by software/firmware.
*/
-struct ce_state *ath10k_ce_init(struct ath10k *ar,
+struct ath10k_ce_pipe *ath10k_ce_init(struct ath10k *ar,
unsigned int ce_id,
const struct ce_attr *attr)
{
- struct ce_state *ce_state;
+ struct ath10k_ce_pipe *ce_state;
u32 ctrl_addr = ath10k_ce_base_address(ce_id);
+ int ret;
+
+ ret = ath10k_pci_wake(ar);
+ if (ret)
+ return NULL;
ce_state = ath10k_ce_init_state(ar, ce_id, attr);
if (!ce_state) {
@@ -1136,40 +1080,38 @@ struct ce_state *ath10k_ce_init(struct ath10k *ar,
}
if (attr->src_nentries) {
- if (ath10k_ce_init_src_ring(ar, ce_id, ce_state, attr)) {
- ath10k_err("Failed to initialize CE src ring for ID: %d\n",
- ce_id);
+ ret = ath10k_ce_init_src_ring(ar, ce_id, ce_state, attr);
+ if (ret) {
+ ath10k_err("Failed to initialize CE src ring for ID: %d (%d)\n",
+ ce_id, ret);
ath10k_ce_deinit(ce_state);
return NULL;
}
}
if (attr->dest_nentries) {
- if (ath10k_ce_init_dest_ring(ar, ce_id, ce_state, attr)) {
- ath10k_err("Failed to initialize CE dest ring for ID: %d\n",
- ce_id);
+ ret = ath10k_ce_init_dest_ring(ar, ce_id, ce_state, attr);
+ if (ret) {
+ ath10k_err("Failed to initialize CE dest ring for ID: %d (%d)\n",
+ ce_id, ret);
ath10k_ce_deinit(ce_state);
return NULL;
}
}
/* Enable CE error interrupts */
- ath10k_pci_wake(ar);
ath10k_ce_error_intr_enable(ar, ctrl_addr);
+
ath10k_pci_sleep(ar);
return ce_state;
}
-void ath10k_ce_deinit(struct ce_state *ce_state)
+void ath10k_ce_deinit(struct ath10k_ce_pipe *ce_state)
{
- unsigned int ce_id = ce_state->id;
struct ath10k *ar = ce_state->ar;
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- ce_state->state = CE_UNUSED;
- ar_pci->ce_id_to_state[ce_id] = NULL;
-
if (ce_state->src_ring) {
kfree(ce_state->src_ring->shadow_base_unaligned);
pci_free_consistent(ar_pci->pdev,
@@ -1190,5 +1132,7 @@ void ath10k_ce_deinit(struct ce_state *ce_state)
ce_state->dest_ring->base_addr_ce_space);
kfree(ce_state->dest_ring);
}
- kfree(ce_state);
+
+ ce_state->src_ring = NULL;
+ ce_state->dest_ring = NULL;
}
diff --git a/drivers/net/wireless/ath/ath10k/ce.h b/drivers/net/wireless/ath/ath10k/ce.h
index c17f07c..aec8028 100644
--- a/drivers/net/wireless/ath/ath10k/ce.h
+++ b/drivers/net/wireless/ath/ath10k/ce.h
@@ -27,7 +27,6 @@
/* Descriptor rings must be aligned to this boundary */
#define CE_DESC_RING_ALIGN 8
-#define CE_SENDLIST_ITEMS_MAX 12
#define CE_SEND_FLAG_GATHER 0x00010000
/*
@@ -36,16 +35,9 @@
* how to use copy engines.
*/
-struct ce_state;
+struct ath10k_ce_pipe;
-/* Copy Engine operational state */
-enum ce_op_state {
- CE_UNUSED,
- CE_PAUSED,
- CE_RUNNING,
-};
-
#define CE_DESC_FLAGS_GATHER (1 << 0)
#define CE_DESC_FLAGS_BYTE_SWAP (1 << 1)
#define CE_DESC_FLAGS_META_DATA_MASK 0xFFFC
@@ -57,8 +49,7 @@ struct ce_desc {
__le16 flags; /* %CE_DESC_FLAGS_ */
};
-/* Copy Engine Ring internal state */
-struct ce_ring_state {
+struct ath10k_ce_ring {
/* Number of entries in this ring; must be power of 2 */
unsigned int nentries;
unsigned int nentries_mask;
@@ -116,49 +107,20 @@ struct ce_ring_state {
void **per_transfer_context;
};
-/* Copy Engine internal state */
-struct ce_state {
+struct ath10k_ce_pipe {
struct ath10k *ar;
unsigned int id;
unsigned int attr_flags;
u32 ctrl_addr;
- enum ce_op_state state;
-
- void (*send_cb) (struct ce_state *ce_state,
- void *per_transfer_send_context,
- u32 buffer,
- unsigned int nbytes,
- unsigned int transfer_id);
- void (*recv_cb) (struct ce_state *ce_state,
- void *per_transfer_recv_context,
- u32 buffer,
- unsigned int nbytes,
- unsigned int transfer_id,
- unsigned int flags);
- unsigned int src_sz_max;
- struct ce_ring_state *src_ring;
- struct ce_ring_state *dest_ring;
-};
+ void (*send_cb)(struct ath10k_ce_pipe *);
+ void (*recv_cb)(struct ath10k_ce_pipe *);
-struct ce_sendlist_item {
- /* e.g. buffer or desc list */
- dma_addr_t data;
- union {
- /* simple buffer */
- unsigned int nbytes;
- /* Rx descriptor list */
- unsigned int ndesc;
- } u;
- /* externally-specified flags; OR-ed with internal flags */
- u32 flags;
-};
-
-struct ce_sendlist {
- unsigned int num_items;
- struct ce_sendlist_item item[CE_SENDLIST_ITEMS_MAX];
+ unsigned int src_sz_max;
+ struct ath10k_ce_ring *src_ring;
+ struct ath10k_ce_ring *dest_ring;
};
/* Copy Engine settable attributes */
@@ -182,7 +144,7 @@ struct ce_attr;
*
* Implementation note: pushes 1 buffer to Source ring
*/
-int ath10k_ce_send(struct ce_state *ce_state,
+int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,
void *per_transfer_send_context,
u32 buffer,
unsigned int nbytes,
@@ -190,21 +152,10 @@ int ath10k_ce_send(struct ce_state *ce_state,
unsigned int transfer_id,
unsigned int flags);
-void ath10k_ce_send_cb_register(struct ce_state *ce_state,
- void (*send_cb) (struct ce_state *ce_state,
- void *transfer_context,
- u32 buffer,
- unsigned int nbytes,
- unsigned int transfer_id),
+void ath10k_ce_send_cb_register(struct ath10k_ce_pipe *ce_state,
+ void (*send_cb)(struct ath10k_ce_pipe *),
int disable_interrupts);
-/* Append a simple buffer (address/length) to a sendlist. */
-void ath10k_ce_sendlist_buf_add(struct ce_sendlist *sendlist,
- u32 buffer,
- unsigned int nbytes,
- /* OR-ed with internal flags */
- u32 flags);
-
/*
* Queue a "sendlist" of buffers to be sent using gather to a single
* anonymous destination buffer
@@ -215,11 +166,11 @@ void ath10k_ce_sendlist_buf_add(struct ce_sendlist *sendlist,
*
* Implementation note: Pushes multiple buffers with Gather to Source ring.
*/
-int ath10k_ce_sendlist_send(struct ce_state *ce_state,
- void *per_transfer_send_context,
- struct ce_sendlist *sendlist,
- /* 14 bits */
- unsigned int transfer_id);
+int ath10k_ce_sendlist_send(struct ath10k_ce_pipe *ce_state,
+ void *per_transfer_context,
+ unsigned int transfer_id,
+ u32 paddr, unsigned int nbytes,
+ u32 flags);
/*==================Recv=======================*/
@@ -233,17 +184,12 @@ int ath10k_ce_sendlist_send(struct ce_state *ce_state,
*
* Implementation note: Pushes a buffer to Dest ring.
*/
-int ath10k_ce_recv_buf_enqueue(struct ce_state *ce_state,
+int ath10k_ce_recv_buf_enqueue(struct ath10k_ce_pipe *ce_state,
void *per_transfer_recv_context,
u32 buffer);
-void ath10k_ce_recv_cb_register(struct ce_state *ce_state,
- void (*recv_cb) (struct ce_state *ce_state,
- void *transfer_context,
- u32 buffer,
- unsigned int nbytes,
- unsigned int transfer_id,
- unsigned int flags));
+void ath10k_ce_recv_cb_register(struct ath10k_ce_pipe *ce_state,
+ void (*recv_cb)(struct ath10k_ce_pipe *));
/* recv flags */
/* Data is byte-swapped */
@@ -253,7 +199,7 @@ void ath10k_ce_recv_cb_register(struct ce_state *ce_state,
* Supply data for the next completed unprocessed receive descriptor.
* Pops buffer from Dest ring.
*/
-int ath10k_ce_completed_recv_next(struct ce_state *ce_state,
+int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
u32 *bufferp,
unsigned int *nbytesp,
@@ -263,7 +209,7 @@ int ath10k_ce_completed_recv_next(struct ce_state *ce_state,
* Supply data for the next completed unprocessed send descriptor.
* Pops 1 completed send buffer from Source ring.
*/
-int ath10k_ce_completed_send_next(struct ce_state *ce_state,
+int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
u32 *bufferp,
unsigned int *nbytesp,
@@ -272,7 +218,7 @@ int ath10k_ce_completed_send_next(struct ce_state *ce_state,
/*==================CE Engine Initialization=======================*/
/* Initialize an instance of a CE */
-struct ce_state *ath10k_ce_init(struct ath10k *ar,
+struct ath10k_ce_pipe *ath10k_ce_init(struct ath10k *ar,
unsigned int ce_id,
const struct ce_attr *attr);
@@ -282,7 +228,7 @@ struct ce_state *ath10k_ce_init(struct ath10k *ar,
* receive buffers. Target DMA must be stopped before using
* this API.
*/
-int ath10k_ce_revoke_recv_next(struct ce_state *ce_state,
+int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
u32 *bufferp);
@@ -291,13 +237,13 @@ int ath10k_ce_revoke_recv_next(struct ce_state *ce_state,
* pending sends. Target DMA must be stopped before using
* this API.
*/
-int ath10k_ce_cancel_send_next(struct ce_state *ce_state,
+int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
u32 *bufferp,
unsigned int *nbytesp,
unsigned int *transfer_idp);
-void ath10k_ce_deinit(struct ce_state *ce_state);
+void ath10k_ce_deinit(struct ath10k_ce_pipe *ce_state);
/*==================CE Interrupt Handlers====================*/
void ath10k_ce_per_engine_service_any(struct ath10k *ar);
@@ -322,9 +268,6 @@ struct ce_attr {
/* CE_ATTR_* values */
unsigned int flags;
- /* currently not in use */
- unsigned int priority;
-
/* #entries in source ring - Must be a power of 2 */
unsigned int src_nentries;
@@ -336,21 +279,8 @@ struct ce_attr {
/* #entries in destination ring - Must be a power of 2 */
unsigned int dest_nentries;
-
- /* Future use */
- void *reserved;
};
-/*
- * When using sendlist_send to transfer multiple buffer fragments, the
- * transfer context of each fragment, except last one, will be filled
- * with CE_SENDLIST_ITEM_CTXT. ce_completed_send will return success for
- * each fragment done with send and the transfer context would be
- * CE_SENDLIST_ITEM_CTXT. Upper layer could use this to identify the
- * status of a send completion.
- */
-#define CE_SENDLIST_ITEM_CTXT ((void *)0xcecebeef)
-
#define SR_BA_ADDRESS 0x0000
#define SR_SIZE_ADDRESS 0x0004
#define DR_BA_ADDRESS 0x0008
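
After this header change the per-transfer callback parameters are gone: send_cb and recv_cb receive only the pipe and are expected to pop their own completions, which is what the reworked ath10k_ce_per_engine_service() in ce.c above relies on. A sketch of a conforming receive callback under the new API (kernel context, not standalone; handle_rx() is a hypothetical upper-layer hook):

    static void my_recv_cb(struct ath10k_ce_pipe *pipe)
    {
            void *ctx;
            u32 buf;
            unsigned int nbytes, id, flags;

            /* drain every completed receive descriptor ourselves */
            while (ath10k_ce_completed_recv_next(pipe, &ctx, &buf,
                                                 &nbytes, &id, &flags) == 0)
                    handle_rx(ctx, nbytes);     /* hypothetical */
    }

Moving the drain loop into the callback also lets the service routine drop the ce_lock while the callback runs, instead of toggling it around every single completion.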
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index 7226c23..76906d5 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -39,17 +39,6 @@ MODULE_PARM_DESC(p2p, "Enable ath10k P2P support");
static const struct ath10k_hw_params ath10k_hw_params_list[] = {
{
- .id = QCA988X_HW_1_0_VERSION,
- .name = "qca988x hw1.0",
- .patch_load_addr = QCA988X_HW_1_0_PATCH_LOAD_ADDR,
- .fw = {
- .dir = QCA988X_HW_1_0_FW_DIR,
- .fw = QCA988X_HW_1_0_FW_FILE,
- .otp = QCA988X_HW_1_0_OTP_FILE,
- .board = QCA988X_HW_1_0_BOARD_DATA_FILE,
- },
- },
- {
.id = QCA988X_HW_2_0_VERSION,
.name = "qca988x hw2.0",
.patch_load_addr = QCA988X_HW_2_0_PATCH_LOAD_ADDR,
@@ -64,7 +53,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
static void ath10k_send_suspend_complete(struct ath10k *ar)
{
- ath10k_dbg(ATH10K_DBG_CORE, "%s\n", __func__);
+ ath10k_dbg(ATH10K_DBG_BOOT, "boot suspend complete\n");
ar->is_target_paused = true;
wake_up(&ar->event_queue);
@@ -112,7 +101,7 @@ static int ath10k_init_connect_htc(struct ath10k *ar)
goto timeout;
}
- ath10k_dbg(ATH10K_DBG_CORE, "core wmi ready\n");
+ ath10k_dbg(ATH10K_DBG_BOOT, "boot wmi ready\n");
return 0;
timeout:
@@ -214,8 +203,8 @@ static int ath10k_push_board_ext_data(struct ath10k *ar,
return ret;
}
- ath10k_dbg(ATH10K_DBG_CORE,
- "ath10k: Board extended Data download addr: 0x%x\n",
+ ath10k_dbg(ATH10K_DBG_BOOT,
+ "boot push board extended data addr 0x%x\n",
board_ext_data_addr);
if (board_ext_data_addr == 0)
@@ -446,6 +435,13 @@ static int ath10k_init_uart(struct ath10k *ar)
return ret;
}
+ /* Set the UART baud rate to 19200. */
+ ret = ath10k_bmi_write32(ar, hi_desired_baud_rate, 19200);
+ if (ret) {
+ ath10k_warn("could not set the baud rate (%d)\n", ret);
+ return ret;
+ }
+
ath10k_info("UART prints enabled\n");
return 0;
}
@@ -641,6 +637,10 @@ int ath10k_core_start(struct ath10k *ar)
if (status)
goto err_disconnect_htc;
+ status = ath10k_debug_start(ar);
+ if (status)
+ goto err_disconnect_htc;
+
ar->free_vdev_map = (1 << TARGET_NUM_VDEVS) - 1;
return 0;
@@ -658,6 +658,7 @@ EXPORT_SYMBOL(ath10k_core_start);
void ath10k_core_stop(struct ath10k *ar)
{
+ ath10k_debug_stop(ar);
ath10k_htc_stop(&ar->htc);
ath10k_htt_detach(&ar->htt);
ath10k_wmi_detach(ar);
@@ -717,10 +718,46 @@ static int ath10k_core_probe_fw(struct ath10k *ar)
return 0;
}
-int ath10k_core_register(struct ath10k *ar)
+static int ath10k_core_check_chip_id(struct ath10k *ar)
+{
+ u32 hw_revision = MS(ar->chip_id, SOC_CHIP_ID_REV);
+
+ ath10k_dbg(ATH10K_DBG_BOOT, "boot chip_id 0x%08x hw_revision 0x%x\n",
+ ar->chip_id, hw_revision);
+
+ /* Check that we are not using hw1.0 (some of them have the same pci
+ * id as hw2.0) before doing anything else, as ath10k crashes horribly
+ * due to missing hw1.0 workarounds. */
+ switch (hw_revision) {
+ case QCA988X_HW_1_0_CHIP_ID_REV:
+ ath10k_err("ERROR: qca988x hw1.0 is not supported\n");
+ return -EOPNOTSUPP;
+
+ case QCA988X_HW_2_0_CHIP_ID_REV:
+ /* known hardware revision, continue normally */
+ return 0;
+
+ default:
+ ath10k_warn("Warning: hardware revision unknown (0x%x), expect problems\n",
+ ar->chip_id);
+ return 0;
+ }
+
+ return 0;
+}
+
+int ath10k_core_register(struct ath10k *ar, u32 chip_id)
{
int status;
+ ar->chip_id = chip_id;
+
+ status = ath10k_core_check_chip_id(ar);
+ if (status) {
+ ath10k_err("Unsupported chip id 0x%08x\n", ar->chip_id);
+ return status;
+ }
+
status = ath10k_core_probe_fw(ar);
if (status) {
ath10k_err("could not probe fw (%d)\n", status);
@@ -755,6 +792,7 @@ void ath10k_core_unregister(struct ath10k *ar)
* Otherwise we will fail to submit commands to FW and mac80211 will be
* unhappy about callback failures. */
ath10k_mac_unregister(ar);
+
ath10k_core_free_firmware_files(ar);
}
EXPORT_SYMBOL(ath10k_core_unregister);
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index e4bba56..292ad45 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -52,18 +52,12 @@ struct ath10k_skb_cb {
struct {
u8 vdev_id;
- u16 msdu_id;
u8 tid;
bool is_offchan;
- bool is_conf;
- bool discard;
- bool no_ack;
- u8 refcount;
- struct sk_buff *txfrag;
- struct sk_buff *msdu;
- } __packed htt;
- /* 4 bytes left on 64bit arch */
+ u8 frag_len;
+ u8 pad_len;
+ } __packed htt;
} __packed;
static inline struct ath10k_skb_cb *ATH10K_SKB_CB(struct sk_buff *skb)
@@ -112,11 +106,7 @@ struct ath10k_wmi {
enum ath10k_htc_ep_id eid;
struct completion service_ready;
struct completion unified_ready;
- atomic_t pending_tx_count;
- wait_queue_head_t wq;
-
- struct sk_buff_head wmi_event_list;
- struct work_struct wmi_event_work;
+ wait_queue_head_t tx_credits_wq;
};
struct ath10k_peer_stat {
@@ -203,6 +193,7 @@ struct ath10k_vif {
enum wmi_vdev_subtype vdev_subtype;
u32 beacon_interval;
u32 dtim_period;
+ struct sk_buff *beacon;
struct ath10k *ar;
struct ieee80211_vif *vif;
@@ -246,6 +237,9 @@ struct ath10k_debug {
u32 wmi_service_bitmap[WMI_SERVICE_BM_SIZE];
struct completion event_stats_compl;
+
+ unsigned long htt_stats_mask;
+ struct delayed_work htt_stats_dwork;
};
enum ath10k_state {
@@ -270,12 +264,21 @@ enum ath10k_state {
ATH10K_STATE_WEDGED,
};
+enum ath10k_fw_features {
+ /* wmi_mgmt_rx_hdr contains extra RSSI information */
+ ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX = 0,
+
+ /* keep last */
+ ATH10K_FW_FEATURE_COUNT,
+};
+
struct ath10k {
struct ath_common ath_common;
struct ieee80211_hw *hw;
struct device *dev;
u8 mac_addr[ETH_ALEN];
+ u32 chip_id;
u32 target_version;
u8 fw_version_major;
u32 fw_version_minor;
@@ -288,6 +291,8 @@ struct ath10k {
u32 vht_cap_info;
u32 num_rf_chains;
+ DECLARE_BITMAP(fw_features, ATH10K_FW_FEATURE_COUNT);
+
struct targetdef *targetdef;
struct hostdef *hostdef;
@@ -393,7 +398,7 @@ void ath10k_core_destroy(struct ath10k *ar);
int ath10k_core_start(struct ath10k *ar);
void ath10k_core_stop(struct ath10k *ar);
-int ath10k_core_register(struct ath10k *ar);
+int ath10k_core_register(struct ath10k *ar, u32 chip_id);
void ath10k_core_unregister(struct ath10k *ar);
#endif /* _CORE_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
index 3d65594..59615c7 100644
--- a/drivers/net/wireless/ath/ath10k/debug.c
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -21,6 +21,9 @@
#include "core.h"
#include "debug.h"
+/* ms */
+#define ATH10K_DEBUG_HTT_STATS_INTERVAL 1000
+
static int ath10k_printk(const char *level, const char *fmt, ...)
{
struct va_format vaf;
@@ -260,7 +263,6 @@ void ath10k_debug_read_target_stats(struct ath10k *ar,
}
spin_unlock_bh(&ar->data_lock);
- mutex_unlock(&ar->conf_mutex);
complete(&ar->debug.event_stats_compl);
}
@@ -499,6 +501,136 @@ static const struct file_operations fops_simulate_fw_crash = {
.llseek = default_llseek,
};
+static ssize_t ath10k_read_chip_id(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ unsigned int len;
+ char buf[50];
+
+ len = scnprintf(buf, sizeof(buf), "0x%08x\n", ar->chip_id);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_chip_id = {
+ .read = ath10k_read_chip_id,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static int ath10k_debug_htt_stats_req(struct ath10k *ar)
+{
+ u64 cookie;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (ar->debug.htt_stats_mask == 0)
+ /* htt stats are disabled */
+ return 0;
+
+ if (ar->state != ATH10K_STATE_ON)
+ return 0;
+
+ cookie = get_jiffies_64();
+
+ ret = ath10k_htt_h2t_stats_req(&ar->htt, ar->debug.htt_stats_mask,
+ cookie);
+ if (ret) {
+ ath10k_warn("failed to send htt stats request: %d\n", ret);
+ return ret;
+ }
+
+ queue_delayed_work(ar->workqueue, &ar->debug.htt_stats_dwork,
+ msecs_to_jiffies(ATH10K_DEBUG_HTT_STATS_INTERVAL));
+
+ return 0;
+}
+
+static void ath10k_debug_htt_stats_dwork(struct work_struct *work)
+{
+ struct ath10k *ar = container_of(work, struct ath10k,
+ debug.htt_stats_dwork.work);
+
+ mutex_lock(&ar->conf_mutex);
+
+ ath10k_debug_htt_stats_req(ar);
+
+ mutex_unlock(&ar->conf_mutex);
+}
+
+static ssize_t ath10k_read_htt_stats_mask(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ char buf[32];
+ unsigned int len;
+
+ len = scnprintf(buf, sizeof(buf), "%lu\n", ar->debug.htt_stats_mask);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t ath10k_write_htt_stats_mask(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ unsigned long mask;
+ int ret;
+
+ ret = kstrtoul_from_user(user_buf, count, 0, &mask);
+ if (ret)
+ return ret;
+
+ /* max 8 bit masks (for now) */
+ if (mask > 0xff)
+ return -E2BIG;
+
+ mutex_lock(&ar->conf_mutex);
+
+ ar->debug.htt_stats_mask = mask;
+
+ ret = ath10k_debug_htt_stats_req(ar);
+ if (ret)
+ goto out;
+
+ ret = count;
+
+out:
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static const struct file_operations fops_htt_stats_mask = {
+ .read = ath10k_read_htt_stats_mask,
+ .write = ath10k_write_htt_stats_mask,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+int ath10k_debug_start(struct ath10k *ar)
+{
+ int ret;
+
+ ret = ath10k_debug_htt_stats_req(ar);
+ if (ret)
+ /* continue normally anyway, this isn't serious */
+ ath10k_warn("failed to start htt stats workqueue: %d\n", ret);
+
+ return 0;
+}
+
+void ath10k_debug_stop(struct ath10k *ar)
+{
+ cancel_delayed_work_sync(&ar->debug.htt_stats_dwork);
+}
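
Note: the debug start/stop pair above arms a self-rescheduling poll loop: ath10k_debug_htt_stats_req() sends one request and re-queues htt_stats_dwork, whose handler simply calls back into it under conf_mutex. A minimal kernel-style sketch of the same rearming pattern follows; the my_* names are placeholders, and it uses the system workqueue via schedule_delayed_work() where the driver uses its own queue_delayed_work(ar->workqueue, ...):

    #include <linux/workqueue.h>

    struct my_dev {
    	struct delayed_work poll_dwork;
    };

    static void my_poll_dwork(struct work_struct *work)
    {
    	struct my_dev *dev = container_of(work, struct my_dev,
    					  poll_dwork.work);

    	/* do one poll step, then rearm ourselves */
    	schedule_delayed_work(&dev->poll_dwork, msecs_to_jiffies(1000));
    }

    static void my_start(struct my_dev *dev)
    {
    	INIT_DELAYED_WORK(&dev->poll_dwork, my_poll_dwork);
    	schedule_delayed_work(&dev->poll_dwork, 0);
    }

    static void my_stop(struct my_dev *dev)
    {
    	cancel_delayed_work_sync(&dev->poll_dwork); /* wait and disarm */
    }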
+
int ath10k_debug_create(struct ath10k *ar)
{
ar->debug.debugfs_phy = debugfs_create_dir("ath10k",
@@ -507,6 +639,9 @@ int ath10k_debug_create(struct ath10k *ar)
if (!ar->debug.debugfs_phy)
return -ENOMEM;
+ INIT_DELAYED_WORK(&ar->debug.htt_stats_dwork,
+ ath10k_debug_htt_stats_dwork);
+
init_completion(&ar->debug.event_stats_compl);
debugfs_create_file("fw_stats", S_IRUSR, ar->debug.debugfs_phy, ar,
@@ -518,8 +653,15 @@ int ath10k_debug_create(struct ath10k *ar)
debugfs_create_file("simulate_fw_crash", S_IRUSR, ar->debug.debugfs_phy,
ar, &fops_simulate_fw_crash);
+ debugfs_create_file("chip_id", S_IRUSR, ar->debug.debugfs_phy,
+ ar, &fops_chip_id);
+
+ debugfs_create_file("htt_stats_mask", S_IRUSR, ar->debug.debugfs_phy,
+ ar, &fops_htt_stats_mask);
+
return 0;
}
+
#endif /* CONFIG_ATH10K_DEBUGFS */
#ifdef CONFIG_ATH10K_DEBUG
diff --git a/drivers/net/wireless/ath/ath10k/debug.h b/drivers/net/wireless/ath/ath10k/debug.h
index 168140c..fa58148 100644
--- a/drivers/net/wireless/ath/ath10k/debug.h
+++ b/drivers/net/wireless/ath/ath10k/debug.h
@@ -27,11 +27,12 @@ enum ath10k_debug_mask {
ATH10K_DBG_HTC = 0x00000004,
ATH10K_DBG_HTT = 0x00000008,
ATH10K_DBG_MAC = 0x00000010,
- ATH10K_DBG_CORE = 0x00000020,
+ ATH10K_DBG_BOOT = 0x00000020,
ATH10K_DBG_PCI_DUMP = 0x00000040,
ATH10K_DBG_HTT_DUMP = 0x00000080,
ATH10K_DBG_MGMT = 0x00000100,
ATH10K_DBG_DATA = 0x00000200,
+ ATH10K_DBG_BMI = 0x00000400,
ATH10K_DBG_ANY = 0xffffffff,
};
@@ -42,6 +43,8 @@ extern __printf(1, 2) int ath10k_err(const char *fmt, ...);
extern __printf(1, 2) int ath10k_warn(const char *fmt, ...);
#ifdef CONFIG_ATH10K_DEBUGFS
+int ath10k_debug_start(struct ath10k *ar);
+void ath10k_debug_stop(struct ath10k *ar);
int ath10k_debug_create(struct ath10k *ar);
void ath10k_debug_read_service_map(struct ath10k *ar,
void *service_map,
@@ -50,6 +53,15 @@ void ath10k_debug_read_target_stats(struct ath10k *ar,
struct wmi_stats_event *ev);
#else
+static inline int ath10k_debug_start(struct ath10k *ar)
+{
+ return 0;
+}
+
+static inline void ath10k_debug_stop(struct ath10k *ar)
+{
+}
+
static inline int ath10k_debug_create(struct ath10k *ar)
{
return 0;
diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
index ef3329e..3118d75 100644
--- a/drivers/net/wireless/ath/ath10k/htc.c
+++ b/drivers/net/wireless/ath/ath10k/htc.c
@@ -103,10 +103,10 @@ static void ath10k_htc_prepare_tx_skb(struct ath10k_htc_ep *ep,
struct ath10k_htc_hdr *hdr;
hdr = (struct ath10k_htc_hdr *)skb->data;
- memset(hdr, 0, sizeof(*hdr));
hdr->eid = ep->eid;
hdr->len = __cpu_to_le16(skb->len - sizeof(*hdr));
+ hdr->flags = 0;
spin_lock_bh(&ep->htc->tx_lock);
hdr->seq_no = ep->seq_no++;
@@ -117,134 +117,13 @@ static void ath10k_htc_prepare_tx_skb(struct ath10k_htc_ep *ep,
spin_unlock_bh(&ep->htc->tx_lock);
}
-static int ath10k_htc_issue_skb(struct ath10k_htc *htc,
- struct ath10k_htc_ep *ep,
- struct sk_buff *skb,
- u8 credits)
-{
- struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
- int ret;
-
- ath10k_dbg(ATH10K_DBG_HTC, "%s: ep %d skb %p\n", __func__,
- ep->eid, skb);
-
- ath10k_htc_prepare_tx_skb(ep, skb);
-
- ret = ath10k_skb_map(htc->ar->dev, skb);
- if (ret)
- goto err;
-
- ret = ath10k_hif_send_head(htc->ar,
- ep->ul_pipe_id,
- ep->eid,
- skb->len,
- skb);
- if (unlikely(ret))
- goto err;
-
- return 0;
-err:
- ath10k_warn("HTC issue failed: %d\n", ret);
-
- spin_lock_bh(&htc->tx_lock);
- ep->tx_credits += credits;
- spin_unlock_bh(&htc->tx_lock);
-
- /* this is the simplest way to handle out-of-resources for non-credit
- * based endpoints. credit based endpoints can still get -ENOSR, but
- * this is highly unlikely as credit reservation should prevent that */
- if (ret == -ENOSR) {
- spin_lock_bh(&htc->tx_lock);
- __skb_queue_head(&ep->tx_queue, skb);
- spin_unlock_bh(&htc->tx_lock);
-
- return ret;
- }
-
- skb_cb->is_aborted = true;
- ath10k_htc_notify_tx_completion(ep, skb);
-
- return ret;
-}
-
-static struct sk_buff *ath10k_htc_get_skb_credit_based(struct ath10k_htc *htc,
- struct ath10k_htc_ep *ep,
- u8 *credits)
-{
- struct sk_buff *skb;
- struct ath10k_skb_cb *skb_cb;
- int credits_required;
- int remainder;
- unsigned int transfer_len;
-
- lockdep_assert_held(&htc->tx_lock);
-
- skb = __skb_dequeue(&ep->tx_queue);
- if (!skb)
- return NULL;
-
- skb_cb = ATH10K_SKB_CB(skb);
- transfer_len = skb->len;
-
- if (likely(transfer_len <= htc->target_credit_size)) {
- credits_required = 1;
- } else {
- /* figure out how many credits this message requires */
- credits_required = transfer_len / htc->target_credit_size;
- remainder = transfer_len % htc->target_credit_size;
-
- if (remainder)
- credits_required++;
- }
-
- ath10k_dbg(ATH10K_DBG_HTC, "Credits required %d got %d\n",
- credits_required, ep->tx_credits);
-
- if (ep->tx_credits < credits_required) {
- __skb_queue_head(&ep->tx_queue, skb);
- return NULL;
- }
-
- ep->tx_credits -= credits_required;
- *credits = credits_required;
- return skb;
-}
-
-static void ath10k_htc_send_work(struct work_struct *work)
-{
- struct ath10k_htc_ep *ep = container_of(work,
- struct ath10k_htc_ep, send_work);
- struct ath10k_htc *htc = ep->htc;
- struct sk_buff *skb;
- u8 credits = 0;
- int ret;
-
- while (true) {
- if (ep->ul_is_polled)
- ath10k_htc_send_complete_check(ep, 0);
-
- spin_lock_bh(&htc->tx_lock);
- if (ep->tx_credit_flow_enabled)
- skb = ath10k_htc_get_skb_credit_based(htc, ep,
- &credits);
- else
- skb = __skb_dequeue(&ep->tx_queue);
- spin_unlock_bh(&htc->tx_lock);
-
- if (!skb)
- break;
-
- ret = ath10k_htc_issue_skb(htc, ep, skb, credits);
- if (ret == -ENOSR)
- break;
- }
-}
-
int ath10k_htc_send(struct ath10k_htc *htc,
enum ath10k_htc_ep_id eid,
struct sk_buff *skb)
{
struct ath10k_htc_ep *ep = &htc->endpoint[eid];
+ int credits = 0;
+ int ret;
if (htc->ar->state == ATH10K_STATE_WEDGED)
return -ECOMM;
@@ -254,18 +133,55 @@ int ath10k_htc_send(struct ath10k_htc *htc,
return -ENOENT;
}
+ /* FIXME: This looks ugly, can we fix it? */
spin_lock_bh(&htc->tx_lock);
if (htc->stopped) {
spin_unlock_bh(&htc->tx_lock);
return -ESHUTDOWN;
}
+ spin_unlock_bh(&htc->tx_lock);
- __skb_queue_tail(&ep->tx_queue, skb);
skb_push(skb, sizeof(struct ath10k_htc_hdr));
- spin_unlock_bh(&htc->tx_lock);
- queue_work(htc->ar->workqueue, &ep->send_work);
+ if (ep->tx_credit_flow_enabled) {
+ credits = DIV_ROUND_UP(skb->len, htc->target_credit_size);
+ spin_lock_bh(&htc->tx_lock);
+ if (ep->tx_credits < credits) {
+ spin_unlock_bh(&htc->tx_lock);
+ ret = -EAGAIN;
+ goto err_pull;
+ }
+ ep->tx_credits -= credits;
+ spin_unlock_bh(&htc->tx_lock);
+ }
+
+ ath10k_htc_prepare_tx_skb(ep, skb);
+
+ ret = ath10k_skb_map(htc->ar->dev, skb);
+ if (ret)
+ goto err_credits;
+
+ ret = ath10k_hif_send_head(htc->ar, ep->ul_pipe_id, ep->eid,
+ skb->len, skb);
+ if (ret)
+ goto err_unmap;
+
return 0;
+
+err_unmap:
+ ath10k_skb_unmap(htc->ar->dev, skb);
+err_credits:
+ if (ep->tx_credit_flow_enabled) {
+ spin_lock_bh(&htc->tx_lock);
+ ep->tx_credits += credits;
+ spin_unlock_bh(&htc->tx_lock);
+
+ if (ep->ep_ops.ep_tx_credits)
+ ep->ep_ops.ep_tx_credits(htc->ar);
+ }
+err_pull:
+ skb_pull(skb, sizeof(struct ath10k_htc_hdr));
+ return ret;
}
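
The rewritten send path above replaces the per-endpoint queue with an inline credit reservation: the whole frame (HTC header included, since it was pushed just before) is costed at DIV_ROUND_UP(skb->len, target_credit_size) credits, reserved under tx_lock, and refunded on any failure. A standalone sketch of the arithmetic; the 1792-byte credit size is only an assumption for illustration, the real value is negotiated with the target at HTC setup:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
    	unsigned int credit_size = 1792; /* assumed target credit size */
    	unsigned int lens[] = { 64, 1792, 1793, 3000 };
    	unsigned int i;

    	for (i = 0; i < 4; i++)
    		printf("len %4u -> %u credit(s)\n", lens[i],
    		       DIV_ROUND_UP(lens[i], credit_size));
    	/* prints: 64 -> 1, 1792 -> 1, 1793 -> 2, 3000 -> 2 */
    	return 0;
    }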
static int ath10k_htc_tx_completion_handler(struct ath10k *ar,
@@ -278,39 +194,9 @@ static int ath10k_htc_tx_completion_handler(struct ath10k *ar,
ath10k_htc_notify_tx_completion(ep, skb);
/* the skb now belongs to the completion handler */
- /* note: when using TX credit flow, the re-checking of queues happens
- * when credits flow back from the target. in the non-TX credit case,
- * we recheck after the packet completes */
- spin_lock_bh(&htc->tx_lock);
- if (!ep->tx_credit_flow_enabled && !htc->stopped)
- queue_work(ar->workqueue, &ep->send_work);
- spin_unlock_bh(&htc->tx_lock);
-
return 0;
}
-/* flush endpoint TX queue */
-static void ath10k_htc_flush_endpoint_tx(struct ath10k_htc *htc,
- struct ath10k_htc_ep *ep)
-{
- struct sk_buff *skb;
- struct ath10k_skb_cb *skb_cb;
-
- spin_lock_bh(&htc->tx_lock);
- for (;;) {
- skb = __skb_dequeue(&ep->tx_queue);
- if (!skb)
- break;
-
- skb_cb = ATH10K_SKB_CB(skb);
- skb_cb->is_aborted = true;
- ath10k_htc_notify_tx_completion(ep, skb);
- }
- spin_unlock_bh(&htc->tx_lock);
-
- cancel_work_sync(&ep->send_work);
-}
-
/***********/
/* Receive */
/***********/
@@ -340,8 +226,11 @@ ath10k_htc_process_credit_report(struct ath10k_htc *htc,
ep = &htc->endpoint[report->eid];
ep->tx_credits += report->credits;
- if (ep->tx_credits && !skb_queue_empty(&ep->tx_queue))
- queue_work(htc->ar->workqueue, &ep->send_work);
+ if (ep->ep_ops.ep_tx_credits) {
+ spin_unlock_bh(&htc->tx_lock);
+ ep->ep_ops.ep_tx_credits(htc->ar);
+ spin_lock_bh(&htc->tx_lock);
+ }
}
spin_unlock_bh(&htc->tx_lock);
}
@@ -599,10 +488,8 @@ static void ath10k_htc_reset_endpoint_states(struct ath10k_htc *htc)
ep->max_ep_message_len = 0;
ep->max_tx_queue_depth = 0;
ep->eid = i;
- skb_queue_head_init(&ep->tx_queue);
ep->htc = htc;
ep->tx_credit_flow_enabled = true;
- INIT_WORK(&ep->send_work, ath10k_htc_send_work);
}
}
@@ -752,8 +639,8 @@ int ath10k_htc_connect_service(struct ath10k_htc *htc,
tx_alloc = ath10k_htc_get_credit_allocation(htc,
conn_req->service_id);
if (!tx_alloc)
- ath10k_dbg(ATH10K_DBG_HTC,
- "HTC Service %s does not allocate target credits\n",
+ ath10k_dbg(ATH10K_DBG_BOOT,
+ "boot htc service %s does not allocate target credits\n",
htc_service_name(conn_req->service_id));
skb = ath10k_htc_build_tx_ctrl_skb(htc->ar);
@@ -772,16 +659,16 @@ int ath10k_htc_connect_service(struct ath10k_htc *htc,
flags |= SM(tx_alloc, ATH10K_HTC_CONN_FLAGS_RECV_ALLOC);
- req_msg = &msg->connect_service;
- req_msg->flags = __cpu_to_le16(flags);
- req_msg->service_id = __cpu_to_le16(conn_req->service_id);
-
/* Only enable credit flow control for WMI ctrl service */
if (conn_req->service_id != ATH10K_HTC_SVC_ID_WMI_CONTROL) {
flags |= ATH10K_HTC_CONN_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
disable_credit_flow_ctrl = true;
}
+ req_msg = &msg->connect_service;
+ req_msg->flags = __cpu_to_le16(flags);
+ req_msg->service_id = __cpu_to_le16(conn_req->service_id);
+
INIT_COMPLETION(htc->ctl_resp);
status = ath10k_htc_send(htc, ATH10K_HTC_EP_0, skb);
@@ -873,19 +760,19 @@ setup:
if (status)
return status;
- ath10k_dbg(ATH10K_DBG_HTC,
- "HTC service: %s UL pipe: %d DL pipe: %d eid: %d ready\n",
+ ath10k_dbg(ATH10K_DBG_BOOT,
+ "boot htc service '%s' ul pipe %d dl pipe %d eid %d ready\n",
htc_service_name(ep->service_id), ep->ul_pipe_id,
ep->dl_pipe_id, ep->eid);
- ath10k_dbg(ATH10K_DBG_HTC,
- "EP %d UL polled: %d, DL polled: %d\n",
+ ath10k_dbg(ATH10K_DBG_BOOT,
+ "boot htc ep %d ul polled %d dl polled %d\n",
ep->eid, ep->ul_is_polled, ep->dl_is_polled);
if (disable_credit_flow_ctrl && ep->tx_credit_flow_enabled) {
ep->tx_credit_flow_enabled = false;
- ath10k_dbg(ATH10K_DBG_HTC,
- "HTC service: %s eid: %d TX flow control disabled\n",
+ ath10k_dbg(ATH10K_DBG_BOOT,
+ "boot htc service '%s' eid %d TX flow control disabled\n",
htc_service_name(ep->service_id), assigned_eid);
}
@@ -945,18 +832,10 @@ int ath10k_htc_start(struct ath10k_htc *htc)
*/
void ath10k_htc_stop(struct ath10k_htc *htc)
{
- int i;
- struct ath10k_htc_ep *ep;
-
spin_lock_bh(&htc->tx_lock);
htc->stopped = true;
spin_unlock_bh(&htc->tx_lock);
- for (i = ATH10K_HTC_EP_0; i < ATH10K_HTC_EP_COUNT; i++) {
- ep = &htc->endpoint[i];
- ath10k_htc_flush_endpoint_tx(htc, ep);
- }
-
ath10k_hif_stop(htc->ar);
}
diff --git a/drivers/net/wireless/ath/ath10k/htc.h b/drivers/net/wireless/ath/ath10k/htc.h
index e1dd8c7..4716d33 100644
--- a/drivers/net/wireless/ath/ath10k/htc.h
+++ b/drivers/net/wireless/ath/ath10k/htc.h
@@ -276,6 +276,7 @@ struct ath10k_htc_ops {
struct ath10k_htc_ep_ops {
void (*ep_tx_complete)(struct ath10k *, struct sk_buff *);
void (*ep_rx_complete)(struct ath10k *, struct sk_buff *);
+ void (*ep_tx_credits)(struct ath10k *);
};
/* service connection information */
@@ -315,15 +316,11 @@ struct ath10k_htc_ep {
int ul_is_polled; /* call HIF to get tx completions */
int dl_is_polled; /* call HIF to fetch rx (not implemented) */
- struct sk_buff_head tx_queue;
-
u8 seq_no; /* for debugging */
int tx_credits;
int tx_credit_size;
int tx_credits_per_max_message;
bool tx_credit_flow_enabled;
-
- struct work_struct send_work;
};
struct ath10k_htc_svc_tx_credits {
diff --git a/drivers/net/wireless/ath/ath10k/htt.c b/drivers/net/wireless/ath/ath10k/htt.c
index 39342c5..5f7eeeb 100644
--- a/drivers/net/wireless/ath/ath10k/htt.c
+++ b/drivers/net/wireless/ath/ath10k/htt.c
@@ -104,21 +104,16 @@ err_htc_attach:
static int ath10k_htt_verify_version(struct ath10k_htt *htt)
{
- ath10k_dbg(ATH10K_DBG_HTT,
- "htt target version %d.%d; host version %d.%d\n",
- htt->target_version_major,
- htt->target_version_minor,
- HTT_CURRENT_VERSION_MAJOR,
- HTT_CURRENT_VERSION_MINOR);
-
- if (htt->target_version_major != HTT_CURRENT_VERSION_MAJOR) {
- ath10k_err("htt major versions are incompatible!\n");
+ ath10k_info("htt target version %d.%d\n",
+ htt->target_version_major, htt->target_version_minor);
+
+ if (htt->target_version_major != 2 &&
+ htt->target_version_major != 3) {
+ ath10k_err("unsupported htt major version %d. supported versions are 2 and 3\n",
+ htt->target_version_major);
return -ENOTSUPP;
}
- if (htt->target_version_minor != HTT_CURRENT_VERSION_MINOR)
- ath10k_warn("htt minor version differ but still compatible\n");
-
return 0;
}
diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h
index 318be46..1a337e9 100644
--- a/drivers/net/wireless/ath/ath10k/htt.h
+++ b/drivers/net/wireless/ath/ath10k/htt.h
@@ -19,13 +19,11 @@
#define _HTT_H_
#include <linux/bug.h>
+#include <linux/interrupt.h>
#include "htc.h"
#include "rx_desc.h"
-#define HTT_CURRENT_VERSION_MAJOR 2
-#define HTT_CURRENT_VERSION_MINOR 1
-
enum htt_dbg_stats_type {
HTT_DBG_STATS_WAL_PDEV_TXRX = 1 << 0,
HTT_DBG_STATS_RX_REORDER = 1 << 1,
@@ -45,6 +43,9 @@ enum htt_h2t_msg_type { /* host-to-target */
HTT_H2T_MSG_TYPE_SYNC = 4,
HTT_H2T_MSG_TYPE_AGGR_CFG = 5,
HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG = 6,
+
+ /* This command is used for sending management frames in HTT < 3.0.
+ * HTT >= 3.0 uses TX_FRM for everything. */
HTT_H2T_MSG_TYPE_MGMT_TX = 7,
HTT_H2T_NUM_MSGS /* keep this last */
@@ -1268,6 +1269,7 @@ struct ath10k_htt {
/* set if host-fw communication goes haywire
* used to avoid further failures */
bool rx_confused;
+ struct tasklet_struct rx_replenish_task;
};
#define RX_HTT_HDR_STATUS_LEN 64
@@ -1308,6 +1310,10 @@ struct htt_rx_desc {
#define HTT_RX_BUF_SIZE 1920
#define HTT_RX_MSDU_SIZE (HTT_RX_BUF_SIZE - (int)sizeof(struct htt_rx_desc))
+/* Refill a bunch of RX buffers for each refill round so that FW/HW can handle
+ * aggregated traffic more nicely. */
+#define ATH10K_HTT_MAX_NUM_REFILL 16
+
/*
* DMA_MAP expects the buffer to be an integral number of cache lines.
* Rather than checking the actual cache line size, this code makes a
@@ -1327,6 +1333,7 @@ void ath10k_htt_rx_detach(struct ath10k_htt *htt);
void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb);
void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb);
int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt);
+int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie);
int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt);
void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt);
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index e784c40..90d4f74 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -20,6 +20,7 @@
#include "htt.h"
#include "txrx.h"
#include "debug.h"
+#include "trace.h"
#include <linux/log2.h>
@@ -40,6 +41,10 @@
/* when under memory pressure rx ring refill may fail and needs a retry */
#define HTT_RX_RING_REFILL_RETRY_MS 50
+
+static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb);
+
+
static int ath10k_htt_rx_ring_size(struct ath10k_htt *htt)
{
int size;
@@ -177,10 +182,27 @@ static int ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
{
- int ret, num_to_fill;
+ int ret, num_deficit, num_to_fill;
+ /* Refilling the whole RX ring buffer proves to be a bad idea. The
+ * reason is RX may take up a significant amount of CPU cycles and
+ * starve other tasks, e.g. TX on an ethernet device while acting as
+ * a bridge with an ath10k wlan interface. This ended up with very
+ * poor performance once the CPU of the host system was overwhelmed
+ * with RX on ath10k.
+ *
+ * By limiting the number of refills the replenishing occurs
+ * progressively. This in turn makes use of the fact that tasklets
+ * are processed in FIFO order. This means actual RX processing can
+ * starve out refilling. If there are not enough buffers on the RX
+ * ring the FW will not report RX until it is refilled with enough
+ * buffers. This automatically balances load with respect to CPU
+ * power.
+ *
+ * This probably comes at the cost of lower maximum throughput but
+ * improves the average and stability. */
spin_lock_bh(&htt->rx_ring.lock);
- num_to_fill = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt;
+ num_deficit = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt;
+ num_to_fill = min(ATH10K_HTT_MAX_NUM_REFILL, num_deficit);
+ num_deficit -= num_to_fill;
ret = ath10k_htt_rx_ring_fill_n(htt, num_to_fill);
if (ret == -ENOMEM) {
/*
@@ -191,6 +213,8 @@ static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
*/
mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS));
+ } else if (num_deficit > 0) {
+ tasklet_schedule(&htt->rx_replenish_task);
}
spin_unlock_bh(&htt->rx_ring.lock);
}
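
A standalone sketch of the bounded refill above: each pass fills at most ATH10K_HTT_MAX_NUM_REFILL buffers and reports whether another pass (the rescheduled tasklet in the driver) is still needed. The helper below stands in for ath10k_htt_rx_ring_fill_n() and ignores allocation failure for brevity:

    #include <stdio.h>

    #define ATH10K_HTT_MAX_NUM_REFILL 16
    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    /* Returns the remaining deficit after one bounded refill pass. */
    static int refill_pass(int fill_level, int *fill_cnt)
    {
    	int num_deficit = fill_level - *fill_cnt;
    	int num_to_fill = MIN(ATH10K_HTT_MAX_NUM_REFILL, num_deficit);

    	*fill_cnt += num_to_fill; /* stands in for fill_n() */
    	return num_deficit - num_to_fill;
    }

    int main(void)
    {
    	int fill_cnt = 0, deficit, passes = 0;

    	do {
    		deficit = refill_pass(50, &fill_cnt);
    		passes++;
    	} while (deficit > 0); /* tasklet_schedule() analogue */

    	/* 50 missing buffers take 4 passes of at most 16 each */
    	printf("filled %d buffers in %d passes\n", fill_cnt, passes);
    	return 0;
    }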
@@ -212,6 +236,7 @@ void ath10k_htt_rx_detach(struct ath10k_htt *htt)
int sw_rd_idx = htt->rx_ring.sw_rd_idx.msdu_payld;
del_timer_sync(&htt->rx_ring.refill_retry_timer);
+ tasklet_kill(&htt->rx_replenish_task);
while (sw_rd_idx != __le32_to_cpu(*(htt->rx_ring.alloc_idx.vaddr))) {
struct sk_buff *skb =
@@ -441,6 +466,12 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
return msdu_chaining;
}
+static void ath10k_htt_rx_replenish_task(unsigned long ptr)
+{
+ struct ath10k_htt *htt = (struct ath10k_htt *)ptr;
+ ath10k_htt_rx_msdu_buff_replenish(htt);
+}
+
int ath10k_htt_rx_attach(struct ath10k_htt *htt)
{
dma_addr_t paddr;
@@ -501,7 +532,10 @@ int ath10k_htt_rx_attach(struct ath10k_htt *htt)
if (__ath10k_htt_rx_ring_fill_n(htt, htt->rx_ring.fill_level))
goto err_fill_ring;
- ath10k_dbg(ATH10K_DBG_HTT, "HTT RX ring size: %d, fill_level: %d\n",
+ tasklet_init(&htt->rx_replenish_task, ath10k_htt_rx_replenish_task,
+ (unsigned long)htt);
+
+ ath10k_dbg(ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n",
htt->rx_ring.size, htt->rx_ring.fill_level);
return 0;
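
For reference, the tasklet lifecycle wired up above follows the usual three calls: tasklet_init() binds the handler and an opaque unsigned long argument at attach time, tasklet_schedule() queues one run in softirq context, and tasklet_kill() in the detach path waits out any pending run. A minimal kernel-style sketch mirroring the rx_replenish_task wiring; the my_* names are placeholders:

    #include <linux/interrupt.h>

    struct my_ctx {
    	int pending;
    };

    static void my_task_fn(unsigned long ptr)
    {
    	struct my_ctx *ctx = (struct my_ctx *)ptr;

    	ctx->pending = 0; /* do the deferred work here */
    }

    static struct my_ctx ctx;
    static struct tasklet_struct my_task;

    static void my_attach(void)
    {
    	tasklet_init(&my_task, my_task_fn, (unsigned long)&ctx);
    }

    static void my_kick(void)
    {
    	ctx.pending = 1;
    	tasklet_schedule(&my_task); /* runs my_task_fn in softirq */
    }

    static void my_detach(void)
    {
    	tasklet_kill(&my_task); /* wait out any scheduled run */
    }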
@@ -590,134 +624,144 @@ static bool ath10k_htt_rx_hdr_is_amsdu(struct ieee80211_hdr *hdr)
return false;
}
-static int ath10k_htt_rx_amsdu(struct ath10k_htt *htt,
- struct htt_rx_info *info)
+struct rfc1042_hdr {
+ u8 llc_dsap;
+ u8 llc_ssap;
+ u8 llc_ctrl;
+ u8 snap_oui[3];
+ __be16 snap_type;
+} __packed;
+
+struct amsdu_subframe_hdr {
+ u8 dst[ETH_ALEN];
+ u8 src[ETH_ALEN];
+ __be16 len;
+} __packed;
+
+static void ath10k_htt_rx_amsdu(struct ath10k_htt *htt,
+ struct htt_rx_info *info)
{
struct htt_rx_desc *rxd;
- struct sk_buff *amsdu;
struct sk_buff *first;
- struct ieee80211_hdr *hdr;
struct sk_buff *skb = info->skb;
enum rx_msdu_decap_format fmt;
enum htt_rx_mpdu_encrypt_type enctype;
+ struct ieee80211_hdr *hdr;
+ u8 hdr_buf[64], addr[ETH_ALEN], *qos;
unsigned int hdr_len;
- int crypto_len;
rxd = (void *)skb->data - sizeof(*rxd);
- fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
- RX_MSDU_START_INFO1_DECAP_FORMAT);
enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
RX_MPDU_START_INFO0_ENCRYPT_TYPE);
- /* FIXME: No idea what assumptions are safe here. Need logs */
- if ((fmt == RX_MSDU_DECAP_RAW && skb->next) ||
- (fmt == RX_MSDU_DECAP_8023_SNAP_LLC)) {
- ath10k_htt_rx_free_msdu_chain(skb->next);
- skb->next = NULL;
- return -ENOTSUPP;
- }
+ hdr = (struct ieee80211_hdr *)rxd->rx_hdr_status;
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+ memcpy(hdr_buf, hdr, hdr_len);
+ hdr = (struct ieee80211_hdr *)hdr_buf;
- /* A-MSDU max is a little less than 8K */
- amsdu = dev_alloc_skb(8*1024);
- if (!amsdu) {
- ath10k_warn("A-MSDU allocation failed\n");
- ath10k_htt_rx_free_msdu_chain(skb->next);
- skb->next = NULL;
- return -ENOMEM;
- }
-
- if (fmt >= RX_MSDU_DECAP_NATIVE_WIFI) {
- int hdrlen;
-
- hdr = (void *)rxd->rx_hdr_status;
- hdrlen = ieee80211_hdrlen(hdr->frame_control);
- memcpy(skb_put(amsdu, hdrlen), hdr, hdrlen);
- }
+ /* FIXME: Hopefully this is a temporary measure.
+ *
+ * Reporting individual A-MSDU subframes means each reported frame
+ * shares the same sequence number.
+ *
+ * mac80211 drops frames it recognizes as duplicates, i.e.
+ * retransmission flag is set and sequence number matches sequence
+ * number from a previous frame (as per IEEE 802.11-2012: 9.3.2.10
+ * "Duplicate detection and recovery")
+ *
+ * To avoid frames being dropped clear retransmission flag for all
+ * received A-MSDUs.
+ *
+ * Worst case: actual duplicate frames will be reported but this should
+ * still be handled gracefully by other OSI/ISO layers. */
+ hdr->frame_control &= cpu_to_le16(~IEEE80211_FCTL_RETRY);
first = skb;
while (skb) {
void *decap_hdr;
- int decap_len = 0;
+ int len;
rxd = (void *)skb->data - sizeof(*rxd);
fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
- RX_MSDU_START_INFO1_DECAP_FORMAT);
+ RX_MSDU_START_INFO1_DECAP_FORMAT);
decap_hdr = (void *)rxd->rx_hdr_status;
- if (skb == first) {
- /* We receive linked A-MSDU subframe skbuffs. The
- * first one contains the original 802.11 header (and
- * possible crypto param) in the RX descriptor. The
- * A-MSDU subframe header follows that. Each part is
- * aligned to 4 byte boundary. */
-
- hdr = (void *)amsdu->data;
- hdr_len = ieee80211_hdrlen(hdr->frame_control);
- crypto_len = ath10k_htt_rx_crypto_param_len(enctype);
-
- decap_hdr += roundup(hdr_len, 4);
- decap_hdr += roundup(crypto_len, 4);
- }
+ skb->ip_summed = ath10k_htt_rx_get_csum_state(skb);
- if (fmt == RX_MSDU_DECAP_ETHERNET2_DIX) {
- /* Ethernet2 decap inserts ethernet header in place of
- * A-MSDU subframe header. */
- skb_pull(skb, 6 + 6 + 2);
-
- /* A-MSDU subframe header length */
- decap_len += 6 + 6 + 2;
-
- /* Ethernet2 decap also strips the LLC/SNAP so we need
- * to re-insert it. The LLC/SNAP follows A-MSDU
- * subframe header. */
- /* FIXME: Not all LLCs are 8 bytes long */
- decap_len += 8;
-
- memcpy(skb_put(amsdu, decap_len), decap_hdr, decap_len);
+ /* First frame in an A-MSDU chain has more decapped data. */
+ if (skb == first) {
+ len = round_up(ieee80211_hdrlen(hdr->frame_control), 4);
+ len += round_up(ath10k_htt_rx_crypto_param_len(enctype),
+ 4);
+ decap_hdr += len;
}
- if (fmt == RX_MSDU_DECAP_NATIVE_WIFI) {
- /* Native Wifi decap inserts regular 802.11 header
- * in place of A-MSDU subframe header. */
+ switch (fmt) {
+ case RX_MSDU_DECAP_RAW:
+ /* remove trailing FCS */
+ skb_trim(skb, skb->len - FCS_LEN);
+ break;
+ case RX_MSDU_DECAP_NATIVE_WIFI:
+ /* pull decapped header and copy DA */
hdr = (struct ieee80211_hdr *)skb->data;
- skb_pull(skb, ieee80211_hdrlen(hdr->frame_control));
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+ memcpy(addr, ieee80211_get_DA(hdr), ETH_ALEN);
+ skb_pull(skb, hdr_len);
- /* A-MSDU subframe header length */
- decap_len += 6 + 6 + 2;
+ /* push original 802.11 header */
+ hdr = (struct ieee80211_hdr *)hdr_buf;
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+ memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
- memcpy(skb_put(amsdu, decap_len), decap_hdr, decap_len);
- }
+ /* original A-MSDU header has the bit set but we're
+ * not including A-MSDU subframe header */
+ hdr = (struct ieee80211_hdr *)skb->data;
+ qos = ieee80211_get_qos_ctl(hdr);
+ qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
- if (fmt == RX_MSDU_DECAP_RAW)
- skb_trim(skb, skb->len - 4); /* remove FCS */
+ /* original 802.11 header has a different DA */
+ memcpy(ieee80211_get_DA(hdr), addr, ETH_ALEN);
+ break;
+ case RX_MSDU_DECAP_ETHERNET2_DIX:
+ /* strip ethernet header and insert decapped 802.11
+ * header, amsdu subframe header and rfc1042 header */
- memcpy(skb_put(amsdu, skb->len), skb->data, skb->len);
+ len = 0;
+ len += sizeof(struct rfc1042_hdr);
+ len += sizeof(struct amsdu_subframe_hdr);
- /* A-MSDU subframes are padded to 4bytes
- * but relative to first subframe, not the whole MPDU */
- if (skb->next && ((decap_len + skb->len) & 3)) {
- int padlen = 4 - ((decap_len + skb->len) & 3);
- memset(skb_put(amsdu, padlen), 0, padlen);
+ skb_pull(skb, sizeof(struct ethhdr));
+ memcpy(skb_push(skb, len), decap_hdr, len);
+ memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
+ break;
+ case RX_MSDU_DECAP_8023_SNAP_LLC:
+ /* insert decapped 802.11 header making a single
+ * A-MSDU */
+ memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
+ break;
}
+ info->skb = skb;
+ info->encrypt_type = enctype;
skb = skb->next;
- }
+ info->skb->next = NULL;
- info->skb = amsdu;
- info->encrypt_type = enctype;
-
- ath10k_htt_rx_free_msdu_chain(first);
+ ath10k_process_rx(htt->ar, info);
+ }
- return 0;
+ /* FIXME: It might be nice to re-assemble the A-MSDU when there's a
+ * monitor interface active for sniffing purposes. */
}
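
The retry-bit workaround in the hunk above is a one-liner on the little-endian frame_control field. A standalone sketch of the bit manipulation; IEEE80211_FCTL_RETRY is 0x0800 in 802.11 frame control, and the cpu_to_le16() stand-in below assumes a little-endian host purely for illustration:

    #include <stdint.h>
    #include <stdio.h>

    #define IEEE80211_FCTL_RETRY 0x0800

    /* stand-in for the kernel helper, little-endian host assumed */
    static uint16_t cpu_to_le16(uint16_t v) { return v; }

    int main(void)
    {
    	uint16_t fc = cpu_to_le16(0x0888); /* QoS data, retry set */

    	/* Clear the retry flag so mac80211 does not drop A-MSDU
    	 * subframes sharing one sequence number as duplicates. */
    	fc &= cpu_to_le16((uint16_t)~IEEE80211_FCTL_RETRY);

    	printf("fc 0x%04x\n", fc); /* prints 0x0088 */
    	return 0;
    }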
-static int ath10k_htt_rx_msdu(struct ath10k_htt *htt, struct htt_rx_info *info)
+static void ath10k_htt_rx_msdu(struct ath10k_htt *htt, struct htt_rx_info *info)
{
struct sk_buff *skb = info->skb;
struct htt_rx_desc *rxd;
struct ieee80211_hdr *hdr;
enum rx_msdu_decap_format fmt;
enum htt_rx_mpdu_encrypt_type enctype;
+ int hdr_len;
+ void *rfc1042;
/* This shouldn't happen. If it does than it may be a FW bug. */
if (skb->next) {
@@ -731,49 +775,53 @@ static int ath10k_htt_rx_msdu(struct ath10k_htt *htt, struct htt_rx_info *info)
RX_MSDU_START_INFO1_DECAP_FORMAT);
enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
RX_MPDU_START_INFO0_ENCRYPT_TYPE);
- hdr = (void *)skb->data - RX_HTT_HDR_STATUS_LEN;
+ hdr = (struct ieee80211_hdr *)rxd->rx_hdr_status;
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+
+ skb->ip_summed = ath10k_htt_rx_get_csum_state(skb);
switch (fmt) {
case RX_MSDU_DECAP_RAW:
/* remove trailing FCS */
- skb_trim(skb, skb->len - 4);
+ skb_trim(skb, skb->len - FCS_LEN);
break;
case RX_MSDU_DECAP_NATIVE_WIFI:
- /* nothing to do here */
+ /* Pull decapped header */
+ hdr = (struct ieee80211_hdr *)skb->data;
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+ skb_pull(skb, hdr_len);
+
+ /* Push original header */
+ hdr = (struct ieee80211_hdr *)rxd->rx_hdr_status;
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+ memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
break;
case RX_MSDU_DECAP_ETHERNET2_DIX:
- /* macaddr[6] + macaddr[6] + ethertype[2] */
- skb_pull(skb, 6 + 6 + 2);
- break;
- case RX_MSDU_DECAP_8023_SNAP_LLC:
- /* macaddr[6] + macaddr[6] + len[2] */
- /* we don't need this for non-A-MSDU */
- skb_pull(skb, 6 + 6 + 2);
- break;
- }
+ /* strip ethernet header and insert decapped 802.11 header and
+ * rfc1042 header */
- if (fmt == RX_MSDU_DECAP_ETHERNET2_DIX) {
- void *llc;
- int llclen;
+ rfc1042 = hdr;
+ rfc1042 += roundup(hdr_len, 4);
+ rfc1042 += roundup(ath10k_htt_rx_crypto_param_len(enctype), 4);
- llclen = 8;
- llc = hdr;
- llc += roundup(ieee80211_hdrlen(hdr->frame_control), 4);
- llc += roundup(ath10k_htt_rx_crypto_param_len(enctype), 4);
-
- skb_push(skb, llclen);
- memcpy(skb->data, llc, llclen);
- }
+ skb_pull(skb, sizeof(struct ethhdr));
+ memcpy(skb_push(skb, sizeof(struct rfc1042_hdr)),
+ rfc1042, sizeof(struct rfc1042_hdr));
+ memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
+ break;
+ case RX_MSDU_DECAP_8023_SNAP_LLC:
+ /* remove A-MSDU subframe header and insert
+ * decapped 802.11 header. rfc1042 header is already there */
- if (fmt >= RX_MSDU_DECAP_ETHERNET2_DIX) {
- int len = ieee80211_hdrlen(hdr->frame_control);
- skb_push(skb, len);
- memcpy(skb->data, hdr, len);
+ skb_pull(skb, sizeof(struct amsdu_subframe_hdr));
+ memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
+ break;
}
info->skb = skb;
info->encrypt_type = enctype;
- return 0;
+
+ ath10k_process_rx(htt->ar, info);
}
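
Both decap paths above locate the RFC 1042 header inside the firmware-written rx_hdr_status area by skipping the 802.11 header and any crypto parameters, each rounded up to a 4-byte boundary. A standalone sketch of that offset arithmetic; the 26-byte QoS header and 8-byte CCMP parameters are example inputs, not fixed values:

    #include <stdio.h>

    #define ROUNDUP4(x) (((x) + 3) & ~3u)

    int main(void)
    {
    	unsigned int hdr_len = 26;   /* e.g. QoS data header */
    	unsigned int crypto_len = 8; /* e.g. CCMP parameters */

    	/* rfc1042 = rx_hdr_status + roundup(hdr_len, 4)
    	 *                         + roundup(crypto_len, 4) */
    	unsigned int off = ROUNDUP4(hdr_len) + ROUNDUP4(crypto_len);

    	printf("rfc1042 header at offset %u\n", off); /* 28 + 8 = 36 */
    	return 0;
    }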
static bool ath10k_htt_rx_has_decrypt_err(struct sk_buff *skb)
@@ -845,8 +893,6 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
int fw_desc_len;
u8 *fw_desc;
int i, j;
- int ret;
- int ip_summed;
memset(&info, 0, sizeof(info));
@@ -921,11 +967,6 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
continue;
}
- /* The skb is not yet processed and it may be
- * reallocated. Since the offload is in the original
- * skb extract the checksum now and assign it later */
- ip_summed = ath10k_htt_rx_get_csum_state(msdu_head);
-
info.skb = msdu_head;
info.fcs_err = ath10k_htt_rx_has_fcs_err(msdu_head);
info.signal = ATH10K_DEFAULT_NOISE_FLOOR;
@@ -938,28 +979,13 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
hdr = ath10k_htt_rx_skb_get_hdr(msdu_head);
if (ath10k_htt_rx_hdr_is_amsdu(hdr))
- ret = ath10k_htt_rx_amsdu(htt, &info);
+ ath10k_htt_rx_amsdu(htt, &info);
else
- ret = ath10k_htt_rx_msdu(htt, &info);
-
- if (ret && !info.fcs_err) {
- ath10k_warn("error processing msdus %d\n", ret);
- dev_kfree_skb_any(info.skb);
- continue;
- }
-
- if (ath10k_htt_rx_hdr_is_amsdu((void *)info.skb->data))
- ath10k_dbg(ATH10K_DBG_HTT, "htt mpdu is amsdu\n");
-
- info.skb->ip_summed = ip_summed;
-
- ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt mpdu: ",
- info.skb->data, info.skb->len);
- ath10k_process_rx(htt->ar, &info);
+ ath10k_htt_rx_msdu(htt, &info);
}
}
- ath10k_htt_rx_msdu_buff_replenish(htt);
+ tasklet_schedule(&htt->rx_replenish_task);
}
static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
@@ -1131,7 +1157,7 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
break;
}
- ath10k_txrx_tx_completed(htt, &tx_done);
+ ath10k_txrx_tx_unref(htt, &tx_done);
break;
}
case HTT_T2H_MSG_TYPE_TX_COMPL_IND: {
@@ -1165,7 +1191,7 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
for (i = 0; i < resp->data_tx_completion.num_msdus; i++) {
msdu_id = resp->data_tx_completion.msdus[i];
tx_done.msdu_id = __le16_to_cpu(msdu_id);
- ath10k_txrx_tx_completed(htt, &tx_done);
+ ath10k_txrx_tx_unref(htt, &tx_done);
}
break;
}
@@ -1190,8 +1216,10 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
case HTT_T2H_MSG_TYPE_TEST:
/* FIX THIS */
break;
- case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
case HTT_T2H_MSG_TYPE_STATS_CONF:
+ trace_ath10k_htt_stats(skb->data, skb->len);
+ break;
+ case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
case HTT_T2H_MSG_TYPE_RX_ADDBA:
case HTT_T2H_MSG_TYPE_RX_DELBA:
case HTT_T2H_MSG_TYPE_RX_FLUSH:
diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c
index 656c254..3b93c6a 100644
--- a/drivers/net/wireless/ath/ath10k/htt_tx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_tx.c
@@ -96,7 +96,7 @@ int ath10k_htt_tx_attach(struct ath10k_htt *htt)
htt->max_num_pending_tx = ath10k_hif_get_free_queue_number(htt->ar,
pipe);
- ath10k_dbg(ATH10K_DBG_HTT, "htt tx max num pending tx %d\n",
+ ath10k_dbg(ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n",
htt->max_num_pending_tx);
htt->pending_tx = kzalloc(sizeof(*htt->pending_tx) *
@@ -117,7 +117,7 @@ int ath10k_htt_tx_attach(struct ath10k_htt *htt)
static void ath10k_htt_tx_cleanup_pending(struct ath10k_htt *htt)
{
- struct sk_buff *txdesc;
+ struct htt_tx_done tx_done = {0};
int msdu_id;
/* No locks needed. Called after communication with the device has
@@ -127,18 +127,13 @@ static void ath10k_htt_tx_cleanup_pending(struct ath10k_htt *htt)
if (!test_bit(msdu_id, htt->used_msdu_ids))
continue;
- txdesc = htt->pending_tx[msdu_id];
- if (!txdesc)
- continue;
-
ath10k_dbg(ATH10K_DBG_HTT, "force cleanup msdu_id %hu\n",
msdu_id);
- if (ATH10K_SKB_CB(txdesc)->htt.refcount > 0)
- ATH10K_SKB_CB(txdesc)->htt.refcount = 1;
+ tx_done.discard = 1;
+ tx_done.msdu_id = msdu_id;
- ATH10K_SKB_CB(txdesc)->htt.discard = true;
- ath10k_txrx_tx_unref(htt, txdesc);
+ ath10k_txrx_tx_unref(htt, &tx_done);
}
}
@@ -152,26 +147,7 @@ void ath10k_htt_tx_detach(struct ath10k_htt *htt)
void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
{
- struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
- struct ath10k_htt *htt = &ar->htt;
-
- if (skb_cb->htt.is_conf) {
- dev_kfree_skb_any(skb);
- return;
- }
-
- if (skb_cb->is_aborted) {
- skb_cb->htt.discard = true;
-
- /* if the skbuff is aborted we need to make sure we'll free up
- * the tx resources, we can't simply run tx_unref() 2 times
- * because if htt tx completion came in earlier we'd access
- * unallocated memory */
- if (skb_cb->htt.refcount > 1)
- skb_cb->htt.refcount = 1;
- }
-
- ath10k_txrx_tx_unref(htt, skb);
+ dev_kfree_skb_any(skb);
}
int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt)
@@ -192,10 +168,48 @@ int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt)
cmd = (struct htt_cmd *)skb->data;
cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_VERSION_REQ;
- ATH10K_SKB_CB(skb)->htt.is_conf = true;
+ ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
+ if (ret) {
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+
+ return 0;
+}
+
+int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie)
+{
+ struct htt_stats_req *req;
+ struct sk_buff *skb;
+ struct htt_cmd *cmd;
+ int len = 0, ret;
+
+ len += sizeof(cmd->hdr);
+ len += sizeof(cmd->stats_req);
+
+ skb = ath10k_htc_alloc_skb(len);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put(skb, len);
+ cmd = (struct htt_cmd *)skb->data;
+ cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_STATS_REQ;
+
+ req = &cmd->stats_req;
+
+ memset(req, 0, sizeof(*req));
+
+ /* currently we support only 8-bit masks at most so there is no
+ * need to worry about endianness */
+ req->upload_types[0] = mask;
+ req->reset_types[0] = mask;
+ req->stat_type = HTT_STATS_REQ_CFG_STAT_TYPE_INVALID;
+ req->cookie_lsb = cpu_to_le32(cookie & 0xffffffff);
+ req->cookie_msb = cpu_to_le32((cookie & 0xffffffff00000000ULL) >> 32);
ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
if (ret) {
+ ath10k_warn("failed to send htt stats request: %d\n", ret);
dev_kfree_skb_any(skb);
return ret;
}
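
The jiffies cookie above is a 64-bit value carried in two little-endian 32-bit request fields; the target echoes it back so the reply can be matched to the request. A standalone sketch of the split and reassembly:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
    	uint64_t cookie = 0x0123456789abcdefULL;

    	uint32_t lsb = (uint32_t)(cookie & 0xffffffff);
    	uint32_t msb = (uint32_t)((cookie & 0xffffffff00000000ULL) >> 32);

    	/* the receiver reassembles the original value */
    	uint64_t back = ((uint64_t)msb << 32) | lsb;

    	printf("lsb 0x%08" PRIx32 " msb 0x%08" PRIx32
    	       " -> 0x%016" PRIx64 "\n", lsb, msb, back);
    	return 0;
    }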
@@ -279,8 +293,6 @@ int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt)
#undef desc_offset
- ATH10K_SKB_CB(skb)->htt.is_conf = true;
-
ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
if (ret) {
dev_kfree_skb_any(skb);
@@ -293,10 +305,10 @@ int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt)
int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
{
struct device *dev = htt->ar->dev;
- struct ath10k_skb_cb *skb_cb;
struct sk_buff *txdesc = NULL;
struct htt_cmd *cmd;
- u8 vdev_id = ATH10K_SKB_CB(msdu)->htt.vdev_id;
+ struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
+ u8 vdev_id = skb_cb->htt.vdev_id;
int len = 0;
int msdu_id = -1;
int res;
@@ -304,30 +316,30 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
res = ath10k_htt_tx_inc_pending(htt);
if (res)
- return res;
+ goto err;
len += sizeof(cmd->hdr);
len += sizeof(cmd->mgmt_tx);
- txdesc = ath10k_htc_alloc_skb(len);
- if (!txdesc) {
- res = -ENOMEM;
- goto err;
- }
-
spin_lock_bh(&htt->tx_lock);
- msdu_id = ath10k_htt_tx_alloc_msdu_id(htt);
- if (msdu_id < 0) {
+ res = ath10k_htt_tx_alloc_msdu_id(htt);
+ if (res < 0) {
spin_unlock_bh(&htt->tx_lock);
- res = msdu_id;
- goto err;
+ goto err_tx_dec;
}
- htt->pending_tx[msdu_id] = txdesc;
+ msdu_id = res;
+ htt->pending_tx[msdu_id] = msdu;
spin_unlock_bh(&htt->tx_lock);
+ txdesc = ath10k_htc_alloc_skb(len);
+ if (!txdesc) {
+ res = -ENOMEM;
+ goto err_free_msdu_id;
+ }
+
res = ath10k_skb_map(dev, msdu);
if (res)
- goto err;
+ goto err_free_txdesc;
skb_put(txdesc, len);
cmd = (struct htt_cmd *)txdesc->data;
@@ -339,31 +351,27 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
memcpy(cmd->mgmt_tx.hdr, msdu->data,
min_t(int, msdu->len, HTT_MGMT_FRM_HDR_DOWNLOAD_LEN));
- /* refcount is decremented by HTC and HTT completions until it reaches
- * zero and is freed */
- skb_cb = ATH10K_SKB_CB(txdesc);
- skb_cb->htt.msdu_id = msdu_id;
- skb_cb->htt.refcount = 2;
- skb_cb->htt.msdu = msdu;
+ skb_cb->htt.frag_len = 0;
+ skb_cb->htt.pad_len = 0;
res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
if (res)
- goto err;
+ goto err_unmap_msdu;
return 0;
-err:
+err_unmap_msdu:
ath10k_skb_unmap(dev, msdu);
-
- if (txdesc)
- dev_kfree_skb_any(txdesc);
- if (msdu_id >= 0) {
- spin_lock_bh(&htt->tx_lock);
- htt->pending_tx[msdu_id] = NULL;
- ath10k_htt_tx_free_msdu_id(htt, msdu_id);
- spin_unlock_bh(&htt->tx_lock);
- }
+err_free_txdesc:
+ dev_kfree_skb_any(txdesc);
+err_free_msdu_id:
+ spin_lock_bh(&htt->tx_lock);
+ htt->pending_tx[msdu_id] = NULL;
+ ath10k_htt_tx_free_msdu_id(htt, msdu_id);
+ spin_unlock_bh(&htt->tx_lock);
+err_tx_dec:
ath10k_htt_tx_dec_pending(htt);
+err:
return res;
}
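
The rewritten error path above is the standard kernel goto-unwind idiom: one label per acquired resource, released in exactly the reverse order of acquisition. A compact standalone sketch of the shape; the resource names are placeholders, and map_buf() is made to fail so the unwind runs:

    #include <stdio.h>

    static int get_slot(void)  { puts("get slot"); return 0; }
    static void put_slot(void) { puts("put slot"); }
    static int get_id(void)    { puts("get id"); return 0; }
    static void put_id(void)   { puts("put id"); }
    static int map_buf(void)   { puts("map buf"); return -1; /* fails */ }

    static int do_tx(void)
    {
    	int res;

    	res = get_slot();
    	if (res)
    		goto err;

    	res = get_id();
    	if (res)
    		goto err_put_slot;

    	res = map_buf();
    	if (res)
    		goto err_put_id; /* unwinds id, then slot */

    	return 0;

    err_put_id:
    	put_id();
    err_put_slot:
    	put_slot();
    err:
    	return res;
    }

    int main(void)
    {
    	do_tx();
    	return 0;
    }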
@@ -373,13 +381,12 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
struct htt_cmd *cmd;
struct htt_data_tx_desc_frag *tx_frags;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
- struct ath10k_skb_cb *skb_cb;
+ struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
struct sk_buff *txdesc = NULL;
- struct sk_buff *txfrag = NULL;
+ bool use_frags;
u8 vdev_id = ATH10K_SKB_CB(msdu)->htt.vdev_id;
u8 tid;
- int prefetch_len, desc_len, frag_len;
- dma_addr_t frags_paddr;
+ int prefetch_len, desc_len;
int msdu_id = -1;
int res;
u8 flags0;
@@ -387,69 +394,82 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
res = ath10k_htt_tx_inc_pending(htt);
if (res)
- return res;
+ goto err;
+
+ spin_lock_bh(&htt->tx_lock);
+ res = ath10k_htt_tx_alloc_msdu_id(htt);
+ if (res < 0) {
+ spin_unlock_bh(&htt->tx_lock);
+ goto err_tx_dec;
+ }
+ msdu_id = res;
+ htt->pending_tx[msdu_id] = msdu;
+ spin_unlock_bh(&htt->tx_lock);
prefetch_len = min(htt->prefetch_len, msdu->len);
prefetch_len = roundup(prefetch_len, 4);
desc_len = sizeof(cmd->hdr) + sizeof(cmd->data_tx) + prefetch_len;
- frag_len = sizeof(*tx_frags) * 2;
txdesc = ath10k_htc_alloc_skb(desc_len);
if (!txdesc) {
res = -ENOMEM;
- goto err;
+ goto err_free_msdu_id;
}
- txfrag = dev_alloc_skb(frag_len);
- if (!txfrag) {
- res = -ENOMEM;
- goto err;
- }
+ /* Since HTT 3.0 there is no separate mgmt tx command. However, in
+ * the case of mgmt tx using TX_FRM there is no tx fragment list.
+ * Instead of a tx fragment list the host driver specifies the frame
+ * pointer directly. */
+ use_frags = htt->target_version_major < 3 ||
+ !ieee80211_is_mgmt(hdr->frame_control);
if (!IS_ALIGNED((unsigned long)txdesc->data, 4)) {
ath10k_warn("htt alignment check failed. dropping packet.\n");
res = -EIO;
- goto err;
+ goto err_free_txdesc;
}
- spin_lock_bh(&htt->tx_lock);
- msdu_id = ath10k_htt_tx_alloc_msdu_id(htt);
- if (msdu_id < 0) {
- spin_unlock_bh(&htt->tx_lock);
- res = msdu_id;
- goto err;
+ if (use_frags) {
+ skb_cb->htt.frag_len = sizeof(*tx_frags) * 2;
+ skb_cb->htt.pad_len = (unsigned long)msdu->data -
+ round_down((unsigned long)msdu->data, 4);
+
+ skb_push(msdu, skb_cb->htt.frag_len + skb_cb->htt.pad_len);
+ } else {
+ skb_cb->htt.frag_len = 0;
+ skb_cb->htt.pad_len = 0;
}
- htt->pending_tx[msdu_id] = txdesc;
- spin_unlock_bh(&htt->tx_lock);
res = ath10k_skb_map(dev, msdu);
if (res)
- goto err;
-
- /* tx fragment list must be terminated with zero-entry */
- skb_put(txfrag, frag_len);
- tx_frags = (struct htt_data_tx_desc_frag *)txfrag->data;
- tx_frags[0].paddr = __cpu_to_le32(ATH10K_SKB_CB(msdu)->paddr);
- tx_frags[0].len = __cpu_to_le32(msdu->len);
- tx_frags[1].paddr = __cpu_to_le32(0);
- tx_frags[1].len = __cpu_to_le32(0);
-
- res = ath10k_skb_map(dev, txfrag);
- if (res)
- goto err;
+ goto err_pull_txfrag;
+
+ if (use_frags) {
+ dma_sync_single_for_cpu(dev, skb_cb->paddr, msdu->len,
+ DMA_TO_DEVICE);
+
+ /* tx fragment list must be terminated with zero-entry */
+ tx_frags = (struct htt_data_tx_desc_frag *)msdu->data;
+ tx_frags[0].paddr = __cpu_to_le32(skb_cb->paddr +
+ skb_cb->htt.frag_len +
+ skb_cb->htt.pad_len);
+ tx_frags[0].len = __cpu_to_le32(msdu->len -
+ skb_cb->htt.frag_len -
+ skb_cb->htt.pad_len);
+ tx_frags[1].paddr = __cpu_to_le32(0);
+ tx_frags[1].len = __cpu_to_le32(0);
+
+ dma_sync_single_for_device(dev, skb_cb->paddr, msdu->len,
+ DMA_TO_DEVICE);
+ }
- ath10k_dbg(ATH10K_DBG_HTT, "txfrag 0x%llx msdu 0x%llx\n",
- (unsigned long long) ATH10K_SKB_CB(txfrag)->paddr,
+ ath10k_dbg(ATH10K_DBG_HTT, "msdu 0x%llx\n",
(unsigned long long) ATH10K_SKB_CB(msdu)->paddr);
- ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "txfrag: ",
- txfrag->data, frag_len);
ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "msdu: ",
msdu->data, msdu->len);
skb_put(txdesc, desc_len);
cmd = (struct htt_cmd *)txdesc->data;
- memset(cmd, 0, desc_len);
tid = ATH10K_SKB_CB(msdu)->htt.tid;
@@ -459,8 +479,13 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
if (!ieee80211_has_protected(hdr->frame_control))
flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;
flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
- flags0 |= SM(ATH10K_HW_TXRX_NATIVE_WIFI,
- HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
+
+ if (use_frags)
+ flags0 |= SM(ATH10K_HW_TXRX_NATIVE_WIFI,
+ HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
+ else
+ flags0 |= SM(ATH10K_HW_TXRX_MGMT,
+ HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
flags1 = 0;
flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
@@ -468,45 +493,37 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
- frags_paddr = ATH10K_SKB_CB(txfrag)->paddr;
-
cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
cmd->data_tx.flags0 = flags0;
cmd->data_tx.flags1 = __cpu_to_le16(flags1);
- cmd->data_tx.len = __cpu_to_le16(msdu->len);
+ cmd->data_tx.len = __cpu_to_le16(msdu->len -
+ skb_cb->htt.frag_len -
+ skb_cb->htt.pad_len);
cmd->data_tx.id = __cpu_to_le16(msdu_id);
- cmd->data_tx.frags_paddr = __cpu_to_le32(frags_paddr);
+ cmd->data_tx.frags_paddr = __cpu_to_le32(skb_cb->paddr);
cmd->data_tx.peerid = __cpu_to_le32(HTT_INVALID_PEERID);
- memcpy(cmd->data_tx.prefetch, msdu->data, prefetch_len);
-
- /* refcount is decremented by HTC and HTT completions until it reaches
- * zero and is freed */
- skb_cb = ATH10K_SKB_CB(txdesc);
- skb_cb->htt.msdu_id = msdu_id;
- skb_cb->htt.refcount = 2;
- skb_cb->htt.txfrag = txfrag;
- skb_cb->htt.msdu = msdu;
+ memcpy(cmd->data_tx.prefetch, hdr, prefetch_len);
res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
if (res)
- goto err;
+ goto err_unmap_msdu;
return 0;
-err:
- if (txfrag)
- ath10k_skb_unmap(dev, txfrag);
- if (txdesc)
- dev_kfree_skb_any(txdesc);
- if (txfrag)
- dev_kfree_skb_any(txfrag);
- if (msdu_id >= 0) {
- spin_lock_bh(&htt->tx_lock);
- htt->pending_tx[msdu_id] = NULL;
- ath10k_htt_tx_free_msdu_id(htt, msdu_id);
- spin_unlock_bh(&htt->tx_lock);
- }
- ath10k_htt_tx_dec_pending(htt);
+
+err_unmap_msdu:
ath10k_skb_unmap(dev, msdu);
+err_pull_txfrag:
+ skb_pull(msdu, skb_cb->htt.frag_len + skb_cb->htt.pad_len);
+err_free_txdesc:
+ dev_kfree_skb_any(txdesc);
+err_free_msdu_id:
+ spin_lock_bh(&htt->tx_lock);
+ htt->pending_tx[msdu_id] = NULL;
+ ath10k_htt_tx_free_msdu_id(htt, msdu_id);
+ spin_unlock_bh(&htt->tx_lock);
+err_tx_dec:
+ ath10k_htt_tx_dec_pending(htt);
+err:
return res;
}
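
The in-band fragment list written into the msdu headroom above is a two-entry array whose last entry is all zeros; the consumer walks entries until it hits the zero terminator. A standalone sketch of building such a list, with a struct layout mirroring htt_data_tx_desc_frag (a 32-bit little-endian address and length); the address and length values are examples only:

    #include <stdint.h>
    #include <stdio.h>

    struct frag {
    	uint32_t paddr; /* __le32 in the driver */
    	uint32_t len;
    } __attribute__((packed));

    int main(void)
    {
    	struct frag frags[2];
    	uint32_t data_paddr = 0x1000; /* example DMA address */
    	uint32_t data_len = 1400;     /* payload past frag list + pad */

    	frags[0].paddr = data_paddr;
    	frags[0].len = data_len;

    	/* fragment list must be terminated with a zero entry */
    	frags[1].paddr = 0;
    	frags[1].len = 0;

    	for (int i = 0; frags[i].len; i++)
    		printf("frag %d: paddr 0x%x len %u\n", i,
    		       frags[i].paddr, frags[i].len);
    	return 0;
    }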
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
index 44ed5af..8c1be768 100644
--- a/drivers/net/wireless/ath/ath10k/hw.h
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -24,18 +24,14 @@
#define SUPPORTED_FW_MAJOR 1
#define SUPPORTED_FW_MINOR 0
#define SUPPORTED_FW_RELEASE 0
-#define SUPPORTED_FW_BUILD 629
+#define SUPPORTED_FW_BUILD 636
-/* QCA988X 1.0 definitions */
-#define QCA988X_HW_1_0_VERSION 0x4000002c
-#define QCA988X_HW_1_0_FW_DIR "ath10k/QCA988X/hw1.0"
-#define QCA988X_HW_1_0_FW_FILE "firmware.bin"
-#define QCA988X_HW_1_0_OTP_FILE "otp.bin"
-#define QCA988X_HW_1_0_BOARD_DATA_FILE "board.bin"
-#define QCA988X_HW_1_0_PATCH_LOAD_ADDR 0x1234
+/* QCA988X 1.0 definitions (unsupported) */
+#define QCA988X_HW_1_0_CHIP_ID_REV 0x0
/* QCA988X 2.0 definitions */
#define QCA988X_HW_2_0_VERSION 0x4100016c
+#define QCA988X_HW_2_0_CHIP_ID_REV 0x2
#define QCA988X_HW_2_0_FW_DIR "ath10k/QCA988X/hw2.0"
#define QCA988X_HW_2_0_FW_FILE "firmware.bin"
#define QCA988X_HW_2_0_OTP_FILE "otp.bin"
@@ -53,6 +49,9 @@ enum ath10k_hw_txrx_mode {
ATH10K_HW_TXRX_RAW = 0,
ATH10K_HW_TXRX_NATIVE_WIFI = 1,
ATH10K_HW_TXRX_ETHERNET = 2,
+
+ /* Valid for HTT >= 3.0. Used for management frames in TX_FRM. */
+ ATH10K_HW_TXRX_MGMT = 3,
};
enum ath10k_mcast2ucast_mode {
@@ -75,7 +74,11 @@ enum ath10k_mcast2ucast_mode {
#define TARGET_RX_CHAIN_MASK (BIT(0) | BIT(1) | BIT(2))
#define TARGET_RX_TIMEOUT_LO_PRI 100
#define TARGET_RX_TIMEOUT_HI_PRI 40
-#define TARGET_RX_DECAP_MODE ATH10K_HW_TXRX_ETHERNET
+
+/* Native Wifi decap mode is used to align IP frames to 4-byte boundaries and
+ * avoid a very expensive re-alignment in mac80211. */
+#define TARGET_RX_DECAP_MODE ATH10K_HW_TXRX_NATIVE_WIFI
+
#define TARGET_SCAN_MAX_PENDING_REQS 4
#define TARGET_BMISS_OFFLOAD_MAX_VDEV 3
#define TARGET_ROAM_OFFLOAD_MAX_VDEV 3
@@ -169,6 +172,10 @@ enum ath10k_mcast2ucast_mode {
#define SOC_LPO_CAL_ENABLE_LSB 20
#define SOC_LPO_CAL_ENABLE_MASK 0x00100000
+#define SOC_CHIP_ID_ADDRESS 0x000000ec
+#define SOC_CHIP_ID_REV_LSB 8
+#define SOC_CHIP_ID_REV_MASK 0x00000f00
+
#define WLAN_RESET_CONTROL_COLD_RST_MASK 0x00000008
#define WLAN_RESET_CONTROL_WARM_RST_MASK 0x00000004
#define WLAN_SYSTEM_SLEEP_DISABLE_LSB 0
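
The new SOC_CHIP_ID_REV_{LSB,MASK} pair is consumed by the MS() mask-shift helper in ath10k_core_check_chip_id() earlier in this series. A standalone sketch of that extraction; the MS() definition below follows the usual ath-driver convention and is reproduced here only for illustration, as is the example register value:

    #include <stdint.h>
    #include <stdio.h>

    #define SOC_CHIP_ID_REV_LSB  8
    #define SOC_CHIP_ID_REV_MASK 0x00000f00

    /* mask-shift helper as used across the ath drivers */
    #define MS(_v, _f) (((_v) & _f##_MASK) >> _f##_LSB)

    int main(void)
    {
    	uint32_t chip_id = 0x043202ff; /* example register value */
    	uint32_t rev = MS(chip_id, SOC_CHIP_ID_REV);

    	printf("chip_id 0x%08x hw_revision 0x%x\n", chip_id, rev);
    	/* rev == 0x2, i.e. QCA988X_HW_2_0_CHIP_ID_REV */
    	return 0;
    }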
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index cf2ba4d..99a9bad 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -460,6 +460,11 @@ static int ath10k_vdev_start(struct ath10k_vif *arvif)
arg.ssid_len = arvif->vif->bss_conf.ssid_len;
}
+ ath10k_dbg(ATH10K_DBG_MAC,
+ "mac vdev %d start center_freq %d phymode %s\n",
+ arg.vdev_id, arg.channel.freq,
+ ath10k_wmi_phymode_str(arg.channel.mode));
+
ret = ath10k_wmi_vdev_start(ar, &arg);
if (ret) {
ath10k_warn("WMI vdev start failed: ret %d\n", ret);
@@ -503,13 +508,10 @@ static int ath10k_monitor_start(struct ath10k *ar, int vdev_id)
{
struct ieee80211_channel *channel = ar->hw->conf.chandef.chan;
struct wmi_vdev_start_request_arg arg = {};
- enum nl80211_channel_type type;
int ret = 0;
lockdep_assert_held(&ar->conf_mutex);
- type = cfg80211_get_chandef_type(&ar->hw->conf.chandef);
-
arg.vdev_id = vdev_id;
arg.channel.freq = channel->center_freq;
arg.channel.band_center_freq1 = ar->hw->conf.chandef.center_freq1;
@@ -607,7 +609,7 @@ static int ath10k_monitor_create(struct ath10k *ar)
goto vdev_fail;
}
- ath10k_dbg(ATH10K_DBG_MAC, "Monitor interface created, vdev id: %d\n",
+ ath10k_dbg(ATH10K_DBG_MAC, "mac monitor vdev %d created\n",
ar->monitor_vdev_id);
ar->monitor_present = true;
@@ -639,7 +641,7 @@ static int ath10k_monitor_destroy(struct ath10k *ar)
ar->free_vdev_map |= 1 << (ar->monitor_vdev_id);
ar->monitor_present = false;
- ath10k_dbg(ATH10K_DBG_MAC, "Monitor interface destroyed, vdev id: %d\n",
+ ath10k_dbg(ATH10K_DBG_MAC, "mac monitor vdev %d deleted\n",
ar->monitor_vdev_id);
return ret;
}
@@ -668,7 +670,7 @@ static void ath10k_control_beaconing(struct ath10k_vif *arvif,
arvif->vdev_id);
return;
}
- ath10k_dbg(ATH10K_DBG_MAC, "VDEV: %d up\n", arvif->vdev_id);
+ ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d up\n", arvif->vdev_id);
}
static void ath10k_control_ibss(struct ath10k_vif *arvif,
@@ -752,14 +754,14 @@ static void ath10k_ps_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
psmode = WMI_STA_PS_MODE_DISABLED;
}
+ ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d psmode %s\n",
+ arvif->vdev_id, psmode ? "enable" : "disable");
+
ar_iter->ret = ath10k_wmi_set_psmode(ar_iter->ar, arvif->vdev_id,
psmode);
if (ar_iter->ret)
ath10k_warn("Failed to set PS Mode: %d for VDEV: %d\n",
psmode, arvif->vdev_id);
- else
- ath10k_dbg(ATH10K_DBG_MAC, "Set PS Mode: %d for VDEV: %d\n",
- psmode, arvif->vdev_id);
}
/**********************/
@@ -949,7 +951,8 @@ static void ath10k_peer_assoc_h_ht(struct ath10k *ar,
arg->peer_ht_rates.num_rates = n;
arg->peer_num_spatial_streams = max((n+7) / 8, 1);
- ath10k_dbg(ATH10K_DBG_MAC, "mcs cnt %d nss %d\n",
+ ath10k_dbg(ATH10K_DBG_MAC, "mac ht peer %pM mcs cnt %d nss %d\n",
+ arg->addr,
arg->peer_ht_rates.num_rates,
arg->peer_num_spatial_streams);
}
@@ -969,11 +972,11 @@ static void ath10k_peer_assoc_h_qos_ap(struct ath10k *ar,
arg->peer_flags |= WMI_PEER_QOS;
if (sta->wme && sta->uapsd_queues) {
- ath10k_dbg(ATH10K_DBG_MAC, "uapsd_queues: 0x%X, max_sp: %d\n",
+ ath10k_dbg(ATH10K_DBG_MAC, "mac uapsd_queues 0x%x max_sp %d\n",
sta->uapsd_queues, sta->max_sp);
arg->peer_flags |= WMI_PEER_APSD;
- arg->peer_flags |= WMI_RC_UAPSD_FLAG;
+ arg->peer_rate_caps |= WMI_RC_UAPSD_FLAG;
if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
uapsd |= WMI_AP_PS_UAPSD_AC3_DELIVERY_EN |
@@ -1048,7 +1051,8 @@ static void ath10k_peer_assoc_h_vht(struct ath10k *ar,
arg->peer_vht_rates.tx_mcs_set =
__le16_to_cpu(vht_cap->vht_mcs.tx_mcs_map);
- ath10k_dbg(ATH10K_DBG_MAC, "mac vht peer\n");
+ ath10k_dbg(ATH10K_DBG_MAC, "mac vht peer %pM max_mpdu %d flags 0x%x\n",
+ sta->addr, arg->peer_max_mpdu, arg->peer_flags);
}
static void ath10k_peer_assoc_h_qos(struct ath10k *ar,
@@ -1076,8 +1080,6 @@ static void ath10k_peer_assoc_h_phymode(struct ath10k *ar,
{
enum wmi_phy_mode phymode = MODE_UNKNOWN;
- /* FIXME: add VHT */
-
switch (ar->hw->conf.chandef.chan->band) {
case IEEE80211_BAND_2GHZ:
if (sta->ht_cap.ht_supported) {
@@ -1091,7 +1093,17 @@ static void ath10k_peer_assoc_h_phymode(struct ath10k *ar,
break;
case IEEE80211_BAND_5GHZ:
- if (sta->ht_cap.ht_supported) {
+ /*
+ * Check VHT first.
+ */
+ if (sta->vht_cap.vht_supported) {
+ if (sta->bandwidth == IEEE80211_STA_RX_BW_80)
+ phymode = MODE_11AC_VHT80;
+ else if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
+ phymode = MODE_11AC_VHT40;
+ else if (sta->bandwidth == IEEE80211_STA_RX_BW_20)
+ phymode = MODE_11AC_VHT20;
+ } else if (sta->ht_cap.ht_supported) {
if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
phymode = MODE_11NA_HT40;
else
@@ -1105,6 +1117,9 @@ static void ath10k_peer_assoc_h_phymode(struct ath10k *ar,
break;
}
+ ath10k_dbg(ATH10K_DBG_MAC, "mac peer %pM phymode %s\n",
+ sta->addr, ath10k_wmi_phymode_str(phymode));
+
arg->peer_phymode = phymode;
WARN_ON(phymode == MODE_UNKNOWN);
}
@@ -1162,15 +1177,15 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
rcu_read_unlock();
+ ath10k_dbg(ATH10K_DBG_MAC,
+ "mac vdev %d up (associated) bssid %pM aid %d\n",
+ arvif->vdev_id, bss_conf->bssid, bss_conf->aid);
+
ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, bss_conf->aid,
bss_conf->bssid);
if (ret)
ath10k_warn("VDEV: %d up failed: ret %d\n",
arvif->vdev_id, ret);
- else
- ath10k_dbg(ATH10K_DBG_MAC,
- "VDEV: %d associated, BSSID: %pM, AID: %d\n",
- arvif->vdev_id, bss_conf->bssid, bss_conf->aid);
}
/*
@@ -1191,10 +1206,11 @@ static void ath10k_bss_disassoc(struct ieee80211_hw *hw,
* No idea why this happens, even though VDEV-DOWN is supposed
* to be analogous to link down, so just stop the VDEV.
*/
+ ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d stop (disassociated\n",
+ arvif->vdev_id);
+
+ /* FIXME: check return value */
ret = ath10k_vdev_stop(arvif);
- if (!ret)
- ath10k_dbg(ATH10K_DBG_MAC, "VDEV: %d stopped\n",
- arvif->vdev_id);
/*
* If we don't call VDEV-DOWN after VDEV-STOP FW will remain active and
@@ -1203,12 +1219,10 @@ static void ath10k_bss_disassoc(struct ieee80211_hw *hw,
* interfaces as it expects there is no rx when no interface is
* running.
*/
- ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
- if (ret)
- ath10k_dbg(ATH10K_DBG_MAC, "VDEV: %d ath10k_wmi_vdev_down failed (%d)\n",
- arvif->vdev_id, ret);
+ ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d down\n", arvif->vdev_id);
- ath10k_wmi_flush_tx(ar);
+ /* FIXME: why don't we print error if wmi call fails? */
+ ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
arvif->def_wep_key_index = 0;
}
@@ -1333,8 +1347,8 @@ static int ath10k_update_channel_list(struct ath10k *ar)
continue;
ath10k_dbg(ATH10K_DBG_WMI,
- "%s: [%zd/%d] freq %d maxpower %d regpower %d antenna %d mode %d\n",
- __func__, ch - arg.channels, arg.n_channels,
+ "mac channel [%zd/%d] freq %d maxpower %d regpower %d antenna %d mode %d\n",
+ ch - arg.channels, arg.n_channels,
ch->freq, ch->max_power, ch->max_reg_power,
ch->max_antenna_gain, ch->mode);
@@ -1421,10 +1435,6 @@ static void ath10k_tx_h_update_wep_key(struct sk_buff *skb)
struct ieee80211_key_conf *key = info->control.hw_key;
int ret;
- /* TODO AP mode should be implemented */
- if (vif->type != NL80211_IFTYPE_STATION)
- return;
-
if (!ieee80211_has_protected(hdr->frame_control))
return;
@@ -1438,7 +1448,8 @@ static void ath10k_tx_h_update_wep_key(struct sk_buff *skb)
if (key->keyidx == arvif->def_wep_key_index)
return;
- ath10k_dbg(ATH10K_DBG_MAC, "new wep keyidx will be %d\n", key->keyidx);
+ ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d keyidx %d\n",
+ arvif->vdev_id, key->keyidx);
ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
WMI_VDEV_PARAM_DEF_KEYID,
@@ -1480,6 +1491,12 @@ static void ath10k_tx_htt(struct ath10k *ar, struct sk_buff *skb)
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
int ret;
+ if (ar->htt.target_version_major >= 3) {
+ /* Since HTT 3.0 there is no separate mgmt tx command */
+ ret = ath10k_htt_tx(&ar->htt, skb);
+ goto exit;
+ }
+
if (ieee80211_is_mgmt(hdr->frame_control))
ret = ath10k_htt_mgmt_tx(&ar->htt, skb);
else if (ieee80211_is_nullfunc(hdr->frame_control))
@@ -1491,6 +1508,7 @@ static void ath10k_tx_htt(struct ath10k *ar, struct sk_buff *skb)
else
ret = ath10k_htt_tx(&ar->htt, skb);
+exit:
if (ret) {
ath10k_warn("tx failed (%d). dropping packet.\n", ret);
ieee80211_free_txskb(ar->hw, skb);
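In summary, the dispatch after this change (a sketch of the control flow above):

/* TX dispatch after this change (sketch):
 *
 *   HTT >= 3.0: all frames, management included, go through
 *               ath10k_htt_tx(); ath10k_tx() tags mgmt frames with
 *               HTT_DATA_TX_EXT_TID_MGMT instead.
 *   HTT <  3.0: management frames keep using the dedicated
 *               ath10k_htt_mgmt_tx() command.
 */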
@@ -1534,7 +1552,7 @@ void ath10k_offchan_tx_work(struct work_struct *work)
mutex_lock(&ar->conf_mutex);
- ath10k_dbg(ATH10K_DBG_MAC, "processing offchannel skb %p\n",
+ ath10k_dbg(ATH10K_DBG_MAC, "mac offchannel skb %p\n",
skb);
hdr = (struct ieee80211_hdr *)skb->data;
@@ -1546,6 +1564,7 @@ void ath10k_offchan_tx_work(struct work_struct *work)
spin_unlock_bh(&ar->data_lock);
if (peer)
+ /* FIXME: should this use ath10k_warn()? */
ath10k_dbg(ATH10K_DBG_MAC, "peer %pM on vdev %d already present\n",
peer_addr, vdev_id);
@@ -1643,8 +1662,6 @@ static int ath10k_abort_scan(struct ath10k *ar)
return -EIO;
}
- ath10k_wmi_flush_tx(ar);
-
ret = wait_for_completion_timeout(&ar->scan.completed, 3*HZ);
if (ret == 0)
ath10k_warn("timed out while waiting for scan to stop\n");
@@ -1678,10 +1695,6 @@ static int ath10k_start_scan(struct ath10k *ar,
if (ret)
return ret;
- /* make sure we submit the command so the completion
- * timeout makes sense */
- ath10k_wmi_flush_tx(ar);
-
ret = wait_for_completion_timeout(&ar->scan.started, 1*HZ);
if (ret == 0) {
ath10k_abort_scan(ar);
@@ -1727,8 +1740,10 @@ static void ath10k_tx(struct ieee80211_hw *hw,
/* we must calculate tid before we apply qos workaround
* as we'd lose the qos control field */
tid = HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST;
- if (ieee80211_is_data_qos(hdr->frame_control) &&
- is_unicast_ether_addr(ieee80211_get_DA(hdr))) {
+ if (ieee80211_is_mgmt(hdr->frame_control)) {
+ tid = HTT_DATA_TX_EXT_TID_MGMT;
+ } else if (ieee80211_is_data_qos(hdr->frame_control) &&
+ is_unicast_ether_addr(ieee80211_get_DA(hdr))) {
u8 *qc = ieee80211_get_qos_ctl(hdr);
tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
}
@@ -1742,7 +1757,7 @@ static void ath10k_tx(struct ieee80211_hw *hw,
ath10k_tx_h_seq_no(skb);
}
- memset(ATH10K_SKB_CB(skb), 0, sizeof(*ATH10K_SKB_CB(skb)));
+ ATH10K_SKB_CB(skb)->htt.is_offchan = false;
ATH10K_SKB_CB(skb)->htt.vdev_id = vdev_id;
ATH10K_SKB_CB(skb)->htt.tid = tid;
@@ -1884,7 +1899,7 @@ static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
mutex_lock(&ar->conf_mutex);
if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
- ath10k_dbg(ATH10K_DBG_MAC, "Config channel %d mhz\n",
+ ath10k_dbg(ATH10K_DBG_MAC, "mac config channel %d mhz\n",
conf->chandef.chan->center_freq);
spin_lock_bh(&ar->data_lock);
ar->rx_channel = conf->chandef.chan;
@@ -1901,7 +1916,6 @@ static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
ret = ath10k_monitor_destroy(ar);
}
- ath10k_wmi_flush_tx(ar);
mutex_unlock(&ar->conf_mutex);
return ret;
}
@@ -1973,7 +1987,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
break;
}
- ath10k_dbg(ATH10K_DBG_MAC, "Add interface: id %d type %d subtype %d\n",
+ ath10k_dbg(ATH10K_DBG_MAC, "mac vdev create %d (add interface) type %d subtype %d\n",
arvif->vdev_id, arvif->vdev_type, arvif->vdev_subtype);
ret = ath10k_wmi_vdev_create(ar, arvif->vdev_id, arvif->vdev_type,
@@ -2052,7 +2066,12 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
mutex_lock(&ar->conf_mutex);
- ath10k_dbg(ATH10K_DBG_MAC, "Remove interface: id %d\n", arvif->vdev_id);
+ spin_lock_bh(&ar->data_lock);
+ if (arvif->beacon) {
+ dev_kfree_skb_any(arvif->beacon);
+ arvif->beacon = NULL;
+ }
+ spin_unlock_bh(&ar->data_lock);
ar->free_vdev_map |= 1 << (arvif->vdev_id);
@@ -2064,6 +2083,9 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
kfree(arvif->u.ap.noa_data);
}
+ ath10k_dbg(ATH10K_DBG_MAC, "mac vdev delete %d (remove interface)\n",
+ arvif->vdev_id);
+
ret = ath10k_wmi_vdev_delete(ar, arvif->vdev_id);
if (ret)
ath10k_warn("WMI vdev delete failed: %d\n", ret);
@@ -2105,18 +2127,20 @@ static void ath10k_configure_filter(struct ieee80211_hw *hw,
if ((ar->filter_flags & FIF_PROMISC_IN_BSS) &&
!ar->monitor_enabled) {
+ ath10k_dbg(ATH10K_DBG_MAC, "mac monitor %d start\n",
+ ar->monitor_vdev_id);
+
ret = ath10k_monitor_start(ar, ar->monitor_vdev_id);
if (ret)
ath10k_warn("Unable to start monitor mode\n");
- else
- ath10k_dbg(ATH10K_DBG_MAC, "Monitor mode started\n");
} else if (!(ar->filter_flags & FIF_PROMISC_IN_BSS) &&
ar->monitor_enabled) {
+ ath10k_dbg(ATH10K_DBG_MAC, "mac monitor %d stop\n",
+ ar->monitor_vdev_id);
+
ret = ath10k_monitor_stop(ar);
if (ret)
ath10k_warn("Unable to stop monitor mode\n");
- else
- ath10k_dbg(ATH10K_DBG_MAC, "Monitor mode stopped\n");
}
mutex_unlock(&ar->conf_mutex);
@@ -2141,41 +2165,41 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
WMI_VDEV_PARAM_BEACON_INTERVAL,
arvif->beacon_interval);
+ ath10k_dbg(ATH10K_DBG_MAC,
+ "mac vdev %d beacon_interval %d\n",
+ arvif->vdev_id, arvif->beacon_interval);
+
if (ret)
ath10k_warn("Failed to set beacon interval for VDEV: %d\n",
arvif->vdev_id);
- else
- ath10k_dbg(ATH10K_DBG_MAC,
- "Beacon interval: %d set for VDEV: %d\n",
- arvif->beacon_interval, arvif->vdev_id);
}
if (changed & BSS_CHANGED_BEACON) {
+ ath10k_dbg(ATH10K_DBG_MAC,
+ "vdev %d set beacon tx mode to staggered\n",
+ arvif->vdev_id);
+
ret = ath10k_wmi_pdev_set_param(ar,
WMI_PDEV_PARAM_BEACON_TX_MODE,
WMI_BEACON_STAGGERED_MODE);
if (ret)
ath10k_warn("Failed to set beacon mode for VDEV: %d\n",
arvif->vdev_id);
- else
- ath10k_dbg(ATH10K_DBG_MAC,
- "Set staggered beacon mode for VDEV: %d\n",
- arvif->vdev_id);
}
if (changed & BSS_CHANGED_BEACON_INFO) {
arvif->dtim_period = info->dtim_period;
+ ath10k_dbg(ATH10K_DBG_MAC,
+ "mac vdev %d dtim_period %d\n",
+ arvif->vdev_id, arvif->dtim_period);
+
ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
WMI_VDEV_PARAM_DTIM_PERIOD,
arvif->dtim_period);
if (ret)
ath10k_warn("Failed to set dtim period for VDEV: %d\n",
arvif->vdev_id);
- else
- ath10k_dbg(ATH10K_DBG_MAC,
- "Set dtim period: %d for VDEV: %d\n",
- arvif->dtim_period, arvif->vdev_id);
}
if (changed & BSS_CHANGED_SSID &&
@@ -2188,16 +2212,15 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_BSSID) {
if (!is_zero_ether_addr(info->bssid)) {
+ ath10k_dbg(ATH10K_DBG_MAC,
+ "mac vdev %d create peer %pM\n",
+ arvif->vdev_id, info->bssid);
+
ret = ath10k_peer_create(ar, arvif->vdev_id,
info->bssid);
if (ret)
ath10k_warn("Failed to add peer: %pM for VDEV: %d\n",
info->bssid, arvif->vdev_id);
- else
- ath10k_dbg(ATH10K_DBG_MAC,
- "Added peer: %pM for VDEV: %d\n",
- info->bssid, arvif->vdev_id);
-
if (vif->type == NL80211_IFTYPE_STATION) {
/*
@@ -2207,11 +2230,12 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
memcpy(arvif->u.sta.bssid, info->bssid,
ETH_ALEN);
+ ath10k_dbg(ATH10K_DBG_MAC,
+ "mac vdev %d start %pM\n",
+ arvif->vdev_id, info->bssid);
+
+ /* FIXME: check return value */
ret = ath10k_vdev_start(arvif);
- if (!ret)
- ath10k_dbg(ATH10K_DBG_MAC,
- "VDEV: %d started with BSSID: %pM\n",
- arvif->vdev_id, info->bssid);
}
/*
@@ -2235,16 +2259,15 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
else
cts_prot = 0;
+ ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d cts_prot %d\n",
+ arvif->vdev_id, cts_prot);
+
ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
WMI_VDEV_PARAM_ENABLE_RTSCTS,
cts_prot);
if (ret)
ath10k_warn("Failed to set CTS prot for VDEV: %d\n",
arvif->vdev_id);
- else
- ath10k_dbg(ATH10K_DBG_MAC,
- "Set CTS prot: %d for VDEV: %d\n",
- cts_prot, arvif->vdev_id);
}
if (changed & BSS_CHANGED_ERP_SLOT) {
@@ -2255,16 +2278,15 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
else
slottime = WMI_VDEV_SLOT_TIME_LONG; /* 20us */
+ ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d slot_time %d\n",
+ arvif->vdev_id, slottime);
+
ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
WMI_VDEV_PARAM_SLOT_TIME,
slottime);
if (ret)
ath10k_warn("Failed to set erp slot for VDEV: %d\n",
arvif->vdev_id);
- else
- ath10k_dbg(ATH10K_DBG_MAC,
- "Set slottime: %d for VDEV: %d\n",
- slottime, arvif->vdev_id);
}
if (changed & BSS_CHANGED_ERP_PREAMBLE) {
@@ -2274,16 +2296,16 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
else
preamble = WMI_VDEV_PREAMBLE_LONG;
+ ath10k_dbg(ATH10K_DBG_MAC,
+ "mac vdev %d preamble %dn",
+ arvif->vdev_id, preamble);
+
ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
WMI_VDEV_PARAM_PREAMBLE,
preamble);
if (ret)
ath10k_warn("Failed to set preamble for VDEV: %d\n",
arvif->vdev_id);
- else
- ath10k_dbg(ATH10K_DBG_MAC,
- "Set preamble: %d for VDEV: %d\n",
- preamble, arvif->vdev_id);
}
if (changed & BSS_CHANGED_ASSOC) {
@@ -2474,27 +2496,26 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
/*
* New station addition.
*/
+ ath10k_dbg(ATH10K_DBG_MAC,
+ "mac vdev %d peer create %pM (new sta)\n",
+ arvif->vdev_id, sta->addr);
+
ret = ath10k_peer_create(ar, arvif->vdev_id, sta->addr);
if (ret)
ath10k_warn("Failed to add peer: %pM for VDEV: %d\n",
sta->addr, arvif->vdev_id);
- else
- ath10k_dbg(ATH10K_DBG_MAC,
- "Added peer: %pM for VDEV: %d\n",
- sta->addr, arvif->vdev_id);
} else if ((old_state == IEEE80211_STA_NONE &&
new_state == IEEE80211_STA_NOTEXIST)) {
/*
* Existing station deletion.
*/
+ ath10k_dbg(ATH10K_DBG_MAC,
+ "mac vdev %d peer delete %pM (sta gone)\n",
+ arvif->vdev_id, sta->addr);
ret = ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
if (ret)
ath10k_warn("Failed to delete peer: %pM for VDEV: %d\n",
sta->addr, arvif->vdev_id);
- else
- ath10k_dbg(ATH10K_DBG_MAC,
- "Removed peer: %pM for VDEV: %d\n",
- sta->addr, arvif->vdev_id);
if (vif->type == NL80211_IFTYPE_STATION)
ath10k_bss_disassoc(hw, vif);
@@ -2505,14 +2526,13 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
/*
* New association.
*/
+ ath10k_dbg(ATH10K_DBG_MAC, "mac sta %pM associated\n",
+ sta->addr);
+
ret = ath10k_station_assoc(ar, arvif, sta);
if (ret)
ath10k_warn("Failed to associate station: %pM\n",
sta->addr);
- else
- ath10k_dbg(ATH10K_DBG_MAC,
- "Station %pM moved to assoc state\n",
- sta->addr);
} else if (old_state == IEEE80211_STA_ASSOC &&
new_state == IEEE80211_STA_AUTH &&
(vif->type == NL80211_IFTYPE_AP ||
@@ -2520,14 +2540,13 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
/*
* Disassociation.
*/
+ ath10k_dbg(ATH10K_DBG_MAC, "mac sta %pM disassociated\n",
+ sta->addr);
+
ret = ath10k_station_disassoc(ar, arvif, sta);
if (ret)
ath10k_warn("Failed to disassociate station: %pM\n",
sta->addr);
- else
- ath10k_dbg(ATH10K_DBG_MAC,
- "Station %pM moved to disassociated state\n",
- sta->addr);
}
mutex_unlock(&ar->conf_mutex);
@@ -2747,14 +2766,13 @@ static void ath10k_set_rts_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
if (ar_iter->ar->state == ATH10K_STATE_RESTARTED)
return;
+ ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d rts_threshold %d\n",
+ arvif->vdev_id, rts);
+
ar_iter->ret = ath10k_mac_set_rts(arvif, rts);
if (ar_iter->ret)
ath10k_warn("Failed to set RTS threshold for VDEV: %d\n",
arvif->vdev_id);
- else
- ath10k_dbg(ATH10K_DBG_MAC,
- "Set RTS threshold: %d for VDEV: %d\n",
- rts, arvif->vdev_id);
}
static int ath10k_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
@@ -2789,14 +2807,13 @@ static void ath10k_set_frag_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
if (ar_iter->ar->state == ATH10K_STATE_RESTARTED)
return;
+ ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d fragmentation_threshold %d\n",
+ arvif->vdev_id, frag);
+
ar_iter->ret = ath10k_mac_set_frag(arvif, frag);
if (ar_iter->ret)
ath10k_warn("Failed to set frag threshold for VDEV: %d\n",
arvif->vdev_id);
- else
- ath10k_dbg(ATH10K_DBG_MAC,
- "Set frag threshold: %d for VDEV: %d\n",
- frag, arvif->vdev_id);
}
static int ath10k_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
@@ -2836,8 +2853,7 @@ static void ath10k_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
bool empty;
spin_lock_bh(&ar->htt.tx_lock);
- empty = bitmap_empty(ar->htt.used_msdu_ids,
- ar->htt.max_num_pending_tx);
+ empty = (ar->htt.num_pending_tx == 0);
spin_unlock_bh(&ar->htt.tx_lock);
skip = (ar->state == ATH10K_STATE_WEDGED);
@@ -3326,6 +3342,10 @@ int ath10k_mac_register(struct ath10k *ar)
IEEE80211_HW_WANT_MONITOR_VIF |
IEEE80211_HW_AP_LINK_PS;
+ /* MSDU can have an HTT TX fragment descriptor pushed in front. The
+ * additional 4 bytes are used for padding/alignment if necessary. */
+ ar->hw->extra_tx_headroom += sizeof(struct htt_data_tx_desc_frag)*2 + 4;
+
if (ar->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS)
ar->hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS;
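As a worked example, assuming struct htt_data_tx_desc_frag is two __le32 words (paddr and len, 8 bytes total):

/* Worked example (assumption: struct htt_data_tx_desc_frag is two
 * __le32 fields, i.e. 8 bytes):
 *
 *   extra_tx_headroom += 2 * 8 + 4 = 20 bytes per MSDU
 *
 * room for the two-fragment HTT descriptor plus alignment padding. */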
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index e2f9ef5..dff23d9 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -36,11 +36,9 @@ static unsigned int ath10k_target_ps;
module_param(ath10k_target_ps, uint, 0644);
MODULE_PARM_DESC(ath10k_target_ps, "Enable ath10k Target (SoC) PS option");
-#define QCA988X_1_0_DEVICE_ID (0xabcd)
#define QCA988X_2_0_DEVICE_ID (0x003c)
static DEFINE_PCI_DEVICE_TABLE(ath10k_pci_id_table) = {
- { PCI_VDEVICE(ATHEROS, QCA988X_1_0_DEVICE_ID) }, /* PCI-E QCA988X V1 */
{ PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
{0}
};
@@ -50,9 +48,9 @@ static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
static void ath10k_pci_process_ce(struct ath10k *ar);
static int ath10k_pci_post_rx(struct ath10k *ar);
-static int ath10k_pci_post_rx_pipe(struct hif_ce_pipe_info *pipe_info,
+static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
int num);
-static void ath10k_pci_rx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info);
+static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info);
static void ath10k_pci_stop_ce(struct ath10k *ar);
static void ath10k_pci_device_reset(struct ath10k *ar);
static int ath10k_pci_reset_target(struct ath10k *ar);
@@ -60,43 +58,145 @@ static int ath10k_pci_start_intr(struct ath10k *ar);
static void ath10k_pci_stop_intr(struct ath10k *ar);
static const struct ce_attr host_ce_config_wlan[] = {
- /* host->target HTC control and raw streams */
- { /* CE0 */ CE_ATTR_FLAGS, 0, 16, 256, 0, NULL,},
- /* could be moved to share CE3 */
- /* target->host HTT + HTC control */
- { /* CE1 */ CE_ATTR_FLAGS, 0, 0, 512, 512, NULL,},
- /* target->host WMI */
- { /* CE2 */ CE_ATTR_FLAGS, 0, 0, 2048, 32, NULL,},
- /* host->target WMI */
- { /* CE3 */ CE_ATTR_FLAGS, 0, 32, 2048, 0, NULL,},
- /* host->target HTT */
- { /* CE4 */ CE_ATTR_FLAGS | CE_ATTR_DIS_INTR, 0,
- CE_HTT_H2T_MSG_SRC_NENTRIES, 256, 0, NULL,},
- /* unused */
- { /* CE5 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,},
- /* Target autonomous hif_memcpy */
- { /* CE6 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,},
- /* ce_diag, the Diagnostic Window */
- { /* CE7 */ CE_ATTR_FLAGS, 0, 2, DIAG_TRANSFER_LIMIT, 2, NULL,},
+ /* CE0: host->target HTC control and raw streams */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 16,
+ .src_sz_max = 256,
+ .dest_nentries = 0,
+ },
+
+ /* CE1: target->host HTT + HTC control */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 512,
+ .dest_nentries = 512,
+ },
+
+ /* CE2: target->host WMI */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 2048,
+ .dest_nentries = 32,
+ },
+
+ /* CE3: host->target WMI */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 32,
+ .src_sz_max = 2048,
+ .dest_nentries = 0,
+ },
+
+ /* CE4: host->target HTT */
+ {
+ .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+ .src_nentries = CE_HTT_H2T_MSG_SRC_NENTRIES,
+ .src_sz_max = 256,
+ .dest_nentries = 0,
+ },
+
+ /* CE5: unused */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 0,
+ .dest_nentries = 0,
+ },
+
+ /* CE6: target autonomous hif_memcpy */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 0,
+ .dest_nentries = 0,
+ },
+
+ /* CE7: ce_diag, the Diagnostic Window */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 2,
+ .src_sz_max = DIAG_TRANSFER_LIMIT,
+ .dest_nentries = 2,
+ },
};
/* Target firmware's Copy Engine configuration. */
static const struct ce_pipe_config target_ce_config_wlan[] = {
- /* host->target HTC control and raw streams */
- { /* CE0 */ 0, PIPEDIR_OUT, 32, 256, CE_ATTR_FLAGS, 0,},
- /* target->host HTT + HTC control */
- { /* CE1 */ 1, PIPEDIR_IN, 32, 512, CE_ATTR_FLAGS, 0,},
- /* target->host WMI */
- { /* CE2 */ 2, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,},
- /* host->target WMI */
- { /* CE3 */ 3, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0,},
- /* host->target HTT */
- { /* CE4 */ 4, PIPEDIR_OUT, 256, 256, CE_ATTR_FLAGS, 0,},
+ /* CE0: host->target HTC control and raw streams */
+ {
+ .pipenum = 0,
+ .pipedir = PIPEDIR_OUT,
+ .nentries = 32,
+ .nbytes_max = 256,
+ .flags = CE_ATTR_FLAGS,
+ .reserved = 0,
+ },
+
+ /* CE1: target->host HTT + HTC control */
+ {
+ .pipenum = 1,
+ .pipedir = PIPEDIR_IN,
+ .nentries = 32,
+ .nbytes_max = 512,
+ .flags = CE_ATTR_FLAGS,
+ .reserved = 0,
+ },
+
+ /* CE2: target->host WMI */
+ {
+ .pipenum = 2,
+ .pipedir = PIPEDIR_IN,
+ .nentries = 32,
+ .nbytes_max = 2048,
+ .flags = CE_ATTR_FLAGS,
+ .reserved = 0,
+ },
+
+ /* CE3: host->target WMI */
+ {
+ .pipenum = 3,
+ .pipedir = PIPEDIR_OUT,
+ .nentries = 32,
+ .nbytes_max = 2048,
+ .flags = CE_ATTR_FLAGS,
+ .reserved = 0,
+ },
+
+ /* CE4: host->target HTT */
+ {
+ .pipenum = 4,
+ .pipedir = PIPEDIR_OUT,
+ .nentries = 256,
+ .nbytes_max = 256,
+ .flags = CE_ATTR_FLAGS,
+ .reserved = 0,
+ },
+
/* NB: 50% of src nentries, since tx has 2 frags */
- /* unused */
- { /* CE5 */ 5, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0,},
- /* Reserved for target autonomous hif_memcpy */
- { /* CE6 */ 6, PIPEDIR_INOUT, 32, 4096, CE_ATTR_FLAGS, 0,},
+
+ /* CE5: unused */
+ {
+ .pipenum = 5,
+ .pipedir = PIPEDIR_OUT,
+ .nentries = 32,
+ .nbytes_max = 2048,
+ .flags = CE_ATTR_FLAGS,
+ .reserved = 0,
+ },
+
+ /* CE6: Reserved for target autonomous hif_memcpy */
+ {
+ .pipenum = 6,
+ .pipedir = PIPEDIR_INOUT,
+ .nentries = 32,
+ .nbytes_max = 4096,
+ .flags = CE_ATTR_FLAGS,
+ .reserved = 0,
+ },
+
/* CE7 used only by Host */
};
@@ -114,7 +214,7 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
unsigned int id;
unsigned int flags;
- struct ce_state *ce_diag;
+ struct ath10k_ce_pipe *ce_diag;
/* Host buffer address in CE space */
u32 ce_data;
dma_addr_t ce_data_base = 0;
@@ -278,7 +378,7 @@ static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
unsigned int id;
unsigned int flags;
- struct ce_state *ce_diag;
+ struct ath10k_ce_pipe *ce_diag;
void *data_buf = NULL;
u32 ce_data; /* Host buffer address in CE space */
dma_addr_t ce_data_base = 0;
@@ -437,7 +537,7 @@ static void ath10k_pci_wait(struct ath10k *ar)
ath10k_warn("Unable to wakeup target\n");
}
-void ath10k_do_pci_wake(struct ath10k *ar)
+int ath10k_do_pci_wake(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
void __iomem *pci_addr = ar_pci->mem;
@@ -453,18 +553,19 @@ void ath10k_do_pci_wake(struct ath10k *ar)
atomic_inc(&ar_pci->keep_awake_count);
if (ar_pci->verified_awake)
- return;
+ return 0;
for (;;) {
if (ath10k_pci_target_is_awake(ar)) {
ar_pci->verified_awake = true;
- break;
+ return 0;
}
if (tot_delay > PCIE_WAKE_TIMEOUT) {
- ath10k_warn("target takes too long to wake up (awake count %d)\n",
+ ath10k_warn("target took longer %d us to wake up (awake count %d)\n",
+ PCIE_WAKE_TIMEOUT,
atomic_read(&ar_pci->keep_awake_count));
- break;
+ return -ETIMEDOUT;
}
udelay(curr_delay);
@@ -493,7 +594,7 @@ void ath10k_do_pci_sleep(struct ath10k *ar)
* FIXME: Handle OOM properly.
*/
static inline
-struct ath10k_pci_compl *get_free_compl(struct hif_ce_pipe_info *pipe_info)
+struct ath10k_pci_compl *get_free_compl(struct ath10k_pci_pipe *pipe_info)
{
struct ath10k_pci_compl *compl = NULL;
@@ -511,39 +612,28 @@ exit:
}
/* Called by lower (CE) layer when a send to Target completes. */
-static void ath10k_pci_ce_send_done(struct ce_state *ce_state,
- void *transfer_context,
- u32 ce_data,
- unsigned int nbytes,
- unsigned int transfer_id)
+static void ath10k_pci_ce_send_done(struct ath10k_ce_pipe *ce_state)
{
struct ath10k *ar = ce_state->ar;
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- struct hif_ce_pipe_info *pipe_info = &ar_pci->pipe_info[ce_state->id];
+ struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
struct ath10k_pci_compl *compl;
- bool process = false;
-
- do {
- /*
- * For the send completion of an item in sendlist, just
- * increment num_sends_allowed. The upper layer callback will
- * be triggered when last fragment is done with send.
- */
- if (transfer_context == CE_SENDLIST_ITEM_CTXT) {
- spin_lock_bh(&pipe_info->pipe_lock);
- pipe_info->num_sends_allowed++;
- spin_unlock_bh(&pipe_info->pipe_lock);
- continue;
- }
+ void *transfer_context;
+ u32 ce_data;
+ unsigned int nbytes;
+ unsigned int transfer_id;
+ while (ath10k_ce_completed_send_next(ce_state, &transfer_context,
+ &ce_data, &nbytes,
+ &transfer_id) == 0) {
compl = get_free_compl(pipe_info);
if (!compl)
break;
- compl->send_or_recv = HIF_CE_COMPLETE_SEND;
+ compl->state = ATH10K_PCI_COMPL_SEND;
compl->ce_state = ce_state;
compl->pipe_info = pipe_info;
- compl->transfer_context = transfer_context;
+ compl->skb = transfer_context;
compl->nbytes = nbytes;
compl->transfer_id = transfer_id;
compl->flags = 0;
@@ -554,46 +644,36 @@ static void ath10k_pci_ce_send_done(struct ce_state *ce_state,
spin_lock_bh(&ar_pci->compl_lock);
list_add_tail(&compl->list, &ar_pci->compl_process);
spin_unlock_bh(&ar_pci->compl_lock);
-
- process = true;
- } while (ath10k_ce_completed_send_next(ce_state,
- &transfer_context,
- &ce_data, &nbytes,
- &transfer_id) == 0);
-
- /*
- * If only some of the items within a sendlist have completed,
- * don't invoke completion processing until the entire sendlist
- * has been sent.
- */
- if (!process)
- return;
+ }
ath10k_pci_process_ce(ar);
}
/* Called by lower (CE) layer when data is received from the Target. */
-static void ath10k_pci_ce_recv_data(struct ce_state *ce_state,
- void *transfer_context, u32 ce_data,
- unsigned int nbytes,
- unsigned int transfer_id,
- unsigned int flags)
+static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state)
{
struct ath10k *ar = ce_state->ar;
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- struct hif_ce_pipe_info *pipe_info = &ar_pci->pipe_info[ce_state->id];
+ struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
struct ath10k_pci_compl *compl;
struct sk_buff *skb;
+ void *transfer_context;
+ u32 ce_data;
+ unsigned int nbytes;
+ unsigned int transfer_id;
+ unsigned int flags;
- do {
+ while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
+ &ce_data, &nbytes, &transfer_id,
+ &flags) == 0) {
compl = get_free_compl(pipe_info);
if (!compl)
break;
- compl->send_or_recv = HIF_CE_COMPLETE_RECV;
+ compl->state = ATH10K_PCI_COMPL_RECV;
compl->ce_state = ce_state;
compl->pipe_info = pipe_info;
- compl->transfer_context = transfer_context;
+ compl->skb = transfer_context;
compl->nbytes = nbytes;
compl->transfer_id = transfer_id;
compl->flags = flags;
@@ -608,12 +688,7 @@ static void ath10k_pci_ce_recv_data(struct ce_state *ce_state,
spin_lock_bh(&ar_pci->compl_lock);
list_add_tail(&compl->list, &ar_pci->compl_process);
spin_unlock_bh(&ar_pci->compl_lock);
-
- } while (ath10k_ce_completed_recv_next(ce_state,
- &transfer_context,
- &ce_data, &nbytes,
- &transfer_id,
- &flags) == 0);
+ }
ath10k_pci_process_ce(ar);
}
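Both completion handlers now share the same pull-style shape: the CE layer fires a bare callback and the handler drains every ready entry itself. Distilled (the loop body is illustrative):

static void example_ce_send_cb(struct ath10k_ce_pipe *ce_state)
{
        void *ctx;
        u32 ce_data;
        unsigned int nbytes, id;

        /* Pull completed entries until the CE ring is drained. */
        while (ath10k_ce_completed_send_next(ce_state, &ctx, &ce_data,
                                             &nbytes, &id) == 0) {
                /* wrap each entry in an ath10k_pci_compl and queue it */
        }
}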
@@ -625,15 +700,12 @@ static int ath10k_pci_hif_send_head(struct ath10k *ar, u8 pipe_id,
{
struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(nbuf);
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- struct hif_ce_pipe_info *pipe_info = &(ar_pci->pipe_info[pipe_id]);
- struct ce_state *ce_hdl = pipe_info->ce_hdl;
- struct ce_sendlist sendlist;
+ struct ath10k_pci_pipe *pipe_info = &(ar_pci->pipe_info[pipe_id]);
+ struct ath10k_ce_pipe *ce_hdl = pipe_info->ce_hdl;
unsigned int len;
u32 flags = 0;
int ret;
- memset(&sendlist, 0, sizeof(struct ce_sendlist));
-
len = min(bytes, nbuf->len);
bytes -= len;
@@ -648,8 +720,6 @@ static int ath10k_pci_hif_send_head(struct ath10k *ar, u8 pipe_id,
"ath10k tx: data: ",
nbuf->data, nbuf->len);
- ath10k_ce_sendlist_buf_add(&sendlist, skb_cb->paddr, len, flags);
-
/* Make sure we have resources to handle this request */
spin_lock_bh(&pipe_info->pipe_lock);
if (!pipe_info->num_sends_allowed) {
@@ -660,7 +730,8 @@ static int ath10k_pci_hif_send_head(struct ath10k *ar, u8 pipe_id,
pipe_info->num_sends_allowed--;
spin_unlock_bh(&pipe_info->pipe_lock);
- ret = ath10k_ce_sendlist_send(ce_hdl, nbuf, &sendlist, transfer_id);
+ ret = ath10k_ce_sendlist_send(ce_hdl, nbuf, transfer_id,
+ skb_cb->paddr, len, flags);
if (ret)
ath10k_warn("CE send failed: %p\n", nbuf);
@@ -670,7 +741,7 @@ static int ath10k_pci_hif_send_head(struct ath10k *ar, u8 pipe_id,
static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- struct hif_ce_pipe_info *pipe_info = &(ar_pci->pipe_info[pipe]);
+ struct ath10k_pci_pipe *pipe_info = &(ar_pci->pipe_info[pipe]);
int ret;
spin_lock_bh(&pipe_info->pipe_lock);
@@ -764,9 +835,9 @@ static void ath10k_pci_hif_set_callbacks(struct ath10k *ar,
static int ath10k_pci_start_ce(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- struct ce_state *ce_diag = ar_pci->ce_diag;
+ struct ath10k_ce_pipe *ce_diag = ar_pci->ce_diag;
const struct ce_attr *attr;
- struct hif_ce_pipe_info *pipe_info;
+ struct ath10k_pci_pipe *pipe_info;
struct ath10k_pci_compl *compl;
int i, pipe_num, completions, disable_interrupts;
@@ -805,15 +876,14 @@ static int ath10k_pci_start_ce(struct ath10k *ar)
continue;
for (i = 0; i < completions; i++) {
- compl = kmalloc(sizeof(struct ath10k_pci_compl),
- GFP_KERNEL);
+ compl = kmalloc(sizeof(*compl), GFP_KERNEL);
if (!compl) {
ath10k_warn("No memory for completion state\n");
ath10k_pci_stop_ce(ar);
return -ENOMEM;
}
- compl->send_or_recv = HIF_CE_COMPLETE_FREE;
+ compl->state = ATH10K_PCI_COMPL_FREE;
list_add_tail(&compl->list, &pipe_info->compl_free);
}
}
@@ -840,7 +910,7 @@ static void ath10k_pci_stop_ce(struct ath10k *ar)
* their associated resources */
spin_lock_bh(&ar_pci->compl_lock);
list_for_each_entry(compl, &ar_pci->compl_process, list) {
- skb = (struct sk_buff *)compl->transfer_context;
+ skb = compl->skb;
ATH10K_SKB_CB(skb)->is_aborted = true;
}
spin_unlock_bh(&ar_pci->compl_lock);
@@ -850,7 +920,7 @@ static void ath10k_pci_cleanup_ce(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct ath10k_pci_compl *compl, *tmp;
- struct hif_ce_pipe_info *pipe_info;
+ struct ath10k_pci_pipe *pipe_info;
struct sk_buff *netbuf;
int pipe_num;
@@ -861,7 +931,7 @@ static void ath10k_pci_cleanup_ce(struct ath10k *ar)
list_for_each_entry_safe(compl, tmp, &ar_pci->compl_process, list) {
list_del(&compl->list);
- netbuf = (struct sk_buff *)compl->transfer_context;
+ netbuf = compl->skb;
dev_kfree_skb_any(netbuf);
kfree(compl);
}
@@ -912,12 +982,14 @@ static void ath10k_pci_process_ce(struct ath10k *ar)
list_del(&compl->list);
spin_unlock_bh(&ar_pci->compl_lock);
- if (compl->send_or_recv == HIF_CE_COMPLETE_SEND) {
+ switch (compl->state) {
+ case ATH10K_PCI_COMPL_SEND:
cb->tx_completion(ar,
- compl->transfer_context,
+ compl->skb,
compl->transfer_id);
send_done = 1;
- } else {
+ break;
+ case ATH10K_PCI_COMPL_RECV:
ret = ath10k_pci_post_rx_pipe(compl->pipe_info, 1);
if (ret) {
ath10k_warn("Unable to post recv buffer for pipe: %d\n",
@@ -925,7 +997,7 @@ static void ath10k_pci_process_ce(struct ath10k *ar)
break;
}
- skb = (struct sk_buff *)compl->transfer_context;
+ skb = compl->skb;
nbytes = compl->nbytes;
ath10k_dbg(ATH10K_DBG_PCI,
@@ -944,9 +1016,17 @@ static void ath10k_pci_process_ce(struct ath10k *ar)
nbytes,
skb->len + skb_tailroom(skb));
}
+ break;
+ case ATH10K_PCI_COMPL_FREE:
+ ath10k_warn("free completion cannot be processed\n");
+ break;
+ default:
+ ath10k_warn("invalid completion state (%d)\n",
+ compl->state);
+ break;
}
- compl->send_or_recv = HIF_CE_COMPLETE_FREE;
+ compl->state = ATH10K_PCI_COMPL_FREE;
/*
* Add completion back to the pipe's free list.
@@ -1037,12 +1117,12 @@ static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
&dl_is_polled);
}
-static int ath10k_pci_post_rx_pipe(struct hif_ce_pipe_info *pipe_info,
+static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
int num)
{
struct ath10k *ar = pipe_info->hif_ce_state;
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- struct ce_state *ce_state = pipe_info->ce_hdl;
+ struct ath10k_ce_pipe *ce_state = pipe_info->ce_hdl;
struct sk_buff *skb;
dma_addr_t ce_data;
int i, ret = 0;
@@ -1097,7 +1177,7 @@ err:
static int ath10k_pci_post_rx(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- struct hif_ce_pipe_info *pipe_info;
+ struct ath10k_pci_pipe *pipe_info;
const struct ce_attr *attr;
int pipe_num, ret = 0;
@@ -1147,11 +1227,11 @@ static int ath10k_pci_hif_start(struct ath10k *ar)
return 0;
}
-static void ath10k_pci_rx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info)
+static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
{
struct ath10k *ar;
struct ath10k_pci *ar_pci;
- struct ce_state *ce_hdl;
+ struct ath10k_ce_pipe *ce_hdl;
u32 buf_sz;
struct sk_buff *netbuf;
u32 ce_data;
@@ -1179,11 +1259,11 @@ static void ath10k_pci_rx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info)
}
}
-static void ath10k_pci_tx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info)
+static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
{
struct ath10k *ar;
struct ath10k_pci *ar_pci;
- struct ce_state *ce_hdl;
+ struct ath10k_ce_pipe *ce_hdl;
struct sk_buff *netbuf;
u32 ce_data;
unsigned int nbytes;
@@ -1206,15 +1286,14 @@ static void ath10k_pci_tx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info)
while (ath10k_ce_cancel_send_next(ce_hdl, (void **)&netbuf,
&ce_data, &nbytes, &id) == 0) {
- if (netbuf != CE_SENDLIST_ITEM_CTXT)
- /*
- * Indicate the completion to higer layer to free
- * the buffer
- */
- ATH10K_SKB_CB(netbuf)->is_aborted = true;
- ar_pci->msg_callbacks_current.tx_completion(ar,
- netbuf,
- id);
+ /*
+ * Indicate the completion to higher layer to free
+ * the buffer
+ */
+ ATH10K_SKB_CB(netbuf)->is_aborted = true;
+ ar_pci->msg_callbacks_current.tx_completion(ar,
+ netbuf,
+ id);
}
}
@@ -1232,7 +1311,7 @@ static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
int pipe_num;
for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
- struct hif_ce_pipe_info *pipe_info;
+ struct ath10k_pci_pipe *pipe_info;
pipe_info = &ar_pci->pipe_info[pipe_num];
ath10k_pci_rx_pipe_cleanup(pipe_info);
@@ -1243,7 +1322,7 @@ static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
static void ath10k_pci_ce_deinit(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- struct hif_ce_pipe_info *pipe_info;
+ struct ath10k_pci_pipe *pipe_info;
int pipe_num;
for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
@@ -1293,8 +1372,10 @@ static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
void *resp, u32 *resp_len)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- struct ce_state *ce_tx = ar_pci->pipe_info[BMI_CE_NUM_TO_TARG].ce_hdl;
- struct ce_state *ce_rx = ar_pci->pipe_info[BMI_CE_NUM_TO_HOST].ce_hdl;
+ struct ath10k_pci_pipe *pci_tx = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
+ struct ath10k_pci_pipe *pci_rx = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
+ struct ath10k_ce_pipe *ce_tx = pci_tx->ce_hdl;
+ struct ath10k_ce_pipe *ce_rx = pci_rx->ce_hdl;
dma_addr_t req_paddr = 0;
dma_addr_t resp_paddr = 0;
struct bmi_xfer xfer = {};
@@ -1378,13 +1459,16 @@ err_dma:
return ret;
}
-static void ath10k_pci_bmi_send_done(struct ce_state *ce_state,
- void *transfer_context,
- u32 data,
- unsigned int nbytes,
- unsigned int transfer_id)
+static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state)
{
- struct bmi_xfer *xfer = transfer_context;
+ struct bmi_xfer *xfer;
+ u32 ce_data;
+ unsigned int nbytes;
+ unsigned int transfer_id;
+
+ if (ath10k_ce_completed_send_next(ce_state, (void **)&xfer, &ce_data,
+ &nbytes, &transfer_id))
+ return;
if (xfer->wait_for_resp)
return;
@@ -1392,14 +1476,17 @@ static void ath10k_pci_bmi_send_done(struct ce_state *ce_state,
complete(&xfer->done);
}
-static void ath10k_pci_bmi_recv_data(struct ce_state *ce_state,
- void *transfer_context,
- u32 data,
- unsigned int nbytes,
- unsigned int transfer_id,
- unsigned int flags)
+static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state)
{
- struct bmi_xfer *xfer = transfer_context;
+ struct bmi_xfer *xfer;
+ u32 ce_data;
+ unsigned int nbytes;
+ unsigned int transfer_id;
+ unsigned int flags;
+
+ if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer, &ce_data,
+ &nbytes, &transfer_id, &flags))
+ return;
if (!xfer->wait_for_resp) {
ath10k_warn("unexpected: BMI data received; ignoring\n");
@@ -1679,7 +1766,7 @@ static int ath10k_pci_init_config(struct ath10k *ar)
static int ath10k_pci_ce_init(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- struct hif_ce_pipe_info *pipe_info;
+ struct ath10k_pci_pipe *pipe_info;
const struct ce_attr *attr;
int pipe_num;
@@ -1895,7 +1982,7 @@ static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
static void ath10k_pci_ce_tasklet(unsigned long ptr)
{
- struct hif_ce_pipe_info *pipe = (struct hif_ce_pipe_info *)ptr;
+ struct ath10k_pci_pipe *pipe = (struct ath10k_pci_pipe *)ptr;
struct ath10k_pci *ar_pci = pipe->ar_pci;
ath10k_ce_per_engine_service(ar_pci->ar, pipe->pipe_num);
@@ -2212,18 +2299,13 @@ static int ath10k_pci_reset_target(struct ath10k *ar)
static void ath10k_pci_device_reset(struct ath10k *ar)
{
- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- void __iomem *mem = ar_pci->mem;
int i;
u32 val;
if (!SOC_GLOBAL_RESET_ADDRESS)
return;
- if (!mem)
- return;
-
- ath10k_pci_reg_write32(mem, PCIE_SOC_WAKE_ADDRESS,
+ ath10k_pci_reg_write32(ar, PCIE_SOC_WAKE_ADDRESS,
PCIE_SOC_WAKE_V_MASK);
for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
if (ath10k_pci_target_is_awake(ar))
@@ -2232,12 +2314,12 @@ static void ath10k_pci_device_reset(struct ath10k *ar)
}
/* Put Target, including PCIe, into RESET. */
- val = ath10k_pci_reg_read32(mem, SOC_GLOBAL_RESET_ADDRESS);
+ val = ath10k_pci_reg_read32(ar, SOC_GLOBAL_RESET_ADDRESS);
val |= 1;
- ath10k_pci_reg_write32(mem, SOC_GLOBAL_RESET_ADDRESS, val);
+ ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
- if (ath10k_pci_reg_read32(mem, RTC_STATE_ADDRESS) &
+ if (ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
RTC_STATE_COLD_RESET_MASK)
break;
msleep(1);
@@ -2245,16 +2327,16 @@ static void ath10k_pci_device_reset(struct ath10k *ar)
/* Pull Target, including PCIe, out of RESET. */
val &= ~1;
- ath10k_pci_reg_write32(mem, SOC_GLOBAL_RESET_ADDRESS, val);
+ ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
- if (!(ath10k_pci_reg_read32(mem, RTC_STATE_ADDRESS) &
+ if (!(ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
RTC_STATE_COLD_RESET_MASK))
break;
msleep(1);
}
- ath10k_pci_reg_write32(mem, PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
+ ath10k_pci_reg_write32(ar, PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
}
static void ath10k_pci_dump_features(struct ath10k_pci *ar_pci)
@@ -2267,13 +2349,10 @@ static void ath10k_pci_dump_features(struct ath10k_pci *ar_pci)
switch (i) {
case ATH10K_PCI_FEATURE_MSI_X:
- ath10k_dbg(ATH10K_DBG_PCI, "device supports MSI-X\n");
- break;
- case ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND:
- ath10k_dbg(ATH10K_DBG_PCI, "QCA988X_1.0 workaround enabled\n");
+ ath10k_dbg(ATH10K_DBG_BOOT, "device supports MSI-X\n");
break;
case ATH10K_PCI_FEATURE_SOC_POWER_SAVE:
- ath10k_dbg(ATH10K_DBG_PCI, "QCA98XX SoC power save enabled\n");
+ ath10k_dbg(ATH10K_DBG_BOOT, "QCA98XX SoC power save enabled\n");
break;
}
}
@@ -2286,7 +2365,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
int ret = 0;
struct ath10k *ar;
struct ath10k_pci *ar_pci;
- u32 lcr_val;
+ u32 lcr_val, chip_id;
ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
@@ -2298,9 +2377,6 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
ar_pci->dev = &pdev->dev;
switch (pci_dev->device) {
- case QCA988X_1_0_DEVICE_ID:
- set_bit(ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND, ar_pci->features);
- break;
case QCA988X_2_0_DEVICE_ID:
set_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features);
break;
@@ -2322,10 +2398,6 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
goto err_ar_pci;
}
- /* Enable QCA988X_1.0 HW workarounds */
- if (test_bit(ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND, ar_pci->features))
- spin_lock_init(&ar_pci->hw_v1_workaround_lock);
-
ar_pci->ar = ar;
ar_pci->fw_indicator_address = FW_INDICATOR_ADDRESS;
atomic_set(&ar_pci->keep_awake_count, 0);
@@ -2395,9 +2467,20 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
spin_lock_init(&ar_pci->ce_lock);
- ar_pci->cacheline_sz = dma_get_cache_alignment();
+ ret = ath10k_do_pci_wake(ar);
+ if (ret) {
+ ath10k_err("Failed to get chip id: %d\n", ret);
+ goto err_iomap;
+ }
+
+ chip_id = ath10k_pci_read32(ar,
+ RTC_SOC_BASE_ADDRESS + SOC_CHIP_ID_ADDRESS);
+
+ ath10k_do_pci_sleep(ar);
+
+ ath10k_dbg(ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem);
- ret = ath10k_core_register(ar);
+ ret = ath10k_core_register(ar, chip_id);
if (ret) {
ath10k_err("could not register driver core (%d)\n", ret);
goto err_iomap;
@@ -2414,7 +2497,6 @@ err_region:
err_device:
pci_disable_device(pdev);
err_ar:
- pci_set_drvdata(pdev, NULL);
ath10k_core_destroy(ar);
err_ar_pci:
/* call HIF PCI free here */
@@ -2442,7 +2524,6 @@ static void ath10k_pci_remove(struct pci_dev *pdev)
ath10k_core_unregister(ar);
- pci_set_drvdata(pdev, NULL);
pci_iounmap(pdev, ar_pci->mem);
pci_release_region(pdev, BAR_NUM);
pci_clear_master(pdev);
@@ -2483,9 +2564,6 @@ module_exit(ath10k_pci_exit);
MODULE_AUTHOR("Qualcomm Atheros");
MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices");
MODULE_LICENSE("Dual BSD/GPL");
-MODULE_FIRMWARE(QCA988X_HW_1_0_FW_DIR "/" QCA988X_HW_1_0_FW_FILE);
-MODULE_FIRMWARE(QCA988X_HW_1_0_FW_DIR "/" QCA988X_HW_1_0_OTP_FILE);
-MODULE_FIRMWARE(QCA988X_HW_1_0_FW_DIR "/" QCA988X_HW_1_0_BOARD_DATA_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_OTP_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);
diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h
index 871bb33..7c49f6f 100644
--- a/drivers/net/wireless/ath/ath10k/pci.h
+++ b/drivers/net/wireless/ath/ath10k/pci.h
@@ -43,22 +43,23 @@ struct bmi_xfer {
u32 resp_len;
};
+enum ath10k_pci_compl_state {
+ ATH10K_PCI_COMPL_FREE = 0,
+ ATH10K_PCI_COMPL_SEND,
+ ATH10K_PCI_COMPL_RECV,
+};
+
struct ath10k_pci_compl {
struct list_head list;
- int send_or_recv;
- struct ce_state *ce_state;
- struct hif_ce_pipe_info *pipe_info;
- void *transfer_context;
+ enum ath10k_pci_compl_state state;
+ struct ath10k_ce_pipe *ce_state;
+ struct ath10k_pci_pipe *pipe_info;
+ struct sk_buff *skb;
unsigned int nbytes;
unsigned int transfer_id;
unsigned int flags;
};
-/* compl_state.send_or_recv */
-#define HIF_CE_COMPLETE_FREE 0
-#define HIF_CE_COMPLETE_SEND 1
-#define HIF_CE_COMPLETE_RECV 2
-
/*
* PCI-specific Target state
*
@@ -152,17 +153,16 @@ struct service_to_pipe {
enum ath10k_pci_features {
ATH10K_PCI_FEATURE_MSI_X = 0,
- ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND = 1,
- ATH10K_PCI_FEATURE_SOC_POWER_SAVE = 2,
+ ATH10K_PCI_FEATURE_SOC_POWER_SAVE = 1,
/* keep last */
ATH10K_PCI_FEATURE_COUNT
};
/* Per-pipe state. */
-struct hif_ce_pipe_info {
+struct ath10k_pci_pipe {
/* Handle of underlying Copy Engine */
- struct ce_state *ce_hdl;
+ struct ath10k_ce_pipe *ce_hdl;
/* Our pipe number; facilitiates use of pipe_info ptrs. */
u8 pipe_num;
@@ -190,7 +190,6 @@ struct ath10k_pci {
struct device *dev;
struct ath10k *ar;
void __iomem *mem;
- int cacheline_sz;
DECLARE_BITMAP(features, ATH10K_PCI_FEATURE_COUNT);
@@ -219,7 +218,7 @@ struct ath10k_pci {
bool compl_processing;
- struct hif_ce_pipe_info pipe_info[CE_COUNT_MAX];
+ struct ath10k_pci_pipe pipe_info[CE_COUNT_MAX];
struct ath10k_hif_cb msg_callbacks_current;
@@ -227,16 +226,13 @@ struct ath10k_pci {
u32 fw_indicator_address;
/* Copy Engine used for Diagnostic Accesses */
- struct ce_state *ce_diag;
+ struct ath10k_ce_pipe *ce_diag;
/* FIXME: document what this really protects */
spinlock_t ce_lock;
/* Map CE id to ce_state */
- struct ce_state *ce_id_to_state[CE_COUNT_MAX];
-
- /* makes sure that dummy reads are atomic */
- spinlock_t hw_v1_workaround_lock;
+ struct ath10k_ce_pipe ce_states[CE_COUNT_MAX];
};
static inline struct ath10k_pci *ath10k_pci_priv(struct ath10k *ar)
@@ -244,14 +240,18 @@ static inline struct ath10k_pci *ath10k_pci_priv(struct ath10k *ar)
return ar->hif.priv;
}
-static inline u32 ath10k_pci_reg_read32(void __iomem *mem, u32 addr)
+static inline u32 ath10k_pci_reg_read32(struct ath10k *ar, u32 addr)
{
- return ioread32(mem + PCIE_LOCAL_BASE_ADDRESS + addr);
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+ return ioread32(ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + addr);
}
-static inline void ath10k_pci_reg_write32(void __iomem *mem, u32 addr, u32 val)
+static inline void ath10k_pci_reg_write32(struct ath10k *ar, u32 addr, u32 val)
{
- iowrite32(val, mem + PCIE_LOCAL_BASE_ADDRESS + addr);
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+ iowrite32(val, ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + addr);
}
#define ATH_PCI_RESET_WAIT_MAX 10 /* ms */
@@ -310,23 +310,8 @@ static inline void ath10k_pci_write32(struct ath10k *ar, u32 offset,
u32 value)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- void __iomem *addr = ar_pci->mem;
-
- if (test_bit(ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND, ar_pci->features)) {
- unsigned long irq_flags;
- spin_lock_irqsave(&ar_pci->hw_v1_workaround_lock, irq_flags);
-
- ioread32(addr+offset+4); /* 3rd read prior to write */
- ioread32(addr+offset+4); /* 2nd read prior to write */
- ioread32(addr+offset+4); /* 1st read prior to write */
- iowrite32(value, addr+offset);
-
- spin_unlock_irqrestore(&ar_pci->hw_v1_workaround_lock,
- irq_flags);
- } else {
- iowrite32(value, addr+offset);
- }
+ iowrite32(value, ar_pci->mem + offset);
}
static inline u32 ath10k_pci_read32(struct ath10k *ar, u32 offset)
@@ -336,15 +321,17 @@ static inline u32 ath10k_pci_read32(struct ath10k *ar, u32 offset)
return ioread32(ar_pci->mem + offset);
}
-void ath10k_do_pci_wake(struct ath10k *ar);
+int ath10k_do_pci_wake(struct ath10k *ar);
void ath10k_do_pci_sleep(struct ath10k *ar);
-static inline void ath10k_pci_wake(struct ath10k *ar)
+static inline int ath10k_pci_wake(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
if (test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
- ath10k_do_pci_wake(ar);
+ return ath10k_do_pci_wake(ar);
+
+ return 0;
}
static inline void ath10k_pci_sleep(struct ath10k *ar)
diff --git a/drivers/net/wireless/ath/ath10k/rx_desc.h b/drivers/net/wireless/ath/ath10k/rx_desc.h
index bfec6c8..1c584c4 100644
--- a/drivers/net/wireless/ath/ath10k/rx_desc.h
+++ b/drivers/net/wireless/ath/ath10k/rx_desc.h
@@ -422,10 +422,30 @@ struct rx_mpdu_end {
#define RX_MSDU_START_INFO1_IP_FRAG (1 << 14)
#define RX_MSDU_START_INFO1_TCP_ONLY_ACK (1 << 15)
+/* The decapped header (rx_hdr_status) contains the following:
+ * a) 802.11 header
+ * [padding to 4 bytes]
+ * b) HW crypto parameter
+ * - 0 bytes for no security
+ * - 4 bytes for WEP
+ * - 8 bytes for TKIP, AES
+ * [padding to 4 bytes]
+ * c) A-MSDU subframe header (14 bytes) if applicable
+ * d) LLC/SNAP (RFC1042, 8 bytes)
+ *
+ * In case of A-MSDU only the first frame in the sequence contains (a) and (b). */
enum rx_msdu_decap_format {
- RX_MSDU_DECAP_RAW = 0,
- RX_MSDU_DECAP_NATIVE_WIFI = 1,
+ RX_MSDU_DECAP_RAW = 0,
+
+ /* Note: QoS frames are reported as non-QoS. The rx_hdr_status in
+ * htt_rx_desc contains the original decapped 802.11 header. */
+ RX_MSDU_DECAP_NATIVE_WIFI = 1,
+
+ /* Payload contains an ethernet header (struct ethhdr). */
RX_MSDU_DECAP_ETHERNET2_DIX = 2,
+
+ /* Payload contains two 48-bit addresses and 2-byte length (14 bytes
+ * total), followed by an RFC1042 header (8 bytes). */
RX_MSDU_DECAP_8023_SNAP_LLC = 3
};
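Putting the documented layout together, the expected rx_hdr_status length for, say, the first subframe of a TKIP-protected QoS A-MSDU works out as below (a sketch; 26 bytes assumes a 3-address QoS data header):

/* Sketch: rx_hdr_status length for the first subframe of a
 * TKIP-protected QoS A-MSDU, following the layout above. */
size_t hdr_len = 0;

hdr_len += ALIGN(26, 4);        /* (a) 802.11 QoS header, padded: 28 */
hdr_len += 8;                   /* (b) TKIP crypto parameter */
hdr_len += 14;                  /* (c) A-MSDU subframe header */
hdr_len += 8;                   /* (d) LLC/SNAP (RFC1042) */
                                /* total: 58 bytes */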
diff --git a/drivers/net/wireless/ath/ath10k/trace.h b/drivers/net/wireless/ath/ath10k/trace.h
index 85e806b..90817dd 100644
--- a/drivers/net/wireless/ath/ath10k/trace.h
+++ b/drivers/net/wireless/ath/ath10k/trace.h
@@ -111,26 +111,29 @@ TRACE_EVENT(ath10k_log_dbg_dump,
);
TRACE_EVENT(ath10k_wmi_cmd,
- TP_PROTO(int id, void *buf, size_t buf_len),
+ TP_PROTO(int id, void *buf, size_t buf_len, int ret),
- TP_ARGS(id, buf, buf_len),
+ TP_ARGS(id, buf, buf_len, ret),
TP_STRUCT__entry(
__field(unsigned int, id)
__field(size_t, buf_len)
__dynamic_array(u8, buf, buf_len)
+ __field(int, ret)
),
TP_fast_assign(
__entry->id = id;
__entry->buf_len = buf_len;
+ __entry->ret = ret;
memcpy(__get_dynamic_array(buf), buf, buf_len);
),
TP_printk(
- "id %d len %zu",
+ "id %d len %zu ret %d",
__entry->id,
- __entry->buf_len
+ __entry->buf_len,
+ __entry->ret
)
);
@@ -158,6 +161,27 @@ TRACE_EVENT(ath10k_wmi_event,
)
);
+TRACE_EVENT(ath10k_htt_stats,
+ TP_PROTO(void *buf, size_t buf_len),
+
+ TP_ARGS(buf, buf_len),
+
+ TP_STRUCT__entry(
+ __field(size_t, buf_len)
+ __dynamic_array(u8, buf, buf_len)
+ ),
+
+ TP_fast_assign(
+ __entry->buf_len = buf_len;
+ memcpy(__get_dynamic_array(buf), buf, buf_len);
+ ),
+
+ TP_printk(
+ "len %zu",
+ __entry->buf_len
+ )
+);
+
#endif /* _TRACE_H_ || TRACE_HEADER_MULTI_READ*/
/* we don't want to use include/trace/events */
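TRACE_EVENT() generates a matching trace_ath10k_htt_stats() stub, so the assumed call site (presumably the HTT stats rx path) reduces to:

/* Sketch of the assumed call site: record a received HTT stats buffer
 * through the new tracepoint. */
trace_ath10k_htt_stats(skb->data, skb->len);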
diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c
index 68b6fae..5ae373a 100644
--- a/drivers/net/wireless/ath/ath10k/txrx.c
+++ b/drivers/net/wireless/ath/ath10k/txrx.c
@@ -44,40 +44,39 @@ out:
spin_unlock_bh(&ar->data_lock);
}
-void ath10k_txrx_tx_unref(struct ath10k_htt *htt, struct sk_buff *txdesc)
+void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
+ const struct htt_tx_done *tx_done)
{
struct device *dev = htt->ar->dev;
struct ieee80211_tx_info *info;
- struct sk_buff *txfrag = ATH10K_SKB_CB(txdesc)->htt.txfrag;
- struct sk_buff *msdu = ATH10K_SKB_CB(txdesc)->htt.msdu;
+ struct ath10k_skb_cb *skb_cb;
+ struct sk_buff *msdu;
int ret;
- if (ATH10K_SKB_CB(txdesc)->htt.refcount == 0)
- return;
-
- ATH10K_SKB_CB(txdesc)->htt.refcount--;
+ ath10k_dbg(ATH10K_DBG_HTT, "htt tx completion msdu_id %u discard %d no_ack %d\n",
+ tx_done->msdu_id, !!tx_done->discard, !!tx_done->no_ack);
- if (ATH10K_SKB_CB(txdesc)->htt.refcount > 0)
+ if (tx_done->msdu_id >= htt->max_num_pending_tx) {
+ ath10k_warn("warning: msdu_id %d too big, ignoring\n",
+ tx_done->msdu_id);
return;
-
- if (txfrag) {
- ret = ath10k_skb_unmap(dev, txfrag);
- if (ret)
- ath10k_warn("txfrag unmap failed (%d)\n", ret);
-
- dev_kfree_skb_any(txfrag);
}
+ msdu = htt->pending_tx[tx_done->msdu_id];
+ skb_cb = ATH10K_SKB_CB(msdu);
+
ret = ath10k_skb_unmap(dev, msdu);
if (ret)
ath10k_warn("data skb unmap failed (%d)\n", ret);
+ if (skb_cb->htt.frag_len)
+ skb_pull(msdu, skb_cb->htt.frag_len + skb_cb->htt.pad_len);
+
ath10k_report_offchan_tx(htt->ar, msdu);
info = IEEE80211_SKB_CB(msdu);
- memset(&info->status, 0, sizeof(info->status));
- if (ATH10K_SKB_CB(txdesc)->htt.discard) {
+ if (tx_done->discard) {
ieee80211_free_txskb(htt->ar->hw, msdu);
goto exit;
}
@@ -85,7 +84,7 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt, struct sk_buff *txdesc)
if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
info->flags |= IEEE80211_TX_STAT_ACK;
- if (ATH10K_SKB_CB(txdesc)->htt.no_ack)
+ if (tx_done->no_ack)
info->flags &= ~IEEE80211_TX_STAT_ACK;
ieee80211_tx_status(htt->ar->hw, msdu);
@@ -93,36 +92,12 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt, struct sk_buff *txdesc)
exit:
spin_lock_bh(&htt->tx_lock);
- htt->pending_tx[ATH10K_SKB_CB(txdesc)->htt.msdu_id] = NULL;
- ath10k_htt_tx_free_msdu_id(htt, ATH10K_SKB_CB(txdesc)->htt.msdu_id);
+ htt->pending_tx[tx_done->msdu_id] = NULL;
+ ath10k_htt_tx_free_msdu_id(htt, tx_done->msdu_id);
__ath10k_htt_tx_dec_pending(htt);
- if (bitmap_empty(htt->used_msdu_ids, htt->max_num_pending_tx))
+ if (htt->num_pending_tx == 0)
wake_up(&htt->empty_tx_wq);
spin_unlock_bh(&htt->tx_lock);
-
- dev_kfree_skb_any(txdesc);
-}
-
-void ath10k_txrx_tx_completed(struct ath10k_htt *htt,
- const struct htt_tx_done *tx_done)
-{
- struct sk_buff *txdesc;
-
- ath10k_dbg(ATH10K_DBG_HTT, "htt tx completion msdu_id %u discard %d no_ack %d\n",
- tx_done->msdu_id, !!tx_done->discard, !!tx_done->no_ack);
-
- if (tx_done->msdu_id >= htt->max_num_pending_tx) {
- ath10k_warn("warning: msdu_id %d too big, ignoring\n",
- tx_done->msdu_id);
- return;
- }
-
- txdesc = htt->pending_tx[tx_done->msdu_id];
-
- ATH10K_SKB_CB(txdesc)->htt.discard = tx_done->discard;
- ATH10K_SKB_CB(txdesc)->htt.no_ack = tx_done->no_ack;
-
- ath10k_txrx_tx_unref(htt, txdesc);
}
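The completion path is now keyed by msdu_id alone instead of a host tx descriptor skb; the assumed lifecycle (allocation-side names taken from the driver's htt_tx path, not shown in this diff):

/* Lifecycle sketch (allocation-side names assumed):
 *
 *   tx:   msdu_id = ath10k_htt_tx_alloc_msdu_id(htt);
 *         htt->pending_tx[msdu_id] = msdu;
 *
 *   done: msdu = htt->pending_tx[tx_done->msdu_id];
 *         ...unmap and report to mac80211...
 *         htt->pending_tx[tx_done->msdu_id] = NULL;
 *         ath10k_htt_tx_free_msdu_id(htt, tx_done->msdu_id);
 */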
static const u8 rx_legacy_rate_idx[] = {
@@ -293,6 +268,8 @@ void ath10k_process_rx(struct ath10k *ar, struct htt_rx_info *info)
status->vht_nss,
status->freq,
status->band);
+ ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ",
+ info->skb->data, info->skb->len);
ieee80211_rx(ar->hw, info->skb);
}
diff --git a/drivers/net/wireless/ath/ath10k/txrx.h b/drivers/net/wireless/ath/ath10k/txrx.h
index e78632a..356dc9c 100644
--- a/drivers/net/wireless/ath/ath10k/txrx.h
+++ b/drivers/net/wireless/ath/ath10k/txrx.h
@@ -19,9 +19,8 @@
#include "htt.h"
-void ath10k_txrx_tx_unref(struct ath10k_htt *htt, struct sk_buff *txdesc);
-void ath10k_txrx_tx_completed(struct ath10k_htt *htt,
- const struct htt_tx_done *tx_done);
+void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
+ const struct htt_tx_done *tx_done);
void ath10k_process_rx(struct ath10k *ar, struct htt_rx_info *info);
struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id,
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index 55f90c7..6803ead 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -23,30 +23,6 @@
#include "wmi.h"
#include "mac.h"
-void ath10k_wmi_flush_tx(struct ath10k *ar)
-{
- int ret;
-
- lockdep_assert_held(&ar->conf_mutex);
-
- if (ar->state == ATH10K_STATE_WEDGED) {
- ath10k_warn("wmi flush skipped - device is wedged anyway\n");
- return;
- }
-
- ret = wait_event_timeout(ar->wmi.wq,
- atomic_read(&ar->wmi.pending_tx_count) == 0,
- 5*HZ);
- if (atomic_read(&ar->wmi.pending_tx_count) == 0)
- return;
-
- if (ret == 0)
- ret = -ETIMEDOUT;
-
- if (ret < 0)
- ath10k_warn("wmi flush failed (%d)\n", ret);
-}
-
int ath10k_wmi_wait_for_service_ready(struct ath10k *ar)
{
int ret;
@@ -85,18 +61,14 @@ static struct sk_buff *ath10k_wmi_alloc_skb(u32 len)
static void ath10k_wmi_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
{
dev_kfree_skb(skb);
-
- if (atomic_sub_return(1, &ar->wmi.pending_tx_count) == 0)
- wake_up(&ar->wmi.wq);
}
-/* WMI command API */
-static int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb,
- enum wmi_cmd_id cmd_id)
+static int ath10k_wmi_cmd_send_nowait(struct ath10k *ar, struct sk_buff *skb,
+ enum wmi_cmd_id cmd_id)
{
struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
struct wmi_cmd_hdr *cmd_hdr;
- int status;
+ int ret;
u32 cmd = 0;
if (skb_push(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
@@ -107,25 +79,87 @@ static int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb,
cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
cmd_hdr->cmd_id = __cpu_to_le32(cmd);
- if (atomic_add_return(1, &ar->wmi.pending_tx_count) >
- WMI_MAX_PENDING_TX_COUNT) {
- /* avoid using up memory when FW hangs */
- atomic_dec(&ar->wmi.pending_tx_count);
- return -EBUSY;
- }
-
memset(skb_cb, 0, sizeof(*skb_cb));
+ ret = ath10k_htc_send(&ar->htc, ar->wmi.eid, skb);
+ trace_ath10k_wmi_cmd(cmd_id, skb->data, skb->len, ret);
- trace_ath10k_wmi_cmd(cmd_id, skb->data, skb->len);
+ if (ret)
+ goto err_pull;
- status = ath10k_htc_send(&ar->htc, ar->wmi.eid, skb);
- if (status) {
+ return 0;
+
+err_pull:
+ skb_pull(skb, sizeof(struct wmi_cmd_hdr));
+ return ret;
+}
+
+static void ath10k_wmi_tx_beacon_nowait(struct ath10k_vif *arvif)
+{
+ struct wmi_bcn_tx_arg arg = {0};
+ int ret;
+
+ lockdep_assert_held(&arvif->ar->data_lock);
+
+ if (arvif->beacon == NULL)
+ return;
+
+ arg.vdev_id = arvif->vdev_id;
+ arg.tx_rate = 0;
+ arg.tx_power = 0;
+ arg.bcn = arvif->beacon->data;
+ arg.bcn_len = arvif->beacon->len;
+
+ ret = ath10k_wmi_beacon_send_nowait(arvif->ar, &arg);
+ if (ret)
+ return;
+
+ dev_kfree_skb_any(arvif->beacon);
+ arvif->beacon = NULL;
+}
+
+static void ath10k_wmi_tx_beacons_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+
+ ath10k_wmi_tx_beacon_nowait(arvif);
+}
+
+static void ath10k_wmi_tx_beacons_nowait(struct ath10k *ar)
+{
+ spin_lock_bh(&ar->data_lock);
+ ieee80211_iterate_active_interfaces_atomic(ar->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ ath10k_wmi_tx_beacons_iter,
+ NULL);
+ spin_unlock_bh(&ar->data_lock);
+}
+
+static void ath10k_wmi_op_ep_tx_credits(struct ath10k *ar)
+{
+ /* try to send pending beacons first. they take priority */
+ ath10k_wmi_tx_beacons_nowait(ar);
+
+ wake_up(&ar->wmi.tx_credits_wq);
+}
+
+static int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb,
+ enum wmi_cmd_id cmd_id)
+{
+ int ret = -EINVAL;
+
+ wait_event_timeout(ar->wmi.tx_credits_wq, ({
+ /* try to send pending beacons first. they take priority */
+ ath10k_wmi_tx_beacons_nowait(ar);
+
+ ret = ath10k_wmi_cmd_send_nowait(ar, skb, cmd_id);
+ (ret != -EAGAIN);
+ }), 3*HZ);
+
+ if (ret)
dev_kfree_skb_any(skb);
- atomic_dec(&ar->wmi.pending_tx_count);
- return status;
- }
- return 0;
+ return ret;
}
static int ath10k_wmi_event_scan(struct ath10k *ar, struct sk_buff *skb)
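
The new ath10k_wmi_cmd_send() above embeds the retry inside the wait_event_timeout() condition: each tx-credit wakeup re-runs ath10k_wmi_cmd_send_nowait(), and the wait ends once the result is anything other than -EAGAIN or the 3*HZ timeout lapses. A rough userspace analog of that retry-until-credits pattern; the credit source is faked and all names are invented for illustration:

#include <errno.h>
#include <stdio.h>

static int credits;			/* replenished by a completion handler */

static int send_nowait(const char *cmd)
{
	if (credits == 0)
		return -EAGAIN;		/* out of HTC credits: retry later */
	credits--;
	printf("sent %s\n", cmd);
	return 0;
}

/* Analog of ath10k_wmi_cmd_send(): retry until success, a hard error,
 * or the (here, iteration-bounded) timeout expires. */
static int send_with_retry(const char *cmd, int max_tries)
{
	int ret = -EINVAL;

	for (int i = 0; i < max_tries; i++) {
		ret = send_nowait(cmd);
		if (ret != -EAGAIN)
			break;
		credits = 1;		/* stand-in for the ep_tx_credits wakeup */
	}
	return ret;
}

int main(void)
{
	return send_with_retry("WMI_PEER_ASSOC_CMDID", 3) ? 1 : 0;
}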
@@ -315,7 +349,9 @@ static inline u8 get_rate_idx(u32 rate, enum ieee80211_band band)
static int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
{
- struct wmi_mgmt_rx_event *event = (struct wmi_mgmt_rx_event *)skb->data;
+ struct wmi_mgmt_rx_event_v1 *ev_v1;
+ struct wmi_mgmt_rx_event_v2 *ev_v2;
+ struct wmi_mgmt_rx_hdr_v1 *ev_hdr;
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
struct ieee80211_hdr *hdr;
u32 rx_status;
@@ -325,13 +361,24 @@ static int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
u32 rate;
u32 buf_len;
u16 fc;
+ int pull_len;
+
+ if (test_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX, ar->fw_features)) {
+ ev_v2 = (struct wmi_mgmt_rx_event_v2 *)skb->data;
+ ev_hdr = &ev_v2->hdr.v1;
+ pull_len = sizeof(*ev_v2);
+ } else {
+ ev_v1 = (struct wmi_mgmt_rx_event_v1 *)skb->data;
+ ev_hdr = &ev_v1->hdr;
+ pull_len = sizeof(*ev_v1);
+ }
- channel = __le32_to_cpu(event->hdr.channel);
- buf_len = __le32_to_cpu(event->hdr.buf_len);
- rx_status = __le32_to_cpu(event->hdr.status);
- snr = __le32_to_cpu(event->hdr.snr);
- phy_mode = __le32_to_cpu(event->hdr.phy_mode);
- rate = __le32_to_cpu(event->hdr.rate);
+ channel = __le32_to_cpu(ev_hdr->channel);
+ buf_len = __le32_to_cpu(ev_hdr->buf_len);
+ rx_status = __le32_to_cpu(ev_hdr->status);
+ snr = __le32_to_cpu(ev_hdr->snr);
+ phy_mode = __le32_to_cpu(ev_hdr->phy_mode);
+ rate = __le32_to_cpu(ev_hdr->rate);
memset(status, 0, sizeof(*status));
@@ -358,7 +405,7 @@ static int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
status->signal = snr + ATH10K_DEFAULT_NOISE_FLOOR;
status->rate_idx = get_rate_idx(rate, status->band);
- skb_pull(skb, sizeof(event->hdr));
+ skb_pull(skb, pull_len);
hdr = (struct ieee80211_hdr *)skb->data;
fc = le16_to_cpu(hdr->frame_control);
@@ -734,10 +781,8 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
int i = -1;
struct wmi_bcn_info *bcn_info;
struct ath10k_vif *arvif;
- struct wmi_bcn_tx_arg arg;
struct sk_buff *bcn;
int vdev_id = 0;
- int ret;
ath10k_dbg(ATH10K_DBG_MGMT, "WMI_HOST_SWBA_EVENTID\n");
@@ -794,17 +839,17 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
ath10k_wmi_update_tim(ar, arvif, bcn, bcn_info);
ath10k_wmi_update_noa(ar, arvif, bcn, bcn_info);
- arg.vdev_id = arvif->vdev_id;
- arg.tx_rate = 0;
- arg.tx_power = 0;
- arg.bcn = bcn->data;
- arg.bcn_len = bcn->len;
+ spin_lock_bh(&ar->data_lock);
+ if (arvif->beacon) {
+ ath10k_warn("SWBA overrun on vdev %d\n",
+ arvif->vdev_id);
+ dev_kfree_skb_any(arvif->beacon);
+ }
- ret = ath10k_wmi_beacon_send(ar, &arg);
- if (ret)
- ath10k_warn("could not send beacon (%d)\n", ret);
+ arvif->beacon = bcn;
- dev_kfree_skb_any(bcn);
+ ath10k_wmi_tx_beacon_nowait(arvif);
+ spin_unlock_bh(&ar->data_lock);
}
}
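
The SWBA handler above now parks the beacon in arvif->beacon under data_lock rather than sending synchronously; a beacon still parked when the next SWBA fires is the overrun case that gets warned about and freed. A simplified, lock-free sketch of that stash-and-replace pattern (all names invented, credits assumed exhausted so the stash path is exercised):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct fake_vif {
	char *beacon;		/* non-NULL: a beacon is still waiting */
};

static int try_send_nowait(struct fake_vif *vif)
{
	(void)vif;
	return -1;		/* pretend we are out of tx credits */
}

static void stash_beacon(struct fake_vif *vif, char *bcn)
{
	if (vif->beacon) {
		printf("SWBA overrun, dropping stale beacon\n");
		free(vif->beacon);
	}
	vif->beacon = bcn;

	if (try_send_nowait(vif) == 0) {
		free(vif->beacon);
		vif->beacon = NULL;
	}
}

int main(void)
{
	struct fake_vif vif = { 0 };

	stash_beacon(&vif, strdup("bcn1"));
	stash_beacon(&vif, strdup("bcn2"));	/* hits the overrun path */
	free(vif.beacon);
	return 0;
}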
@@ -943,6 +988,9 @@ static void ath10k_wmi_service_ready_event_rx(struct ath10k *ar,
ar->phy_capability = __le32_to_cpu(ev->phy_capability);
ar->num_rf_chains = __le32_to_cpu(ev->num_rf_chains);
+ if (ar->fw_version_build > 636)
+ set_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX, ar->fw_features);
+
if (ar->num_rf_chains > WMI_MAX_SPATIAL_STREAM) {
ath10k_warn("hardware advertises support for more spatial streams than it should (%d > %d)\n",
ar->num_rf_chains, WMI_MAX_SPATIAL_STREAM);
@@ -1007,7 +1055,7 @@ static int ath10k_wmi_ready_event_rx(struct ath10k *ar, struct sk_buff *skb)
return 0;
}
-static void ath10k_wmi_event_process(struct ath10k *ar, struct sk_buff *skb)
+static void ath10k_wmi_process_rx(struct ath10k *ar, struct sk_buff *skb)
{
struct wmi_cmd_hdr *cmd_hdr;
enum wmi_event_id id;
@@ -1126,64 +1174,18 @@ static void ath10k_wmi_event_process(struct ath10k *ar, struct sk_buff *skb)
dev_kfree_skb(skb);
}
-static void ath10k_wmi_event_work(struct work_struct *work)
-{
- struct ath10k *ar = container_of(work, struct ath10k,
- wmi.wmi_event_work);
- struct sk_buff *skb;
-
- for (;;) {
- skb = skb_dequeue(&ar->wmi.wmi_event_list);
- if (!skb)
- break;
-
- ath10k_wmi_event_process(ar, skb);
- }
-}
-
-static void ath10k_wmi_process_rx(struct ath10k *ar, struct sk_buff *skb)
-{
- struct wmi_cmd_hdr *cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
- enum wmi_event_id event_id;
-
- event_id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
-
- /* some events need to be handled ASAP
- * and thus can't be deferred to a worker thread */
- switch (event_id) {
- case WMI_HOST_SWBA_EVENTID:
- case WMI_MGMT_RX_EVENTID:
- ath10k_wmi_event_process(ar, skb);
- return;
- default:
- break;
- }
-
- skb_queue_tail(&ar->wmi.wmi_event_list, skb);
- queue_work(ar->workqueue, &ar->wmi.wmi_event_work);
-}
-
/* WMI Initialization functions */
int ath10k_wmi_attach(struct ath10k *ar)
{
init_completion(&ar->wmi.service_ready);
init_completion(&ar->wmi.unified_ready);
- init_waitqueue_head(&ar->wmi.wq);
-
- skb_queue_head_init(&ar->wmi.wmi_event_list);
- INIT_WORK(&ar->wmi.wmi_event_work, ath10k_wmi_event_work);
+ init_waitqueue_head(&ar->wmi.tx_credits_wq);
return 0;
}
void ath10k_wmi_detach(struct ath10k *ar)
{
- /* HTC should've drained the packets already */
- if (WARN_ON(atomic_read(&ar->wmi.pending_tx_count) > 0))
- ath10k_warn("there are still pending packets\n");
-
- cancel_work_sync(&ar->wmi.wmi_event_work);
- skb_queue_purge(&ar->wmi.wmi_event_list);
}
int ath10k_wmi_connect_htc_service(struct ath10k *ar)
@@ -1198,6 +1200,7 @@ int ath10k_wmi_connect_htc_service(struct ath10k *ar)
/* these fields are the same for all service endpoints */
conn_req.ep_ops.ep_tx_complete = ath10k_wmi_htc_tx_complete;
conn_req.ep_ops.ep_rx_complete = ath10k_wmi_process_rx;
+ conn_req.ep_ops.ep_tx_credits = ath10k_wmi_op_ep_tx_credits;
/* connect to control service */
conn_req.service_id = ATH10K_HTC_SVC_ID_WMI_CONTROL;
@@ -2108,7 +2111,8 @@ int ath10k_wmi_peer_assoc(struct ath10k *ar,
return ath10k_wmi_cmd_send(ar, skb, WMI_PEER_ASSOC_CMDID);
}
-int ath10k_wmi_beacon_send(struct ath10k *ar, const struct wmi_bcn_tx_arg *arg)
+int ath10k_wmi_beacon_send_nowait(struct ath10k *ar,
+ const struct wmi_bcn_tx_arg *arg)
{
struct wmi_bcn_tx_cmd *cmd;
struct sk_buff *skb;
@@ -2124,7 +2128,7 @@ int ath10k_wmi_beacon_send(struct ath10k *ar, const struct wmi_bcn_tx_arg *arg)
cmd->hdr.bcn_len = __cpu_to_le32(arg->bcn_len);
memcpy(cmd->bcn, arg->bcn, arg->bcn_len);
- return ath10k_wmi_cmd_send(ar, skb, WMI_BCN_TX_CMDID);
+ return ath10k_wmi_cmd_send_nowait(ar, skb, WMI_BCN_TX_CMDID);
}
static void ath10k_wmi_pdev_set_wmm_param(struct wmi_wmm_params *params,
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
index 2c5a4f8..2c52c23 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.h
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -508,6 +508,48 @@ enum wmi_phy_mode {
MODE_MAX = 14
};
+static inline const char *ath10k_wmi_phymode_str(enum wmi_phy_mode mode)
+{
+ switch (mode) {
+ case MODE_11A:
+ return "11a";
+ case MODE_11G:
+ return "11g";
+ case MODE_11B:
+ return "11b";
+ case MODE_11GONLY:
+ return "11gonly";
+ case MODE_11NA_HT20:
+ return "11na-ht20";
+ case MODE_11NG_HT20:
+ return "11ng-ht20";
+ case MODE_11NA_HT40:
+ return "11na-ht40";
+ case MODE_11NG_HT40:
+ return "11ng-ht40";
+ case MODE_11AC_VHT20:
+ return "11ac-vht20";
+ case MODE_11AC_VHT40:
+ return "11ac-vht40";
+ case MODE_11AC_VHT80:
+ return "11ac-vht80";
+ case MODE_11AC_VHT20_2G:
+ return "11ac-vht20-2g";
+ case MODE_11AC_VHT40_2G:
+ return "11ac-vht40-2g";
+ case MODE_11AC_VHT80_2G:
+ return "11ac-vht80-2g";
+ case MODE_UNKNOWN:
+ /* skip */
+ break;
+
+ /* no default handler to allow compiler to check that the
+ * enum is fully handled */
+ };
+
+ return "<unknown>";
+}
+
#define WMI_CHAN_LIST_TAG 0x1
#define WMI_SSID_LIST_TAG 0x2
#define WMI_BSSID_LIST_TAG 0x3
@@ -763,14 +805,6 @@ struct wmi_service_ready_event {
struct wlan_host_mem_req mem_reqs[1];
} __packed;
-/*
- * status consists of upper 16 bits of int status and lower 16 bits of
- * module ID that returned the status
- */
-#define WLAN_INIT_STATUS_SUCCESS 0x0
-#define WLAN_GET_INIT_STATUS_REASON(status) ((status) & 0xffff)
-#define WLAN_GET_INIT_STATUS_MODULE_ID(status) (((status) >> 16) & 0xffff)
-
#define WMI_SERVICE_READY_TIMEOUT_HZ (5*HZ)
#define WMI_UNIFIED_READY_TIMEOUT_HZ (5*HZ)
@@ -1268,7 +1302,7 @@ struct wmi_scan_event {
* good idea to pass all the fields in the RX status
* descriptor up to the host.
*/
-struct wmi_mgmt_rx_hdr {
+struct wmi_mgmt_rx_hdr_v1 {
__le32 channel;
__le32 snr;
__le32 rate;
@@ -1277,8 +1311,18 @@ struct wmi_mgmt_rx_hdr {
__le32 status; /* %WMI_RX_STATUS_ */
} __packed;
-struct wmi_mgmt_rx_event {
- struct wmi_mgmt_rx_hdr hdr;
+struct wmi_mgmt_rx_hdr_v2 {
+ struct wmi_mgmt_rx_hdr_v1 v1;
+ __le32 rssi_ctl[4];
+} __packed;
+
+struct wmi_mgmt_rx_event_v1 {
+ struct wmi_mgmt_rx_hdr_v1 hdr;
+ u8 buf[0];
+} __packed;
+
+struct wmi_mgmt_rx_event_v2 {
+ struct wmi_mgmt_rx_hdr_v2 hdr;
u8 buf[0];
} __packed;
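
Because wmi_mgmt_rx_hdr_v2 embeds the v1 header as its first member and only appends rssi_ctl[] after it, a pointer to the v1 portion is valid for either event version; the receive path in wmi.c merely picks the right pull length. A standalone sketch of that layering, with the field set trimmed for brevity:

#include <stdint.h>
#include <stdio.h>

struct hdr_v1 {
	uint32_t channel, snr, rate, phy_mode, buf_len, status;
};

struct hdr_v2 {
	struct hdr_v1 v1;	/* v1 stays at offset 0 */
	uint32_t rssi_ctl[4];
};

int main(void)
{
	int fw_has_ext_rx = 1;	/* stands in for the fw_features test */
	size_t pull_len = fw_has_ext_rx ? sizeof(struct hdr_v2)
					: sizeof(struct hdr_v1);

	printf("frame payload starts at offset %zu\n", pull_len);
	return 0;
}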
@@ -3000,7 +3044,6 @@ struct wmi_force_fw_hang_cmd {
#define WMI_MAX_EVENT 0x1000
/* Maximum number of pending TXed WMI packets */
-#define WMI_MAX_PENDING_TX_COUNT 128
#define WMI_SKB_HEADROOM sizeof(struct wmi_cmd_hdr)
/* By default disable power save for IBSS */
@@ -3013,7 +3056,6 @@ int ath10k_wmi_attach(struct ath10k *ar);
void ath10k_wmi_detach(struct ath10k *ar);
int ath10k_wmi_wait_for_service_ready(struct ath10k *ar);
int ath10k_wmi_wait_for_unified_ready(struct ath10k *ar);
-void ath10k_wmi_flush_tx(struct ath10k *ar);
int ath10k_wmi_connect_htc_service(struct ath10k *ar);
int ath10k_wmi_pdev_set_channel(struct ath10k *ar,
@@ -3066,7 +3108,8 @@ int ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,
enum wmi_ap_ps_peer_param param_id, u32 value);
int ath10k_wmi_scan_chan_list(struct ath10k *ar,
const struct wmi_scan_chan_list_arg *arg);
-int ath10k_wmi_beacon_send(struct ath10k *ar, const struct wmi_bcn_tx_arg *arg);
+int ath10k_wmi_beacon_send_nowait(struct ath10k *ar,
+ const struct wmi_bcn_tx_arg *arg);
int ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
const struct wmi_pdev_set_wmm_params_arg *arg);
int ath10k_wmi_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id);
diff --git a/drivers/net/wireless/ath/ath5k/ahb.c b/drivers/net/wireless/ath/ath5k/ahb.c
index e9bc9e6..79bffe1 100644
--- a/drivers/net/wireless/ath/ath5k/ahb.c
+++ b/drivers/net/wireless/ath/ath5k/ahb.c
@@ -37,12 +37,9 @@ ath5k_ahb_eeprom_read(struct ath_common *common, u32 off, u16 *data)
{
struct ath5k_hw *ah = common->priv;
struct platform_device *pdev = to_platform_device(ah->dev);
- struct ar231x_board_config *bcfg = pdev->dev.platform_data;
+ struct ar231x_board_config *bcfg = dev_get_platdata(&pdev->dev);
u16 *eeprom, *eeprom_end;
-
-
- bcfg = pdev->dev.platform_data;
eeprom = (u16 *) bcfg->radio;
eeprom_end = ((void *) bcfg->config) + BOARD_CONFIG_BUFSZ;
@@ -57,7 +54,7 @@ ath5k_ahb_eeprom_read(struct ath_common *common, u32 off, u16 *data)
int ath5k_hw_read_srev(struct ath5k_hw *ah)
{
struct platform_device *pdev = to_platform_device(ah->dev);
- struct ar231x_board_config *bcfg = pdev->dev.platform_data;
+ struct ar231x_board_config *bcfg = dev_get_platdata(&pdev->dev);
ah->ah_mac_srev = bcfg->devid;
return 0;
}
@@ -65,7 +62,7 @@ int ath5k_hw_read_srev(struct ath5k_hw *ah)
static int ath5k_ahb_eeprom_read_mac(struct ath5k_hw *ah, u8 *mac)
{
struct platform_device *pdev = to_platform_device(ah->dev);
- struct ar231x_board_config *bcfg = pdev->dev.platform_data;
+ struct ar231x_board_config *bcfg = dev_get_platdata(&pdev->dev);
u8 *cfg_mac;
if (to_platform_device(ah->dev)->id == 0)
@@ -87,7 +84,7 @@ static const struct ath_bus_ops ath_ahb_bus_ops = {
/*Initialization*/
static int ath_ahb_probe(struct platform_device *pdev)
{
- struct ar231x_board_config *bcfg = pdev->dev.platform_data;
+ struct ar231x_board_config *bcfg = dev_get_platdata(&pdev->dev);
struct ath5k_hw *ah;
struct ieee80211_hw *hw;
struct resource *res;
@@ -96,7 +93,7 @@ static int ath_ahb_probe(struct platform_device *pdev)
int ret = 0;
u32 reg;
- if (!pdev->dev.platform_data) {
+ if (!dev_get_platdata(&pdev->dev)) {
dev_err(&pdev->dev, "no platform data specified\n");
ret = -EINVAL;
goto err_out;
@@ -193,7 +190,7 @@ static int ath_ahb_probe(struct platform_device *pdev)
static int ath_ahb_remove(struct platform_device *pdev)
{
- struct ar231x_board_config *bcfg = pdev->dev.platform_data;
+ struct ar231x_board_config *bcfg = dev_get_platdata(&pdev->dev);
struct ieee80211_hw *hw = platform_get_drvdata(pdev);
struct ath5k_hw *ah;
u32 reg;
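
dev_get_platdata() is a thin accessor over the same field the old code dereferenced by hand, so these ath5k/ath9k conversions are behaviour-neutral. Its effect, sketched outside the kernel with a stand-in struct device:

#include <stdio.h>

struct device {
	void *platform_data;
};

/* Mirrors the kernel helper: same value, clearer intent at call sites. */
static inline void *dev_get_platdata(const struct device *dev)
{
	return dev->platform_data;
}

int main(void)
{
	int board_cfg = 42;
	struct device dev = { .platform_data = &board_cfg };

	printf("%d\n", *(int *)dev_get_platdata(&dev));
	return 0;
}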
diff --git a/drivers/net/wireless/ath/ath9k/ahb.c b/drivers/net/wireless/ath/ath9k/ahb.c
index 072e4b5..2dff276 100644
--- a/drivers/net/wireless/ath/ath9k/ahb.c
+++ b/drivers/net/wireless/ath/ath9k/ahb.c
@@ -54,7 +54,7 @@ static bool ath_ahb_eeprom_read(struct ath_common *common, u32 off, u16 *data)
struct platform_device *pdev = to_platform_device(sc->dev);
struct ath9k_platform_data *pdata;
- pdata = (struct ath9k_platform_data *) pdev->dev.platform_data;
+ pdata = dev_get_platdata(&pdev->dev);
if (off >= (ARRAY_SIZE(pdata->eeprom_data))) {
ath_err(common,
"%s: flash read failed, offset %08x is out of range\n",
@@ -84,7 +84,7 @@ static int ath_ahb_probe(struct platform_device *pdev)
struct ath_hw *ah;
char hw_name[64];
- if (!pdev->dev.platform_data) {
+ if (!dev_get_platdata(&pdev->dev)) {
dev_err(&pdev->dev, "no platform data specified\n");
return -EINVAL;
}
diff --git a/drivers/net/wireless/ath/ath9k/ani.c b/drivers/net/wireless/ath/ath9k/ani.c
index be466b0..d28923b 100644
--- a/drivers/net/wireless/ath/ath9k/ani.c
+++ b/drivers/net/wireless/ath/ath9k/ani.c
@@ -338,10 +338,9 @@ void ath9k_ani_reset(struct ath_hw *ah, bool is_scanning)
aniState->cckNoiseImmunityLevel !=
ATH9K_ANI_CCK_DEF_LEVEL) {
ath_dbg(common, ANI,
- "Restore defaults: opmode %u chan %d Mhz/0x%x is_scanning=%d ofdm:%d cck:%d\n",
+ "Restore defaults: opmode %u chan %d Mhz is_scanning=%d ofdm:%d cck:%d\n",
ah->opmode,
chan->channel,
- chan->channelFlags,
is_scanning,
aniState->ofdmNoiseImmunityLevel,
aniState->cckNoiseImmunityLevel);
@@ -354,10 +353,9 @@ void ath9k_ani_reset(struct ath_hw *ah, bool is_scanning)
* restore historical levels for this channel
*/
ath_dbg(common, ANI,
- "Restore history: opmode %u chan %d Mhz/0x%x is_scanning=%d ofdm:%d cck:%d\n",
+ "Restore history: opmode %u chan %d Mhz is_scanning=%d ofdm:%d cck:%d\n",
ah->opmode,
chan->channel,
- chan->channelFlags,
is_scanning,
aniState->ofdmNoiseImmunityLevel,
aniState->cckNoiseImmunityLevel);
diff --git a/drivers/net/wireless/ath/ath9k/antenna.c b/drivers/net/wireless/ath/ath9k/antenna.c
index dd1cc73..bd048cc 100644
--- a/drivers/net/wireless/ath/ath9k/antenna.c
+++ b/drivers/net/wireless/ath/ath9k/antenna.c
@@ -332,7 +332,7 @@ static void ath_select_ant_div_from_quick_scan(struct ath_ant_comb *antcomb,
}
if (antcomb->rssi_lna2 > antcomb->rssi_lna1 +
- ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA)
+ div_ant_conf->lna1_lna2_switch_delta)
div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA2;
else
div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA1;
@@ -554,42 +554,22 @@ static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
ant_conf->fast_div_bias = 0x1;
break;
case 0x10: /* LNA2 A-B */
- if ((antcomb->scan == 0) &&
- (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO)) {
- ant_conf->fast_div_bias = 0x3f;
- } else {
- ant_conf->fast_div_bias = 0x1;
- }
+ ant_conf->fast_div_bias = 0x2;
break;
case 0x12: /* LNA2 LNA1 */
- ant_conf->fast_div_bias = 0x39;
+ ant_conf->fast_div_bias = 0x3f;
break;
case 0x13: /* LNA2 A+B */
- if ((antcomb->scan == 0) &&
- (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO)) {
- ant_conf->fast_div_bias = 0x3f;
- } else {
- ant_conf->fast_div_bias = 0x1;
- }
+ ant_conf->fast_div_bias = 0x2;
break;
case 0x20: /* LNA1 A-B */
- if ((antcomb->scan == 0) &&
- (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO)) {
- ant_conf->fast_div_bias = 0x3f;
- } else {
- ant_conf->fast_div_bias = 0x4;
- }
+ ant_conf->fast_div_bias = 0x3;
break;
case 0x21: /* LNA1 LNA2 */
- ant_conf->fast_div_bias = 0x6;
+ ant_conf->fast_div_bias = 0x3;
break;
case 0x23: /* LNA1 A+B */
- if ((antcomb->scan == 0) &&
- (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO)) {
- ant_conf->fast_div_bias = 0x3f;
- } else {
- ant_conf->fast_div_bias = 0x6;
- }
+ ant_conf->fast_div_bias = 0x3;
break;
case 0x30: /* A+B A-B */
ant_conf->fast_div_bias = 0x1;
@@ -638,7 +618,7 @@ static void ath_ant_try_scan(struct ath_ant_comb *antcomb,
antcomb->rssi_sub = alt_rssi_avg;
antcomb->scan = false;
if (antcomb->rssi_lna2 >
- (antcomb->rssi_lna1 + ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA)) {
+ (antcomb->rssi_lna1 + conf->lna1_lna2_switch_delta)) {
/* use LNA2 as main LNA */
if ((antcomb->rssi_add > antcomb->rssi_lna1) &&
(antcomb->rssi_add > antcomb->rssi_sub)) {
diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
index 0865647..ff415e8 100644
--- a/drivers/net/wireless/ath/ath9k/ar5008_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
@@ -626,12 +626,11 @@ static void ar5008_hw_override_ini(struct ath_hw *ah,
if (AR_SREV_9287_11_OR_LATER(ah))
val = val & (~AR_PCU_MISC_MODE2_HWWAR2);
+ val |= AR_PCU_MISC_MODE2_CFP_IGNORE;
+
REG_WRITE(ah, AR_PCU_MISC_MODE2, val);
}
- REG_SET_BIT(ah, AR_PHY_CCK_DETECT,
- AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV);
-
if (AR_SREV_9280_20_OR_LATER(ah))
return;
/*
@@ -667,14 +666,13 @@ static void ar5008_hw_set_channel_regs(struct ath_hw *ah,
if (IS_CHAN_HT40(chan)) {
phymode |= AR_PHY_FC_DYN2040_EN;
- if ((chan->chanmode == CHANNEL_A_HT40PLUS) ||
- (chan->chanmode == CHANNEL_G_HT40PLUS))
+ if (IS_CHAN_HT40PLUS(chan))
phymode |= AR_PHY_FC_DYN2040_PRI_CH;
}
REG_WRITE(ah, AR_PHY_TURBO, phymode);
- ath9k_hw_set11nmac2040(ah);
+ ath9k_hw_set11nmac2040(ah, chan);
ENABLE_REGWRITE_BUFFER(ah);
@@ -692,31 +690,12 @@ static int ar5008_hw_process_ini(struct ath_hw *ah,
int i, regWrites = 0;
u32 modesIndex, freqIndex;
- switch (chan->chanmode) {
- case CHANNEL_A:
- case CHANNEL_A_HT20:
- modesIndex = 1;
- freqIndex = 1;
- break;
- case CHANNEL_A_HT40PLUS:
- case CHANNEL_A_HT40MINUS:
- modesIndex = 2;
+ if (IS_CHAN_5GHZ(chan)) {
freqIndex = 1;
- break;
- case CHANNEL_G:
- case CHANNEL_G_HT20:
- case CHANNEL_B:
- modesIndex = 4;
- freqIndex = 2;
- break;
- case CHANNEL_G_HT40PLUS:
- case CHANNEL_G_HT40MINUS:
- modesIndex = 3;
+ modesIndex = IS_CHAN_HT40(chan) ? 2 : 1;
+ } else {
freqIndex = 2;
- break;
-
- default:
- return -EINVAL;
+ modesIndex = IS_CHAN_HT40(chan) ? 3 : 4;
}
/*
@@ -815,8 +794,10 @@ static void ar5008_hw_set_rfmode(struct ath_hw *ah, struct ath9k_channel *chan)
if (chan == NULL)
return;
- rfMode |= (IS_CHAN_B(chan) || IS_CHAN_G(chan))
- ? AR_PHY_MODE_DYNAMIC : AR_PHY_MODE_OFDM;
+ if (IS_CHAN_2GHZ(chan))
+ rfMode |= AR_PHY_MODE_DYNAMIC;
+ else
+ rfMode |= AR_PHY_MODE_OFDM;
if (!AR_SREV_9280_20_OR_LATER(ah))
rfMode |= (IS_CHAN_5GHZ(chan)) ?
@@ -1219,12 +1200,11 @@ static void ar5008_hw_ani_cache_ini_regs(struct ath_hw *ah)
iniDef = &aniState->iniDef;
- ath_dbg(common, ANI, "ver %d.%d opmode %u chan %d Mhz/0x%x\n",
+ ath_dbg(common, ANI, "ver %d.%d opmode %u chan %d Mhz\n",
ah->hw_version.macVersion,
ah->hw_version.macRev,
ah->opmode,
- chan->channel,
- chan->channelFlags);
+ chan->channel);
val = REG_READ(ah, AR_PHY_SFCORR);
iniDef->m1Thresh = MS(val, AR_PHY_SFCORR_M1_THRESH);
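
The repeated chanmode switches in ar5008/ar9002/ar9003 all collapse to the same two predicates; the resulting initvals-column mapping is 5 GHz HT20/legacy -> 1, 5 GHz HT40 -> 2, 2 GHz HT40 -> 3, 2 GHz HT20/legacy/B -> 4. A standalone restatement for checking the mapping:

#include <stdbool.h>
#include <stdio.h>

static int modes_index(bool is_5ghz, bool is_ht40)
{
	if (is_5ghz)
		return is_ht40 ? 2 : 1;
	return is_ht40 ? 3 : 4;
}

int main(void)
{
	printf("%d %d %d %d\n",
	       modes_index(true, false), modes_index(true, true),
	       modes_index(false, true), modes_index(false, false));
	return 0;	/* prints: 1 2 3 4 */
}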
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_calib.c b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
index 9f58974..cdc7400 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
@@ -33,15 +33,12 @@ static bool ar9002_hw_is_cal_supported(struct ath_hw *ah,
bool supported = false;
switch (ah->supp_cals & cal_type) {
case IQ_MISMATCH_CAL:
- /* Run IQ Mismatch for non-CCK only */
- if (!IS_CHAN_B(chan))
- supported = true;
+ supported = true;
break;
case ADC_GAIN_CAL:
case ADC_DC_CAL:
/* Run ADC Gain Cal for non-CCK & non 2GHz-HT20 only */
- if (!IS_CHAN_B(chan) &&
- !((IS_CHAN_2GHZ(chan) || IS_CHAN_A_FAST_CLOCK(ah, chan)) &&
+ if (!((IS_CHAN_2GHZ(chan) || IS_CHAN_A_FAST_CLOCK(ah, chan)) &&
IS_CHAN_HT20(chan)))
supported = true;
break;
@@ -671,7 +668,7 @@ static bool ar9002_hw_calibrate(struct ath_hw *ah,
nfcal = !!(REG_READ(ah, AR_PHY_AGC_CONTROL) & AR_PHY_AGC_CONTROL_NF);
if (ah->caldata)
- nfcal_pending = ah->caldata->nfcal_pending;
+ nfcal_pending = test_bit(NFCAL_PENDING, &ah->caldata->cal_flags);
if (currCal && !nfcal &&
(currCal->calState == CAL_RUNNING ||
@@ -861,7 +858,7 @@ static bool ar9002_hw_init_cal(struct ath_hw *ah, struct ath9k_channel *chan)
ar9002_hw_pa_cal(ah, true);
if (ah->caldata)
- ah->caldata->nfcal_pending = true;
+ set_bit(NFCAL_PENDING, &ah->caldata->cal_flags);
ah->cal_list = ah->cal_list_last = ah->cal_list_curr = NULL;
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_hw.c b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
index fb61b08..5c95fd9 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
@@ -419,28 +419,10 @@ void ar9002_hw_load_ani_reg(struct ath_hw *ah, struct ath9k_channel *chan)
u32 modesIndex;
int i;
- switch (chan->chanmode) {
- case CHANNEL_A:
- case CHANNEL_A_HT20:
- modesIndex = 1;
- break;
- case CHANNEL_A_HT40PLUS:
- case CHANNEL_A_HT40MINUS:
- modesIndex = 2;
- break;
- case CHANNEL_G:
- case CHANNEL_G_HT20:
- case CHANNEL_B:
- modesIndex = 4;
- break;
- case CHANNEL_G_HT40PLUS:
- case CHANNEL_G_HT40MINUS:
- modesIndex = 3;
- break;
-
- default:
- return;
- }
+ if (IS_CHAN_5GHZ(chan))
+ modesIndex = IS_CHAN_HT40(chan) ? 2 : 1;
+ else
+ modesIndex = IS_CHAN_HT40(chan) ? 3 : 4;
ENABLE_REGWRITE_BUFFER(ah);
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_phy.c b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
index 1fc1fa9..17970d4 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
@@ -485,7 +485,7 @@ static void ar9002_hw_do_getnf(struct ath_hw *ah,
if (IS_CHAN_HT40(ah->curchan))
nfarray[3] = sign_extend32(nf, 8);
- if (AR_SREV_9285(ah) || AR_SREV_9271(ah))
+ if (!(ah->rxchainmask & BIT(1)))
return;
nf = MS(REG_READ(ah, AR_PHY_CH1_CCA), AR9280_PHY_CH1_MINCCA_PWR);
@@ -532,6 +532,7 @@ static void ar9002_hw_antdiv_comb_conf_get(struct ath_hw *ah,
AR_PHY_9285_ANT_DIV_ALT_LNACONF_S;
antconf->fast_div_bias = (regval & AR_PHY_9285_FAST_DIV_BIAS) >>
AR_PHY_9285_FAST_DIV_BIAS_S;
+ antconf->lna1_lna2_switch_delta = -1;
antconf->lna1_lna2_delta = -3;
antconf->div_group = 0;
}
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
index 6988e1d..22934d3 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
@@ -727,8 +727,12 @@ static void ar9003_hw_tx_iqcal_load_avg_2_passes(struct ath_hw *ah,
REG_RMW_FIELD(ah, AR_PHY_RX_IQCAL_CORR_B0,
AR_PHY_RX_IQCAL_CORR_B0_LOOPBACK_IQCORR_EN, 0x1);
- if (caldata)
- caldata->done_txiqcal_once = is_reusable;
+ if (caldata) {
+ if (is_reusable)
+ set_bit(TXIQCAL_DONE, &caldata->cal_flags);
+ else
+ clear_bit(TXIQCAL_DONE, &caldata->cal_flags);
+ }
return;
}
@@ -961,18 +965,44 @@ static void ar9003_hw_manual_peak_cal(struct ath_hw *ah, u8 chain, bool is_2g)
}
static void ar9003_hw_do_manual_peak_cal(struct ath_hw *ah,
- struct ath9k_channel *chan)
+ struct ath9k_channel *chan,
+ bool run_rtt_cal)
{
+ struct ath9k_hw_cal_data *caldata = ah->caldata;
int i;
if (!AR_SREV_9462(ah) && !AR_SREV_9565(ah) && !AR_SREV_9485(ah))
return;
+ if ((ah->caps.hw_caps & ATH9K_HW_CAP_RTT) && !run_rtt_cal)
+ return;
+
for (i = 0; i < AR9300_MAX_CHAINS; i++) {
if (!(ah->rxchainmask & (1 << i)))
continue;
ar9003_hw_manual_peak_cal(ah, i, IS_CHAN_2GHZ(chan));
}
+
+ if (caldata)
+ set_bit(SW_PKDET_DONE, &caldata->cal_flags);
+
+ if ((ah->caps.hw_caps & ATH9K_HW_CAP_RTT) && caldata) {
+ if (IS_CHAN_2GHZ(chan)){
+ caldata->caldac[0] = REG_READ_FIELD(ah,
+ AR_PHY_65NM_RXRF_AGC(0),
+ AR_PHY_65NM_RXRF_AGC_AGC2G_CALDAC_OVR);
+ caldata->caldac[1] = REG_READ_FIELD(ah,
+ AR_PHY_65NM_RXRF_AGC(1),
+ AR_PHY_65NM_RXRF_AGC_AGC2G_CALDAC_OVR);
+ } else {
+ caldata->caldac[0] = REG_READ_FIELD(ah,
+ AR_PHY_65NM_RXRF_AGC(0),
+ AR_PHY_65NM_RXRF_AGC_AGC5G_CALDAC_OVR);
+ caldata->caldac[1] = REG_READ_FIELD(ah,
+ AR_PHY_65NM_RXRF_AGC(1),
+ AR_PHY_65NM_RXRF_AGC_AGC5G_CALDAC_OVR);
+ }
+ }
}
static void ar9003_hw_cl_cal_post_proc(struct ath_hw *ah, bool is_reusable)
@@ -990,7 +1020,7 @@ static void ar9003_hw_cl_cal_post_proc(struct ath_hw *ah, bool is_reusable)
txclcal_done = !!(REG_READ(ah, AR_PHY_AGC_CONTROL) &
AR_PHY_AGC_CONTROL_CLC_SUCCESS);
- if (caldata->done_txclcal_once) {
+ if (test_bit(TXCLCAL_DONE, &caldata->cal_flags)) {
for (i = 0; i < AR9300_MAX_CHAINS; i++) {
if (!(ah->txchainmask & (1 << i)))
continue;
@@ -1006,7 +1036,7 @@ static void ar9003_hw_cl_cal_post_proc(struct ath_hw *ah, bool is_reusable)
caldata->tx_clcal[i][j] =
REG_READ(ah, CL_TAB_ENTRY(cl_idx[i]));
}
- caldata->done_txclcal_once = true;
+ set_bit(TXCLCAL_DONE, &caldata->cal_flags);
}
}
@@ -1019,6 +1049,7 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah,
bool is_reusable = true, status = true;
bool run_rtt_cal = false, run_agc_cal, sep_iq_cal = false;
bool rtt = !!(ah->caps.hw_caps & ATH9K_HW_CAP_RTT);
+ u32 rx_delay = 0;
u32 agc_ctrl = 0, agc_supp_cals = AR_PHY_AGC_CONTROL_OFFSET_CAL |
AR_PHY_AGC_CONTROL_FLTR_CAL |
AR_PHY_AGC_CONTROL_PKDET_CAL;
@@ -1042,17 +1073,22 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah,
ar9003_hw_rtt_clear_hist(ah);
}
- if (rtt && !run_rtt_cal) {
- agc_ctrl = REG_READ(ah, AR_PHY_AGC_CONTROL);
- agc_supp_cals &= agc_ctrl;
- agc_ctrl &= ~(AR_PHY_AGC_CONTROL_OFFSET_CAL |
- AR_PHY_AGC_CONTROL_FLTR_CAL |
- AR_PHY_AGC_CONTROL_PKDET_CAL);
- REG_WRITE(ah, AR_PHY_AGC_CONTROL, agc_ctrl);
+ if (rtt) {
+ if (!run_rtt_cal) {
+ agc_ctrl = REG_READ(ah, AR_PHY_AGC_CONTROL);
+ agc_supp_cals &= agc_ctrl;
+ agc_ctrl &= ~(AR_PHY_AGC_CONTROL_OFFSET_CAL |
+ AR_PHY_AGC_CONTROL_FLTR_CAL |
+ AR_PHY_AGC_CONTROL_PKDET_CAL);
+ REG_WRITE(ah, AR_PHY_AGC_CONTROL, agc_ctrl);
+ } else {
+ if (ah->ah_flags & AH_FASTCC)
+ run_agc_cal = true;
+ }
}
if (ah->enabled_cals & TX_CL_CAL) {
- if (caldata && caldata->done_txclcal_once)
+ if (caldata && test_bit(TXCLCAL_DONE, &caldata->cal_flags))
REG_CLR_BIT(ah, AR_PHY_CL_CAL_CTL,
AR_PHY_CL_CAL_ENABLE);
else {
@@ -1076,14 +1112,14 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah,
* AGC calibration
*/
if (ah->enabled_cals & TX_IQ_ON_AGC_CAL) {
- if (caldata && !caldata->done_txiqcal_once)
+ if (caldata && !test_bit(TXIQCAL_DONE, &caldata->cal_flags))
REG_SET_BIT(ah, AR_PHY_TX_IQCAL_CONTROL_0,
AR_PHY_TX_IQCAL_CONTROL_0_ENABLE_TXIQ_CAL);
else
REG_CLR_BIT(ah, AR_PHY_TX_IQCAL_CONTROL_0,
AR_PHY_TX_IQCAL_CONTROL_0_ENABLE_TXIQ_CAL);
txiqcal_done = run_agc_cal = true;
- } else if (caldata && !caldata->done_txiqcal_once) {
+ } else if (caldata && !test_bit(TXIQCAL_DONE, &caldata->cal_flags)) {
run_agc_cal = true;
sep_iq_cal = true;
}
@@ -1099,6 +1135,15 @@ skip_tx_iqcal:
REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN);
}
+ if (REG_READ(ah, AR_PHY_CL_CAL_CTL) & AR_PHY_CL_CAL_ENABLE) {
+ rx_delay = REG_READ(ah, AR_PHY_RX_DELAY);
+ /* Disable BB_active */
+ REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_DIS);
+ udelay(5);
+ REG_WRITE(ah, AR_PHY_RX_DELAY, AR_PHY_RX_DELAY_DELAY);
+ REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN);
+ }
+
if (run_agc_cal || !(ah->ah_flags & AH_FASTCC)) {
/* Calibrate the AGC */
REG_WRITE(ah, AR_PHY_AGC_CONTROL,
@@ -1110,7 +1155,12 @@ skip_tx_iqcal:
AR_PHY_AGC_CONTROL_CAL,
0, AH_WAIT_TIMEOUT);
- ar9003_hw_do_manual_peak_cal(ah, chan);
+ ar9003_hw_do_manual_peak_cal(ah, chan, run_rtt_cal);
+ }
+
+ if (REG_READ(ah, AR_PHY_CL_CAL_CTL) & AR_PHY_CL_CAL_ENABLE) {
+ REG_WRITE(ah, AR_PHY_RX_DELAY, rx_delay);
+ udelay(5);
}
if (ath9k_hw_mci_is_enabled(ah) && IS_CHAN_2GHZ(chan) && run_agc_cal)
@@ -1133,19 +1183,23 @@ skip_tx_iqcal:
if (txiqcal_done)
ar9003_hw_tx_iq_cal_post_proc(ah, is_reusable);
- else if (caldata && caldata->done_txiqcal_once)
+ else if (caldata && test_bit(TXIQCAL_DONE, &caldata->cal_flags))
ar9003_hw_tx_iq_cal_reload(ah);
ar9003_hw_cl_cal_post_proc(ah, is_reusable);
if (run_rtt_cal && caldata) {
if (is_reusable) {
- if (!ath9k_hw_rfbus_req(ah))
+ if (!ath9k_hw_rfbus_req(ah)) {
ath_err(ath9k_hw_common(ah),
"Could not stop baseband\n");
- else
+ } else {
ar9003_hw_rtt_fill_hist(ah);
+ if (test_bit(SW_PKDET_DONE, &caldata->cal_flags))
+ ar9003_hw_rtt_load_hist(ah);
+ }
+
ath9k_hw_rfbus_done(ah);
}
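
The per-channel calibration booleans (done_txiqcal_once, done_txclcal_once, rtt_done, plus the new NFCAL_PENDING and SW_PKDET_DONE states) are folded into one cal_flags word driven by set_bit()/clear_bit()/test_bit(). A userspace model of the same flag discipline; bit positions are illustrative, and unlike these macros the kernel helpers are atomic:

#include <stdio.h>

enum { TXIQCAL_DONE, TXCLCAL_DONE, RTT_DONE, SW_PKDET_DONE, NFCAL_PENDING };

#define SET(f, b)	((f) |=  (1UL << (b)))
#define CLR(f, b)	((f) &= ~(1UL << (b)))
#define TST(f, b)	(!!((f) & (1UL << (b))))

int main(void)
{
	unsigned long cal_flags = 0;

	SET(cal_flags, TXIQCAL_DONE);
	SET(cal_flags, SW_PKDET_DONE);
	CLR(cal_flags, TXIQCAL_DONE);	/* e.g. cleared on MCI end-reset */

	printf("txiq=%d pkdet=%d\n",
	       TST(cal_flags, TXIQCAL_DONE), TST(cal_flags, SW_PKDET_DONE));
	return 0;
}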
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index f486480..1ec5235 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -2991,7 +2991,10 @@ static u32 ath9k_hw_ar9300_get_eeprom(struct ath_hw *ah,
case EEP_CHAIN_MASK_REDUCE:
return (pBase->miscConfiguration >> 0x3) & 0x1;
case EEP_ANT_DIV_CTL1:
- return eep->base_ext1.ant_div_control;
+ if (AR_SREV_9565(ah))
+ return AR9300_EEP_ANTDIV_CONTROL_DEFAULT_VALUE;
+ else
+ return eep->base_ext1.ant_div_control;
case EEP_ANTENNA_GAIN_5G:
return eep->modalHeader5G.antennaGain;
case EEP_ANTENNA_GAIN_2G:
@@ -3424,12 +3427,12 @@ static u32 ath9k_hw_ar9003_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
struct ar9300_base_eep_hdr *pBase;
if (!dump_base_hdr) {
- len += snprintf(buf + len, size - len,
- "%20s :\n", "2GHz modal Header");
+ len += scnprintf(buf + len, size - len,
+ "%20s :\n", "2GHz modal Header");
len = ar9003_dump_modal_eeprom(buf, len, size,
&eep->modalHeader2G);
- len += snprintf(buf + len, size - len,
- "%20s :\n", "5GHz modal Header");
+ len += scnprintf(buf + len, size - len,
+ "%20s :\n", "5GHz modal Header");
len = ar9003_dump_modal_eeprom(buf, len, size,
&eep->modalHeader5G);
goto out;
@@ -3479,8 +3482,8 @@ static u32 ath9k_hw_ar9003_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
PR_EEP("Rx Gain", pBase->txrxgain & 0xf);
PR_EEP("SW Reg", le32_to_cpu(pBase->swreg));
- len += snprintf(buf + len, size - len, "%20s : %pM\n", "MacAddress",
- ah->eeprom.ar9300_eep.macAddr);
+ len += scnprintf(buf + len, size - len, "%20s : %pM\n", "MacAddress",
+ ah->eeprom.ar9300_eep.macAddr);
out:
if (len > size)
len = size;
@@ -3656,9 +3659,23 @@ static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
if (AR_SREV_9565(ah)) {
if (common->bt_ant_diversity) {
regval |= (1 << AR_PHY_ANT_SW_RX_PROT_S);
+
+ REG_SET_BIT(ah, AR_PHY_RESTART,
+ AR_PHY_RESTART_ENABLE_DIV_M2FLAG);
+
+ /* Force WLAN LNA diversity ON */
+ REG_SET_BIT(ah, AR_BTCOEX_WL_LNADIV,
+ AR_BTCOEX_WL_LNADIV_FORCE_ON);
} else {
regval &= ~(1 << AR_PHY_ANT_DIV_LNADIV_S);
regval &= ~(1 << AR_PHY_ANT_SW_RX_PROT_S);
+
+ REG_CLR_BIT(ah, AR_PHY_MC_GAIN_CTRL,
+ (1 << AR_PHY_ANT_SW_RX_PROT_S));
+
+ /* Force WLAN LNA diversity OFF */
+ REG_CLR_BIT(ah, AR_BTCOEX_WL_LNADIV,
+ AR_BTCOEX_WL_LNADIV_FORCE_ON);
}
}
@@ -3669,7 +3686,8 @@ static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
regval &= (~AR_FAST_DIV_ENABLE);
regval |= ((value >> 7) & 0x1) << AR_FAST_DIV_ENABLE_S;
- if (AR_SREV_9485(ah) && common->bt_ant_diversity)
+ if ((AR_SREV_9485(ah) || AR_SREV_9565(ah))
+ && common->bt_ant_diversity)
regval |= AR_FAST_DIV_ENABLE;
REG_WRITE(ah, AR_PHY_CCK_DETECT, regval);
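
The snprintf()-to-scnprintf() swap in the EEPROM dump matters on truncation: snprintf() returns the length that would have been written, so accumulating it into len can push buf + len past the end of the buffer, while scnprintf() returns only what was actually stored. The hazard, demonstrated with plain snprintf():

#include <stdio.h>

int main(void)
{
	char buf[8];
	int n = snprintf(buf, sizeof(buf), "0123456789");

	/* snprintf() reports 10 even though only 7 chars (plus NUL) fit;
	 * the kernel's scnprintf() would have returned 7 directly. */
	printf("returned %d, stored %zu\n", n, sizeof(buf) - 1);
	return (n < 0);
}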
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
index 75d4fb4..0e5daa5 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
@@ -52,6 +52,8 @@
#define AR9300_PAPRD_SCALE_2 0x70000000
#define AR9300_PAPRD_SCALE_2_S 28
+#define AR9300_EEP_ANTDIV_CONTROL_DEFAULT_VALUE 0xc9
+
/* Delta from which to start power to pdadc table */
/* This offset is used in both open loop and closed loop power control
* schemes. In open loop power control, it is not really needed, but for
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_hw.c b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
index 608bb48..b07f164 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
@@ -364,6 +364,8 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
INIT_INI_ARRAY(&ah->iniModesFastClock,
ar9565_1p0_modes_fast_clock);
+ INIT_INI_ARRAY(&ah->iniCckfirJapan2484,
+ ar9565_1p0_baseband_core_txfir_coeff_japan_2484);
} else {
/* mac */
INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
@@ -628,6 +630,9 @@ static void ar9003_rx_gain_table_mode0(struct ath_hw *ah)
else if (AR_SREV_9462_20(ah))
INIT_INI_ARRAY(&ah->iniModesRxGain,
ar9462_common_rx_gain_table_2p0);
+ else if (AR_SREV_9565(ah))
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9565_1p0_Common_rx_gain_table);
else
INIT_INI_ARRAY(&ah->iniModesRxGain,
ar9300Common_rx_gain_table_2p2);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mci.c b/drivers/net/wireless/ath/ath9k/ar9003_mci.c
index 8dd0692..7b94a6c 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mci.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mci.c
@@ -753,9 +753,9 @@ int ar9003_mci_end_reset(struct ath_hw *ah, struct ath9k_channel *chan,
1 << AR_PHY_TIMING_CONTROL4_DO_GAIN_DC_IQ_CAL_SHIFT);
if (caldata) {
- caldata->done_txiqcal_once = false;
- caldata->done_txclcal_once = false;
- caldata->rtt_done = false;
+ clear_bit(TXIQCAL_DONE, &caldata->cal_flags);
+ clear_bit(TXCLCAL_DONE, &caldata->cal_flags);
+ clear_bit(RTT_DONE, &caldata->cal_flags);
}
if (!ath9k_hw_init_cal(ah, chan))
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
index e897648..f3adafd 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
@@ -551,8 +551,7 @@ static void ar9003_hw_set_channel_regs(struct ath_hw *ah,
if (IS_CHAN_HT40(chan)) {
phymode |= AR_PHY_GC_DYN2040_EN;
/* Configure control (primary) channel at +-10MHz */
- if ((chan->chanmode == CHANNEL_A_HT40PLUS) ||
- (chan->chanmode == CHANNEL_G_HT40PLUS))
+ if (IS_CHAN_HT40PLUS(chan))
phymode |= AR_PHY_GC_DYN2040_PRI_CH;
}
@@ -565,7 +564,7 @@ static void ar9003_hw_set_channel_regs(struct ath_hw *ah,
REG_WRITE(ah, AR_PHY_GEN_CTRL, phymode);
/* Configure MAC for 20/40 operation */
- ath9k_hw_set11nmac2040(ah);
+ ath9k_hw_set11nmac2040(ah, chan);
/* global transmit timeout (25 TUs default)*/
REG_WRITE(ah, AR_GTXTO, 25 << AR_GTXTO_TIMEOUT_LIMIT_S);
@@ -627,11 +626,10 @@ static void ar9003_hw_override_ini(struct ath_hw *ah)
* MAC addr only will fail.
*/
val = REG_READ(ah, AR_PCU_MISC_MODE2) & (~AR_ADHOC_MCAST_KEYID_ENABLE);
- REG_WRITE(ah, AR_PCU_MISC_MODE2,
- val | AR_AGG_WEP_ENABLE_FIX | AR_AGG_WEP_ENABLE);
-
- REG_SET_BIT(ah, AR_PHY_CCK_DETECT,
- AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV);
+ val |= AR_AGG_WEP_ENABLE_FIX |
+ AR_AGG_WEP_ENABLE |
+ AR_PCU_MISC_MODE2_CFP_IGNORE;
+ REG_WRITE(ah, AR_PCU_MISC_MODE2, val);
if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
REG_WRITE(ah, AR_GLB_SWREG_DISCONT_MODE,
@@ -683,41 +681,22 @@ static int ar9550_hw_get_modes_txgain_index(struct ath_hw *ah,
{
int ret;
- switch (chan->chanmode) {
- case CHANNEL_A:
- case CHANNEL_A_HT20:
- if (chan->channel <= 5350)
- ret = 1;
- else if ((chan->channel > 5350) && (chan->channel <= 5600))
- ret = 3;
+ if (IS_CHAN_2GHZ(chan)) {
+ if (IS_CHAN_HT40(chan))
+ return 7;
else
- ret = 5;
- break;
-
- case CHANNEL_A_HT40PLUS:
- case CHANNEL_A_HT40MINUS:
- if (chan->channel <= 5350)
- ret = 2;
- else if ((chan->channel > 5350) && (chan->channel <= 5600))
- ret = 4;
- else
- ret = 6;
- break;
-
- case CHANNEL_G:
- case CHANNEL_G_HT20:
- case CHANNEL_B:
- ret = 8;
- break;
+ return 8;
+ }
- case CHANNEL_G_HT40PLUS:
- case CHANNEL_G_HT40MINUS:
- ret = 7;
- break;
+ if (chan->channel <= 5350)
+ ret = 1;
+ else if ((chan->channel > 5350) && (chan->channel <= 5600))
+ ret = 3;
+ else
+ ret = 5;
- default:
- ret = -EINVAL;
- }
+ if (IS_CHAN_HT40(chan))
+ ret++;
return ret;
}
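
The rewritten ar9550 tx-gain selection above encodes the old switch compactly: 2 GHz maps to index 7 (HT40) or 8 (HT20/legacy), and on 5 GHz a frequency-dependent base of 1/3/5 is bumped by one for HT40, yielding 2/4/6. A standalone restatement for checking the mapping:

#include <stdbool.h>
#include <stdio.h>

static int txgain_index(bool is_2ghz, bool is_ht40, int freq)
{
	int ret;

	if (is_2ghz)
		return is_ht40 ? 7 : 8;

	if (freq <= 5350)
		ret = 1;
	else if (freq <= 5600)
		ret = 3;
	else
		ret = 5;

	return is_ht40 ? ret + 1 : ret;	/* HT40 uses the next table */
}

int main(void)
{
	/* 5 GHz HT20 @5200 -> 1, 5 GHz HT40 @5500 -> 4, 2 GHz HT40 -> 7 */
	printf("%d %d %d\n",
	       txgain_index(false, false, 5200),
	       txgain_index(false, true, 5500),
	       txgain_index(true, true, 2437));
	return 0;
}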
@@ -728,28 +707,10 @@ static int ar9003_hw_process_ini(struct ath_hw *ah,
unsigned int regWrites = 0, i;
u32 modesIndex;
- switch (chan->chanmode) {
- case CHANNEL_A:
- case CHANNEL_A_HT20:
- modesIndex = 1;
- break;
- case CHANNEL_A_HT40PLUS:
- case CHANNEL_A_HT40MINUS:
- modesIndex = 2;
- break;
- case CHANNEL_G:
- case CHANNEL_G_HT20:
- case CHANNEL_B:
- modesIndex = 4;
- break;
- case CHANNEL_G_HT40PLUS:
- case CHANNEL_G_HT40MINUS:
- modesIndex = 3;
- break;
-
- default:
- return -EINVAL;
- }
+ if (IS_CHAN_5GHZ(chan))
+ modesIndex = IS_CHAN_HT40(chan) ? 2 : 1;
+ else
+ modesIndex = IS_CHAN_HT40(chan) ? 3 : 4;
/*
* SOC, MAC, BB, RADIO initvals.
@@ -847,8 +808,10 @@ static void ar9003_hw_set_rfmode(struct ath_hw *ah,
if (chan == NULL)
return;
- rfMode |= (IS_CHAN_B(chan) || IS_CHAN_G(chan))
- ? AR_PHY_MODE_DYNAMIC : AR_PHY_MODE_OFDM;
+ if (IS_CHAN_2GHZ(chan))
+ rfMode |= AR_PHY_MODE_DYNAMIC;
+ else
+ rfMode |= AR_PHY_MODE_OFDM;
if (IS_CHAN_A_FAST_CLOCK(ah, chan))
rfMode |= (AR_PHY_MODE_DYNAMIC | AR_PHY_MODE_DYN_CCK_DISABLE);
@@ -1274,12 +1237,11 @@ static void ar9003_hw_ani_cache_ini_regs(struct ath_hw *ah)
aniState = &ah->ani;
iniDef = &aniState->iniDef;
- ath_dbg(common, ANI, "ver %d.%d opmode %u chan %d Mhz/0x%x\n",
+ ath_dbg(common, ANI, "ver %d.%d opmode %u chan %d Mhz\n",
ah->hw_version.macVersion,
ah->hw_version.macRev,
ah->opmode,
- chan->channel,
- chan->channelFlags);
+ chan->channel);
val = REG_READ(ah, AR_PHY_SFCORR);
iniDef->m1Thresh = MS(val, AR_PHY_SFCORR_M1_THRESH);
@@ -1375,15 +1337,19 @@ static void ar9003_hw_antdiv_comb_conf_get(struct ath_hw *ah,
AR_PHY_ANT_FAST_DIV_BIAS_S;
if (AR_SREV_9330_11(ah)) {
+ antconf->lna1_lna2_switch_delta = -1;
antconf->lna1_lna2_delta = -9;
antconf->div_group = 1;
} else if (AR_SREV_9485(ah)) {
+ antconf->lna1_lna2_switch_delta = -1;
antconf->lna1_lna2_delta = -9;
antconf->div_group = 2;
} else if (AR_SREV_9565(ah)) {
- antconf->lna1_lna2_delta = -3;
+ antconf->lna1_lna2_switch_delta = 3;
+ antconf->lna1_lna2_delta = -9;
antconf->div_group = 3;
} else {
+ antconf->lna1_lna2_switch_delta = -1;
antconf->lna1_lna2_delta = -3;
antconf->div_group = 0;
}
@@ -1489,17 +1455,24 @@ static void ar9003_hw_set_bt_ant_diversity(struct ath_hw *ah, bool enable)
} else if (AR_SREV_9565(ah)) {
if (enable) {
REG_SET_BIT(ah, AR_PHY_MC_GAIN_CTRL,
+ AR_ANT_DIV_ENABLE);
+ REG_SET_BIT(ah, AR_PHY_MC_GAIN_CTRL,
(1 << AR_PHY_ANT_SW_RX_PROT_S));
- if (ah->curchan && IS_CHAN_2GHZ(ah->curchan))
- REG_SET_BIT(ah, AR_PHY_RESTART,
- AR_PHY_RESTART_ENABLE_DIV_M2FLAG);
+ REG_SET_BIT(ah, AR_PHY_CCK_DETECT,
+ AR_FAST_DIV_ENABLE);
+ REG_SET_BIT(ah, AR_PHY_RESTART,
+ AR_PHY_RESTART_ENABLE_DIV_M2FLAG);
REG_SET_BIT(ah, AR_BTCOEX_WL_LNADIV,
AR_BTCOEX_WL_LNADIV_FORCE_ON);
} else {
- REG_CLR_BIT(ah, AR_PHY_MC_GAIN_CTRL, AR_ANT_DIV_ENABLE);
+ REG_CLR_BIT(ah, AR_PHY_MC_GAIN_CTRL,
+ AR_ANT_DIV_ENABLE);
REG_CLR_BIT(ah, AR_PHY_MC_GAIN_CTRL,
(1 << AR_PHY_ANT_SW_RX_PROT_S));
- REG_CLR_BIT(ah, AR_PHY_CCK_DETECT, AR_FAST_DIV_ENABLE);
+ REG_CLR_BIT(ah, AR_PHY_CCK_DETECT,
+ AR_FAST_DIV_ENABLE);
+ REG_CLR_BIT(ah, AR_PHY_RESTART,
+ AR_PHY_RESTART_ENABLE_DIV_M2FLAG);
REG_CLR_BIT(ah, AR_BTCOEX_WL_LNADIV,
AR_BTCOEX_WL_LNADIV_FORCE_ON);
@@ -1526,28 +1499,10 @@ static int ar9003_hw_fast_chan_change(struct ath_hw *ah,
unsigned int regWrites = 0;
u32 modesIndex;
- switch (chan->chanmode) {
- case CHANNEL_A:
- case CHANNEL_A_HT20:
- modesIndex = 1;
- break;
- case CHANNEL_A_HT40PLUS:
- case CHANNEL_A_HT40MINUS:
- modesIndex = 2;
- break;
- case CHANNEL_G:
- case CHANNEL_G_HT20:
- case CHANNEL_B:
- modesIndex = 4;
- break;
- case CHANNEL_G_HT40PLUS:
- case CHANNEL_G_HT40MINUS:
- modesIndex = 3;
- break;
-
- default:
- return -EINVAL;
- }
+ if (IS_CHAN_5GHZ(chan))
+ modesIndex = IS_CHAN_HT40(chan) ? 2 : 1;
+ else
+ modesIndex = IS_CHAN_HT40(chan) ? 3 : 4;
if (modesIndex == ah->modes_index) {
*ini_reloaded = false;
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
index 6fd7523..fca6243 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
@@ -343,8 +343,12 @@
#define AR_PHY_CCA_NOM_VAL_9462_2GHZ -127
#define AR_PHY_CCA_MIN_GOOD_VAL_9462_2GHZ -127
+#define AR_PHY_CCA_MAX_GOOD_VAL_9462_2GHZ -60
+#define AR_PHY_CCA_MAX_GOOD_VAL_9462_FCC_2GHZ -95
#define AR_PHY_CCA_NOM_VAL_9462_5GHZ -127
#define AR_PHY_CCA_MIN_GOOD_VAL_9462_5GHZ -127
+#define AR_PHY_CCA_MAX_GOOD_VAL_9462_5GHZ -60
+#define AR_PHY_CCA_MAX_GOOD_VAL_9462_FCC_5GHZ -100
#define AR_PHY_CCA_NOM_VAL_9330_2GHZ -118
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_rtt.c b/drivers/net/wireless/ath/ath9k/ar9003_rtt.c
index 74de353..9344188 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_rtt.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_rtt.c
@@ -118,6 +118,27 @@ void ar9003_hw_rtt_load_hist(struct ath_hw *ah)
}
}
+static void ar9003_hw_patch_rtt(struct ath_hw *ah, int index, int chain)
+{
+ int agc, caldac;
+
+ if (!test_bit(SW_PKDET_DONE, &ah->caldata->cal_flags))
+ return;
+
+ if ((index != 5) || (chain >= 2))
+ return;
+
+ agc = REG_READ_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
+ AR_PHY_65NM_RXRF_AGC_AGC_OVERRIDE);
+ if (!agc)
+ return;
+
+ caldac = ah->caldata->caldac[chain];
+ ah->caldata->rtt_table[chain][index] &= 0xFFFF05FF;
+ caldac = (caldac & 0x20) | ((caldac & 0x1F) << 7);
+ ah->caldata->rtt_table[chain][index] |= (caldac << 4);
+}
+
static int ar9003_hw_rtt_fill_hist_entry(struct ath_hw *ah, u8 chain, u32 index)
{
u32 val;
@@ -155,13 +176,16 @@ void ar9003_hw_rtt_fill_hist(struct ath_hw *ah)
for (i = 0; i < MAX_RTT_TABLE_ENTRY; i++) {
ah->caldata->rtt_table[chain][i] =
ar9003_hw_rtt_fill_hist_entry(ah, chain, i);
+
+ ar9003_hw_patch_rtt(ah, i, chain);
+
ath_dbg(ath9k_hw_common(ah), CALIBRATE,
"RTT value at idx %d, chain %d is: 0x%x\n",
i, chain, ah->caldata->rtt_table[chain][i]);
}
}
- ah->caldata->rtt_done = true;
+ set_bit(RTT_DONE, &ah->caldata->cal_flags);
}
void ar9003_hw_rtt_clear_hist(struct ath_hw *ah)
@@ -176,7 +200,7 @@ void ar9003_hw_rtt_clear_hist(struct ath_hw *ah)
}
if (ah->caldata)
- ah->caldata->rtt_done = false;
+ clear_bit(RTT_DONE, &ah->caldata->cal_flags);
}
bool ar9003_hw_rtt_restore(struct ath_hw *ah, struct ath9k_channel *chan)
@@ -186,11 +210,37 @@ bool ar9003_hw_rtt_restore(struct ath_hw *ah, struct ath9k_channel *chan)
if (!ah->caldata)
return false;
- if (!ah->caldata->rtt_done)
+ if (test_bit(SW_PKDET_DONE, &ah->caldata->cal_flags)) {
+ if (IS_CHAN_2GHZ(chan)){
+ REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(0),
+ AR_PHY_65NM_RXRF_AGC_AGC2G_CALDAC_OVR,
+ ah->caldata->caldac[0]);
+ REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(1),
+ AR_PHY_65NM_RXRF_AGC_AGC2G_CALDAC_OVR,
+ ah->caldata->caldac[1]);
+ } else {
+ REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(0),
+ AR_PHY_65NM_RXRF_AGC_AGC5G_CALDAC_OVR,
+ ah->caldata->caldac[0]);
+ REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(1),
+ AR_PHY_65NM_RXRF_AGC_AGC5G_CALDAC_OVR,
+ ah->caldata->caldac[1]);
+ }
+ REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(1),
+ AR_PHY_65NM_RXRF_AGC_AGC_OVERRIDE, 0x1);
+ REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(0),
+ AR_PHY_65NM_RXRF_AGC_AGC_OVERRIDE, 0x1);
+ }
+
+ if (!test_bit(RTT_DONE, &ah->caldata->cal_flags))
return false;
ar9003_hw_rtt_enable(ah);
- ar9003_hw_rtt_set_mask(ah, 0x10);
+
+ if (test_bit(SW_PKDET_DONE, &ah->caldata->cal_flags))
+ ar9003_hw_rtt_set_mask(ah, 0x30);
+ else
+ ar9003_hw_rtt_set_mask(ah, 0x10);
if (!ath9k_hw_rfbus_req(ah)) {
ath_err(ath9k_hw_common(ah), "Could not stop baseband\n");
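
ar9003_hw_patch_rtt() above splices the caldac value saved during manual peak calibration into RTT history entry 5: the 0xFFFF05FF mask clears the target bits, bit 5 of the caldac stays put while bits 4:0 move up to bits 11:7, and the packed field is inserted at bit 4 of the entry. A standalone check of that bit surgery, using an arbitrary example value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t entry = 0xFFFFFFFF;
	uint32_t caldac = 0x2A;			/* example 6-bit caldac */

	entry &= 0xFFFF05FF;			/* clear the patched bits */
	caldac = (caldac & 0x20) | ((caldac & 0x1F) << 7);
	entry |= caldac << 4;

	printf("patched entry: 0x%08X\n", entry);	/* 0xFFFF57FF here */
	return 0;
}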
diff --git a/drivers/net/wireless/ath/ath9k/ar9485_initvals.h b/drivers/net/wireless/ath/ath9k/ar9485_initvals.h
index 88ff1d7..6f899c6 100644
--- a/drivers/net/wireless/ath/ath9k/ar9485_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9485_initvals.h
@@ -20,7 +20,17 @@
/* AR9485 1.1 */
-#define ar9485_1_1_mac_postamble ar9300_2p2_mac_postamble
+static const u32 ar9485_1_1_mac_postamble[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160},
+ {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c},
+ {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38},
+ {0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00},
+ {0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b},
+ {0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810},
+ {0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a},
+ {0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440},
+};
static const u32 ar9485_1_1_pcie_phy_pll_on_clkreq_disable_L1[][2] = {
/* Addr allmodes */
@@ -34,6 +44,7 @@ static const u32 ar9485Common_wo_xlna_rx_gain_1_1[][2] = {
{0x00009e00, 0x037216a0},
{0x00009e04, 0x00182020},
{0x00009e18, 0x00000000},
+ {0x00009e20, 0x000003a8},
{0x00009e2c, 0x00004121},
{0x00009e44, 0x02282324},
{0x0000a000, 0x00060005},
@@ -174,7 +185,7 @@ static const u32 ar9485Modes_high_power_tx_gain_1_1[][5] = {
{0x0000a2e0, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
{0x0000a2e4, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
{0x0000a2e8, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
- {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
+ {0x0000a410, 0x000050d9, 0x000050d9, 0x000050da, 0x000050da},
{0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
{0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
@@ -200,14 +211,14 @@ static const u32 ar9485Modes_high_power_tx_gain_1_1[][5] = {
{0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x57001ce9, 0x57001ce9},
{0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5a001ceb, 0x5a001ceb},
{0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x5e001eeb, 0x5e001eeb},
- {0x0000a560, 0x900fff0b, 0x900fff0b, 0x5e001eeb, 0x5e001eeb},
- {0x0000a564, 0x960fffcb, 0x960fffcb, 0x5e001eeb, 0x5e001eeb},
- {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
- {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
- {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
- {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
- {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
- {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a560, 0x900fff0b, 0x900fff0b, 0x62001eee, 0x62001eee},
+ {0x0000a564, 0x960fffcb, 0x960fffcb, 0x66001ff6, 0x66001ff6},
+ {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x66001ff6, 0x66001ff6},
+ {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x66001ff6, 0x66001ff6},
+ {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x66001ff6, 0x66001ff6},
+ {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x66001ff6, 0x66001ff6},
+ {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x66001ff6, 0x66001ff6},
+ {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x66001ff6, 0x66001ff6},
{0x0000a580, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a584, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a588, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
@@ -263,6 +274,11 @@ static const u32 ar9485Modes_high_power_tx_gain_1_1[][5] = {
static const u32 ar9485Modes_green_ob_db_tx_gain_1_1[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x000098bc, 0x00000003, 0x00000003, 0x00000003, 0x00000003},
+ {0x0000a2d8, 0xf999a83a, 0xf999a83a, 0x7999a83a, 0x7999a83a},
+ {0x0000a2dc, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
+ {0x0000a2e0, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
+ {0x0000a2e4, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
+ {0x0000a2e8, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
{0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
{0x0000a458, 0x80000000, 0x80000000, 0x80000000, 0x80000000},
{0x0000a500, 0x00022200, 0x00022200, 0x00000006, 0x00000006},
@@ -297,6 +313,22 @@ static const u32 ar9485Modes_green_ob_db_tx_gain_1_1[][5] = {
{0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
{0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
{0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a580, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a584, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a588, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a58c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a590, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a594, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a598, 0x00000000, 0x00000000, 0x01404501, 0x01404501},
+ {0x0000a59c, 0x00000000, 0x00000000, 0x02808a02, 0x02808a02},
+ {0x0000a5a0, 0x00000000, 0x00000000, 0x02808a02, 0x02808a02},
+ {0x0000a5a4, 0x00000000, 0x00000000, 0x02808803, 0x02808803},
+ {0x0000a5a8, 0x00000000, 0x00000000, 0x04c14b04, 0x04c14b04},
+ {0x0000a5ac, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
+ {0x0000a5b0, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
+ {0x0000a5b4, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
+ {0x0000a5b8, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
+ {0x0000a5bc, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
{0x0000b500, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
{0x0000b504, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
{0x0000b508, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
@@ -341,6 +373,100 @@ static const u32 ar9485Modes_high_ob_db_tx_gain_1_1[][5] = {
{0x0000a2e0, 0x00000000, 0x00000000, 0xffc63a84, 0xffc63a84},
{0x0000a2e4, 0x00000000, 0x00000000, 0xfe0fc000, 0xfe0fc000},
{0x0000a2e8, 0x00000000, 0x00000000, 0xfff00000, 0xfff00000},
+ {0x0000a410, 0x000050d9, 0x000050d9, 0x000050da, 0x000050da},
+ {0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
+ {0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
+ {0x0000a508, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x11062202, 0x11062202, 0x0d000200, 0x0d000200},
+ {0x0000a510, 0x17022e00, 0x17022e00, 0x11000202, 0x11000202},
+ {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x15000400, 0x15000400},
+ {0x0000a518, 0x25020ec0, 0x25020ec0, 0x19000402, 0x19000402},
+ {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x1d000404, 0x1d000404},
+ {0x0000a520, 0x2f001f04, 0x2f001f04, 0x21000603, 0x21000603},
+ {0x0000a524, 0x35001fc4, 0x35001fc4, 0x25000605, 0x25000605},
+ {0x0000a528, 0x3c022f04, 0x3c022f04, 0x2a000a03, 0x2a000a03},
+ {0x0000a52c, 0x41023e85, 0x41023e85, 0x2c000a04, 0x2c000a04},
+ {0x0000a530, 0x48023ec6, 0x48023ec6, 0x34000e20, 0x34000e20},
+ {0x0000a534, 0x4d023f01, 0x4d023f01, 0x35000e21, 0x35000e21},
+ {0x0000a538, 0x53023f4b, 0x53023f4b, 0x43000e62, 0x43000e62},
+ {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x45000e63, 0x45000e63},
+ {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x49000e65, 0x49000e65},
+ {0x0000a544, 0x6502feca, 0x6502feca, 0x4b000e66, 0x4b000e66},
+ {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x4d001645, 0x4d001645},
+ {0x0000a54c, 0x7203feca, 0x7203feca, 0x51001865, 0x51001865},
+ {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x55001a86, 0x55001a86},
+ {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x57001ce9, 0x57001ce9},
+ {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5a001ceb, 0x5a001ceb},
+ {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a560, 0x900fff0b, 0x900fff0b, 0x62001eee, 0x62001eee},
+ {0x0000a564, 0x960fffcb, 0x960fffcb, 0x66001ff6, 0x66001ff6},
+ {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x66001ff6, 0x66001ff6},
+ {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x66001ff6, 0x66001ff6},
+ {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x66001ff6, 0x66001ff6},
+ {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x66001ff6, 0x66001ff6},
+ {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x66001ff6, 0x66001ff6},
+ {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x66001ff6, 0x66001ff6},
+ {0x0000a580, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a584, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a588, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a58c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a590, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a594, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a598, 0x00000000, 0x00000000, 0x01404501, 0x01404501},
+ {0x0000a59c, 0x00000000, 0x00000000, 0x02808a02, 0x02808a02},
+ {0x0000a5a0, 0x00000000, 0x00000000, 0x02808a02, 0x02808a02},
+ {0x0000a5a4, 0x00000000, 0x00000000, 0x02808803, 0x02808803},
+ {0x0000a5a8, 0x00000000, 0x00000000, 0x04c14b04, 0x04c14b04},
+ {0x0000a5ac, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
+ {0x0000a5b0, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
+ {0x0000a5b4, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
+ {0x0000a5b8, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
+ {0x0000a5bc, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
+ {0x0000b500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b504, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b508, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b50c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b510, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b514, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b518, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b51c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b520, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b524, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b528, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b52c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b530, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b534, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b538, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b53c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b540, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b544, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b548, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b54c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b550, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b554, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b558, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b55c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b560, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b564, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b568, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b56c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b570, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b574, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b578, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b57c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x00016044, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db},
+ {0x00016048, 0x6c924260, 0x6c924260, 0x6c924260, 0x6c924260},
+};
+
+static const u32 ar9485Modes_low_ob_db_tx_gain_1_1[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002},
+ {0x0000a2d8, 0xf999a83a, 0xf999a83a, 0x7999a83a, 0x7999a83a},
+ {0x0000a2dc, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
+ {0x0000a2e0, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
+ {0x0000a2e4, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
+ {0x0000a2e8, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
{0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
{0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
@@ -427,7 +553,7 @@ static const u32 ar9485Modes_high_ob_db_tx_gain_1_1[][5] = {
{0x00016048, 0x6c924260, 0x6c924260, 0x6c924260, 0x6c924260},
};
-static const u32 ar9485Modes_low_ob_db_tx_gain_1_1[][5] = {
+static const u32 ar9485_modes_lowest_ob_db_tx_gain_1_1[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002},
{0x0000a2d8, 0xf999a83a, 0xf999a83a, 0x7999a83a, 0x7999a83a},
@@ -521,12 +647,15 @@ static const u32 ar9485Modes_low_ob_db_tx_gain_1_1[][5] = {
{0x00016048, 0x6c924260, 0x6c924260, 0x6c924260, 0x6c924260},
};
-#define ar9485_modes_lowest_ob_db_tx_gain_1_1 ar9485Modes_low_ob_db_tx_gain_1_1
-
static const u32 ar9485Modes_green_spur_ob_db_tx_gain_1_1[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x000098bc, 0x00000003, 0x00000003, 0x00000003, 0x00000003},
- {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
+ {0x0000a2d8, 0xf999a83a, 0xf999a83a, 0x7999a83a, 0x7999a83a},
+ {0x0000a2dc, 0x00000000, 0x00000000, 0xffad452a, 0xffad452a},
+ {0x0000a2e0, 0x00000000, 0x00000000, 0xffc98634, 0xffc98634},
+ {0x0000a2e4, 0x00000000, 0x00000000, 0xfff60780, 0xfff60780},
+ {0x0000a2e8, 0x00000000, 0x00000000, 0xfffff800, 0xfffff800},
+ {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
{0x0000a458, 0x80000000, 0x80000000, 0x80000000, 0x80000000},
{0x0000a500, 0x00022200, 0x00022200, 0x00000006, 0x00000006},
{0x0000a504, 0x05062002, 0x05062002, 0x03000201, 0x03000201},
@@ -543,23 +672,39 @@ static const u32 ar9485Modes_green_spur_ob_db_tx_gain_1_1[][5] = {
{0x0000a530, 0x48023ec6, 0x48023ec6, 0x310006e0, 0x310006e0},
{0x0000a534, 0x4d023f01, 0x4d023f01, 0x330006e0, 0x330006e0},
{0x0000a538, 0x53023f4b, 0x53023f4b, 0x3e0008e3, 0x3e0008e3},
- {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x410008e5, 0x410008e5},
- {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x430008e6, 0x430008e6},
- {0x0000a544, 0x6502feca, 0x6502feca, 0x4a0008ec, 0x4a0008ec},
- {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x4e0008f1, 0x4e0008f1},
- {0x0000a54c, 0x7203feca, 0x7203feca, 0x520008f3, 0x520008f3},
- {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x54000eed, 0x54000eed},
- {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x58000ef1, 0x58000ef1},
- {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5c000ef3, 0x5c000ef3},
- {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x60000ef5, 0x60000ef5},
- {0x0000a560, 0x900fff0b, 0x900fff0b, 0x62000ef6, 0x62000ef6},
- {0x0000a564, 0x960fffcb, 0x960fffcb, 0x62000ef6, 0x62000ef6},
- {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x62000ef6, 0x62000ef6},
- {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x62000ef6, 0x62000ef6},
- {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x62000ef6, 0x62000ef6},
- {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x62000ef6, 0x62000ef6},
- {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x62000ef6, 0x62000ef6},
- {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x62000ef6, 0x62000ef6},
+ {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x430008e6, 0x430008e6},
+ {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x4a0008ec, 0x4a0008ec},
+ {0x0000a544, 0x6502feca, 0x6502feca, 0x4e0008f1, 0x4e0008f1},
+ {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x520008f3, 0x520008f3},
+ {0x0000a54c, 0x7203feca, 0x7203feca, 0x54000eed, 0x54000eed},
+ {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x58000ef1, 0x58000ef1},
+ {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x5c000ef3, 0x5c000ef3},
+ {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x62000ef6, 0x62000ef6},
+ {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x66001ff0, 0x66001ff0},
+ {0x0000a560, 0x900fff0b, 0x900fff0b, 0x68001ff6, 0x68001ff6},
+ {0x0000a564, 0x960fffcb, 0x960fffcb, 0x68001ff6, 0x68001ff6},
+ {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x68001ff6, 0x68001ff6},
+ {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x68001ff6, 0x68001ff6},
+ {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x68001ff6, 0x68001ff6},
+ {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x68001ff6, 0x68001ff6},
+ {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x68001ff6, 0x68001ff6},
+ {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x68001ff6, 0x68001ff6},
+ {0x0000a580, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a584, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a588, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a58c, 0x00000000, 0x00000000, 0x01804000, 0x01804000},
+ {0x0000a590, 0x00000000, 0x00000000, 0x02808a02, 0x02808a02},
+ {0x0000a594, 0x00000000, 0x00000000, 0x0340ca02, 0x0340ca02},
+ {0x0000a598, 0x00000000, 0x00000000, 0x0340cd03, 0x0340cd03},
+ {0x0000a59c, 0x00000000, 0x00000000, 0x0340cd03, 0x0340cd03},
+ {0x0000a5a0, 0x00000000, 0x00000000, 0x06415304, 0x06415304},
+ {0x0000a5a4, 0x00000000, 0x00000000, 0x04c11905, 0x04c11905},
+ {0x0000a5a8, 0x00000000, 0x00000000, 0x06415905, 0x06415905},
+ {0x0000a5ac, 0x00000000, 0x00000000, 0x06415905, 0x06415905},
+ {0x0000a5b0, 0x00000000, 0x00000000, 0x06415905, 0x06415905},
+ {0x0000a5b4, 0x00000000, 0x00000000, 0x06415905, 0x06415905},
+ {0x0000a5b8, 0x00000000, 0x00000000, 0x06415905, 0x06415905},
+ {0x0000a5bc, 0x00000000, 0x00000000, 0x06415905, 0x06415905},
{0x0000b500, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
{0x0000b504, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
{0x0000b508, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
@@ -823,6 +968,7 @@ static const u32 ar9485_common_rx_gain_1_1[][2] = {
{0x00009e00, 0x03721b20},
{0x00009e04, 0x00082020},
{0x00009e18, 0x0300501e},
+ {0x00009e20, 0x000003ba},
{0x00009e2c, 0x00002e21},
{0x00009e44, 0x02182324},
{0x0000a000, 0x00060005},
@@ -1001,7 +1147,6 @@ static const u32 ar9485_1_1_baseband_postamble[][5] = {
{0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec80d2e, 0x7ec80d2e},
{0x00009e14, 0x31395d53, 0x31396053, 0x312e6053, 0x312e5d53},
{0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
- {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
{0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946222, 0xcf946222},
{0x00009e48, 0x5030201a, 0x5030201a, 0x50302010, 0x50302010},
{0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
@@ -1020,7 +1165,7 @@ static const u32 ar9485_1_1_baseband_postamble[][5] = {
{0x0000a284, 0x00000000, 0x00000000, 0x000002a0, 0x000002a0},
{0x0000a288, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a28c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00058d18, 0x00058d18},
+ {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
{0x0000a2d0, 0x00071981, 0x00071981, 0x00071982, 0x00071982},
{0x0000a2d8, 0xf999a83a, 0xf999a83a, 0xf999a83a, 0xf999a83a},
{0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
@@ -1206,6 +1351,11 @@ static const u32 ar9485_1_1_mac_core[][2] = {
{0x000083d0, 0x000301ff},
};
-#define ar9485_1_1_baseband_core_txfir_coeff_japan_2484 ar9462_2p0_baseband_core_txfir_coeff_japan_2484
+static const u32 ar9485_1_1_baseband_core_txfir_coeff_japan_2484[][2] = {
+ /* Addr allmodes */
+ {0x0000a398, 0x00000000},
+ {0x0000a39c, 0x6f7f0301},
+ {0x0000a3a0, 0xca9228ee},
+};
#endif /* INITVALS_9485_H */
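These initvals tables are rows of {register address, per-mode value}: the five-column variants carry one value each for 5G_HT20, 5G_HT40, 2G_HT40 and 2G_HT20, while the two-column variants hold a single all-modes value. The final hunk above also replaces the old #define that aliased the AR9485 Japan 2484 MHz TX FIR coefficients to the AR9462 table with an explicit table of its own. A minimal sketch of how one column of such a table can be applied, assuming a hypothetical write_reg() accessor (the driver itself loads these through its INI machinery with buffered register writes):

static void load_mode_table(const u32 tbl[][5], int rows, int col)
{
	int i;

	/* tbl[i][0] is the register address; col selects the mode column */
	for (i = 0; i < rows; i++)
		write_reg(tbl[i][0], tbl[i][col]);
}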
diff --git a/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h
index e85a8b0..a8c757b 100644
--- a/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h
@@ -272,9 +272,9 @@ static const u32 ar9565_1p0_baseband_core[][2] = {
{0x0000a398, 0x001f0e0f},
{0x0000a39c, 0x0075393f},
{0x0000a3a0, 0xb79f6427},
- {0x0000a3a4, 0x00000000},
- {0x0000a3a8, 0xaaaaaaaa},
- {0x0000a3ac, 0x3c466478},
+ {0x0000a3a4, 0x00000011},
+ {0x0000a3a8, 0xaaaaaa6e},
+ {0x0000a3ac, 0x3c466455},
{0x0000a3c0, 0x20202020},
{0x0000a3c4, 0x22222220},
{0x0000a3c8, 0x20200020},
@@ -295,11 +295,11 @@ static const u32 ar9565_1p0_baseband_core[][2] = {
{0x0000a404, 0x00000000},
{0x0000a408, 0x0e79e5c6},
{0x0000a40c, 0x00820820},
- {0x0000a414, 0x1ce739ce},
+ {0x0000a414, 0x1ce739c5},
{0x0000a418, 0x2d001dce},
- {0x0000a41c, 0x1ce739ce},
+ {0x0000a41c, 0x1ce739c5},
{0x0000a420, 0x000001ce},
- {0x0000a424, 0x1ce739ce},
+ {0x0000a424, 0x1ce739c5},
{0x0000a428, 0x000001ce},
{0x0000a42c, 0x1ce739ce},
{0x0000a430, 0x1ce739ce},
@@ -351,9 +351,9 @@ static const u32 ar9565_1p0_baseband_postamble[][5] = {
{0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3379605e, 0x33795d5e},
{0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
- {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
+ {0x00009e20, 0x000003b5, 0x000003b5, 0x000003a4, 0x000003a4},
{0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
- {0x00009e3c, 0xcf946222, 0xcf946222, 0xcf946222, 0xcf946222},
+ {0x00009e3c, 0xcf946222, 0xcf946222, 0xcf946220, 0xcf946220},
{0x00009e44, 0xfe321e27, 0xfe321e27, 0xfe291e27, 0xfe291e27},
{0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
{0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
@@ -452,6 +452,7 @@ static const u32 ar9565_1p0_Common_rx_gain_table[][2] = {
/* Addr allmodes */
{0x00004050, 0x00300300},
{0x0000406c, 0x00100000},
+ {0x00009e20, 0x000003b6},
{0x0000a000, 0x00010000},
{0x0000a004, 0x00030002},
{0x0000a008, 0x00050004},
@@ -1230,4 +1231,11 @@ static const u32 ar9565_1p0_modes_high_power_tx_gain_table[][5] = {
{0x00016054, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
};
+static const u32 ar9565_1p0_baseband_core_txfir_coeff_japan_2484[][2] = {
+ /* Addr allmodes */
+ {0x0000a398, 0x00000000},
+ {0x0000a39c, 0x6f7f0301},
+ {0x0000a3a0, 0xca9228ee},
+};
+
#endif /* INITVALS_9565_1P0_H */
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 2ee35f6..5492a0c 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -64,7 +64,6 @@ struct ath_node;
struct ath_config {
u16 txpowlimit;
- u8 cabqReadytime;
};
/*************************/
@@ -207,6 +206,14 @@ struct ath_frame_info {
u8 baw_tracked : 1;
};
+struct ath_rxbuf {
+ struct list_head list;
+ struct sk_buff *bf_mpdu;
+ void *bf_desc;
+ dma_addr_t bf_daddr;
+ dma_addr_t bf_buf_addr;
+};
+
struct ath_buf_state {
u8 bf_type;
u8 bfs_paprd;
@@ -307,7 +314,7 @@ struct ath_rx {
struct ath_descdma rxdma;
struct ath_rx_edma rx_edma[ATH9K_RX_QUEUE_MAX];
- struct ath_buf *buf_hold;
+ struct ath_rxbuf *buf_hold;
struct sk_buff *frag;
u32 ampdu_ref;
@@ -459,8 +466,8 @@ void ath9k_queue_reset(struct ath_softc *sc, enum ath_reset_type type);
#define ATH_DUMP_BTCOEX(_s, _val) \
do { \
- len += snprintf(buf + len, size - len, \
- "%20s : %10d\n", _s, (_val)); \
+ len += scnprintf(buf + len, size - len, \
+ "%20s : %10d\n", _s, (_val)); \
} while (0)
enum bt_op_flags {
@@ -581,7 +588,6 @@ static inline void ath_fill_led_pin(struct ath_softc *sc)
#define ATH_ANT_DIV_COMB_ALT_ANT_RATIO_LOW_RSSI 50
#define ATH_ANT_DIV_COMB_ALT_ANT_RATIO2_LOW_RSSI 50
-#define ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA -1
#define ATH_ANT_DIV_COMB_LNA1_DELTA_HI -4
#define ATH_ANT_DIV_COMB_LNA1_DELTA_MID -2
#define ATH_ANT_DIV_COMB_LNA1_DELTA_LOW 2
@@ -626,12 +632,15 @@ void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs);
/* Main driver core */
/********************/
-#define ATH9K_PCI_CUS198 0x0001
-#define ATH9K_PCI_CUS230 0x0002
-#define ATH9K_PCI_CUS217 0x0004
-#define ATH9K_PCI_WOW 0x0008
-#define ATH9K_PCI_BT_ANT_DIV 0x0010
-#define ATH9K_PCI_D3_L1_WAR 0x0020
+#define ATH9K_PCI_CUS198 0x0001
+#define ATH9K_PCI_CUS230 0x0002
+#define ATH9K_PCI_CUS217 0x0004
+#define ATH9K_PCI_CUS252 0x0008
+#define ATH9K_PCI_WOW 0x0010
+#define ATH9K_PCI_BT_ANT_DIV 0x0020
+#define ATH9K_PCI_D3_L1_WAR 0x0040
+#define ATH9K_PCI_AR9565_1ANT 0x0080
+#define ATH9K_PCI_AR9565_2ANT 0x0100
/*
* Default cache line size, in bytes.
@@ -924,7 +933,6 @@ void ath9k_deinit_device(struct ath_softc *sc);
void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw);
void ath9k_reload_chainmask_settings(struct ath_softc *sc);
-bool ath9k_uses_beacons(int type);
void ath9k_spectral_scan_trigger(struct ieee80211_hw *hw);
int ath9k_spectral_scan_config(struct ieee80211_hw *hw,
enum spectral_mode spectral_mode);
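The ATH_DUMP_BTCOEX change above is one instance of a tree-wide switch from snprintf() to scnprintf() in these debugfs dumpers. snprintf() returns the length the output would have had without truncation, so accumulating its return value can push the running offset past the end of the buffer; scnprintf() returns the number of bytes actually written, which keeps buf + len and size - len valid. A minimal sketch of the failure mode:

char buf[16];
size_t len = 0;

/* May return 21 (the untruncated length) even though only 15 bytes fit;
 * after that, sizeof(buf) - len underflows and buf + len is out of bounds. */
len += snprintf(buf + len, sizeof(buf) - len, "%s", "a long truncated line");

/* Returns at most the space actually used, so the offset stays in range. */
len = 0;
len += scnprintf(buf + len, sizeof(buf) - len, "%s", "a long truncated line");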
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index b5c16b3a..17be353 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -334,6 +334,8 @@ void ath9k_beacon_tasklet(unsigned long data)
if (ath9k_hw_numtxpending(ah, sc->beacon.beaconq) != 0) {
sc->beacon.bmisscnt++;
+ ath9k_hw_check_nav(ah);
+
if (!ath9k_hw_check_alive(ah))
ieee80211_queue_work(sc->hw, &sc->hw_check_work);
diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c
index 5e8219a..d8db74b 100644
--- a/drivers/net/wireless/ath/ath9k/calib.c
+++ b/drivers/net/wireless/ath/ath9k/calib.c
@@ -119,7 +119,7 @@ static void ath9k_hw_update_nfcal_hist_buffer(struct ath_hw *ah,
ath_dbg(common, CALIBRATE,
"NFmid[%d] (%d) > MAX (%d), %s\n",
i, h[i].privNF, limit->max,
- (cal->nfcal_interference ?
+ (test_bit(NFCAL_INTF, &cal->cal_flags) ?
"not corrected (due to interference)" :
"correcting to MAX"));
@@ -130,7 +130,7 @@ static void ath9k_hw_update_nfcal_hist_buffer(struct ath_hw *ah,
* we bypass this limit here in order to better deal
* with our environment.
*/
- if (!cal->nfcal_interference)
+ if (!test_bit(NFCAL_INTF, &cal->cal_flags))
h[i].privNF = limit->max;
}
}
@@ -141,7 +141,7 @@ static void ath9k_hw_update_nfcal_hist_buffer(struct ath_hw *ah,
* Re-enable the enforcement of the NF maximum again.
*/
if (!high_nf_mid)
- cal->nfcal_interference = false;
+ clear_bit(NFCAL_INTF, &cal->cal_flags);
}
static bool ath9k_hw_get_nf_thresh(struct ath_hw *ah,
@@ -186,7 +186,6 @@ void ath9k_hw_reset_calibration(struct ath_hw *ah,
bool ath9k_hw_reset_calvalid(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
- struct ieee80211_conf *conf = &common->hw->conf;
struct ath9k_cal_list *currCal = ah->cal_list_curr;
if (!ah->caldata)
@@ -208,7 +207,7 @@ bool ath9k_hw_reset_calvalid(struct ath_hw *ah)
return true;
ath_dbg(common, CALIBRATE, "Resetting Cal %d state for channel %u\n",
- currCal->calData->calType, conf->chandef.chan->center_freq);
+ currCal->calData->calType, ah->curchan->chan->center_freq);
ah->caldata->CalValid &= ~currCal->calData->calType;
currCal->calState = CAL_WAITING;
@@ -220,7 +219,7 @@ EXPORT_SYMBOL(ath9k_hw_reset_calvalid);
void ath9k_hw_start_nfcal(struct ath_hw *ah, bool update)
{
if (ah->caldata)
- ah->caldata->nfcal_pending = true;
+ set_bit(NFCAL_PENDING, &ah->caldata->cal_flags);
REG_SET_BIT(ah, AR_PHY_AGC_CONTROL,
AR_PHY_AGC_CONTROL_ENABLE_NF);
@@ -242,7 +241,6 @@ void ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
int32_t val;
u8 chainmask = (ah->rxchainmask << 3) | ah->rxchainmask;
struct ath_common *common = ath9k_hw_common(ah);
- struct ieee80211_conf *conf = &common->hw->conf;
s16 default_nf = ath9k_hw_get_default_nf(ah, chan);
if (ah->caldata)
@@ -252,7 +250,7 @@ void ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
if (chainmask & (1 << i)) {
s16 nfval;
- if ((i >= AR5416_MAX_CHAINS) && !conf_is_ht40(conf))
+ if ((i >= AR5416_MAX_CHAINS) && !IS_CHAN_HT40(chan))
continue;
if (h)
@@ -314,7 +312,7 @@ void ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
ENABLE_REGWRITE_BUFFER(ah);
for (i = 0; i < NUM_NF_READINGS; i++) {
if (chainmask & (1 << i)) {
- if ((i >= AR5416_MAX_CHAINS) && !conf_is_ht40(conf))
+ if ((i >= AR5416_MAX_CHAINS) && !IS_CHAN_HT40(chan))
continue;
val = REG_READ(ah, ah->nf_regs[i]);
@@ -391,7 +389,7 @@ bool ath9k_hw_getnf(struct ath_hw *ah, struct ath9k_channel *chan)
}
h = caldata->nfCalHist;
- caldata->nfcal_pending = false;
+ clear_bit(NFCAL_PENDING, &caldata->cal_flags);
ath9k_hw_update_nfcal_hist_buffer(ah, caldata, nfarray);
chan->noisefloor = h[0].privNF;
ah->noise = ath9k_hw_getchan_noise(ah, chan);
@@ -408,7 +406,6 @@ void ath9k_init_nfcal_hist_buffer(struct ath_hw *ah,
ah->caldata->channel = chan->channel;
ah->caldata->channelFlags = chan->channelFlags;
- ah->caldata->chanmode = chan->chanmode;
h = ah->caldata->nfCalHist;
default_nf = ath9k_hw_get_default_nf(ah, chan);
for (i = 0; i < NUM_NF_READINGS; i++) {
@@ -437,12 +434,12 @@ void ath9k_hw_bstuck_nfcal(struct ath_hw *ah)
* the baseband update the internal NF value itself, similar to
* what is being done after a full reset.
*/
- if (!caldata->nfcal_pending)
+ if (!test_bit(NFCAL_PENDING, &caldata->cal_flags))
ath9k_hw_start_nfcal(ah, true);
else if (!(REG_READ(ah, AR_PHY_AGC_CONTROL) & AR_PHY_AGC_CONTROL_NF))
ath9k_hw_getnf(ah, ah->curchan);
- caldata->nfcal_interference = true;
+ set_bit(NFCAL_INTF, &caldata->cal_flags);
}
EXPORT_SYMBOL(ath9k_hw_bstuck_nfcal);
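The calib.c hunks fold the separate nfcal_pending and nfcal_interference booleans into NFCAL_PENDING and NFCAL_INTF bits of a cal_flags word driven through set_bit()/clear_bit()/test_bit(). Besides shrinking the structure, these bitops are atomic, so individual flags can be flipped without additional locking. A minimal sketch of the pattern, with hypothetical flag names:

#include <linux/bitops.h>

enum { MY_NFCAL_PENDING, MY_NFCAL_INTF };	/* bit numbers, not masks */

struct my_caldata {
	unsigned long cal_flags;	/* the bitops API operates on unsigned long */
};

static void my_flag_example(struct my_caldata *cal)
{
	set_bit(MY_NFCAL_PENDING, &cal->cal_flags);
	if (test_bit(MY_NFCAL_PENDING, &cal->cal_flags))
		clear_bit(MY_NFCAL_PENDING, &cal->cal_flags);
}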
diff --git a/drivers/net/wireless/ath/ath9k/common.c b/drivers/net/wireless/ath/ath9k/common.c
index d3063c2..a7e5a05 100644
--- a/drivers/net/wireless/ath/ath9k/common.c
+++ b/drivers/net/wireless/ath/ath9k/common.c
@@ -49,103 +49,64 @@ int ath9k_cmn_get_hw_crypto_keytype(struct sk_buff *skb)
}
EXPORT_SYMBOL(ath9k_cmn_get_hw_crypto_keytype);
-static u32 ath9k_get_extchanmode(struct cfg80211_chan_def *chandef)
-{
- u32 chanmode = 0;
-
- switch (chandef->chan->band) {
- case IEEE80211_BAND_2GHZ:
- switch (chandef->width) {
- case NL80211_CHAN_WIDTH_20_NOHT:
- case NL80211_CHAN_WIDTH_20:
- chanmode = CHANNEL_G_HT20;
- break;
- case NL80211_CHAN_WIDTH_40:
- if (chandef->center_freq1 > chandef->chan->center_freq)
- chanmode = CHANNEL_G_HT40PLUS;
- else
- chanmode = CHANNEL_G_HT40MINUS;
- break;
- default:
- break;
- }
- break;
- case IEEE80211_BAND_5GHZ:
- switch (chandef->width) {
- case NL80211_CHAN_WIDTH_20_NOHT:
- case NL80211_CHAN_WIDTH_20:
- chanmode = CHANNEL_A_HT20;
- break;
- case NL80211_CHAN_WIDTH_40:
- if (chandef->center_freq1 > chandef->chan->center_freq)
- chanmode = CHANNEL_A_HT40PLUS;
- else
- chanmode = CHANNEL_A_HT40MINUS;
- break;
- default:
- break;
- }
- break;
- default:
- break;
- }
-
- return chanmode;
-}
-
/*
* Update internal channel flags.
*/
-void ath9k_cmn_update_ichannel(struct ath9k_channel *ichan,
- struct cfg80211_chan_def *chandef)
+static void ath9k_cmn_update_ichannel(struct ath9k_channel *ichan,
+ struct cfg80211_chan_def *chandef)
{
- ichan->channel = chandef->chan->center_freq;
- ichan->chan = chandef->chan;
-
- if (chandef->chan->band == IEEE80211_BAND_2GHZ) {
- ichan->chanmode = CHANNEL_G;
- ichan->channelFlags = CHANNEL_2GHZ | CHANNEL_OFDM;
- } else {
- ichan->chanmode = CHANNEL_A;
- ichan->channelFlags = CHANNEL_5GHZ | CHANNEL_OFDM;
- }
+ struct ieee80211_channel *chan = chandef->chan;
+ u16 flags = 0;
+
+ ichan->channel = chan->center_freq;
+ ichan->chan = chan;
+
+ if (chan->band == IEEE80211_BAND_5GHZ)
+ flags |= CHANNEL_5GHZ;
switch (chandef->width) {
case NL80211_CHAN_WIDTH_5:
- ichan->channelFlags |= CHANNEL_QUARTER;
+ flags |= CHANNEL_QUARTER;
break;
case NL80211_CHAN_WIDTH_10:
- ichan->channelFlags |= CHANNEL_HALF;
+ flags |= CHANNEL_HALF;
break;
case NL80211_CHAN_WIDTH_20_NOHT:
break;
case NL80211_CHAN_WIDTH_20:
+ flags |= CHANNEL_HT;
+ break;
case NL80211_CHAN_WIDTH_40:
- ichan->chanmode = ath9k_get_extchanmode(chandef);
+ if (chandef->center_freq1 > chandef->chan->center_freq)
+ flags |= CHANNEL_HT40PLUS | CHANNEL_HT;
+ else
+ flags |= CHANNEL_HT40MINUS | CHANNEL_HT;
break;
default:
WARN_ON(1);
}
+
+ ichan->channelFlags = flags;
}
-EXPORT_SYMBOL(ath9k_cmn_update_ichannel);
/*
* Get the internal channel reference.
*/
-struct ath9k_channel *ath9k_cmn_get_curchannel(struct ieee80211_hw *hw,
- struct ath_hw *ah)
+struct ath9k_channel *ath9k_cmn_get_channel(struct ieee80211_hw *hw,
+ struct ath_hw *ah,
+ struct cfg80211_chan_def *chandef)
{
- struct ieee80211_channel *curchan = hw->conf.chandef.chan;
+ struct ieee80211_channel *curchan = chandef->chan;
struct ath9k_channel *channel;
u8 chan_idx;
chan_idx = curchan->hw_value;
channel = &ah->channels[chan_idx];
- ath9k_cmn_update_ichannel(channel, &hw->conf.chandef);
+ ath9k_cmn_update_ichannel(channel, chandef);
return channel;
}
-EXPORT_SYMBOL(ath9k_cmn_get_curchannel);
+EXPORT_SYMBOL(ath9k_cmn_get_channel);
int ath9k_cmn_count_streams(unsigned int chainmask, int max)
{
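The common.c rework replaces the chanmode enumeration (and its ath9k_get_extchanmode() helper) with a single channelFlags bitmask derived from the cfg80211 chandef: CHANNEL_5GHZ for the band, CHANNEL_HALF/CHANNEL_QUARTER for 10/5 MHz widths, and CHANNEL_HT plus CHANNEL_HT40PLUS or CHANNEL_HT40MINUS depending on whether center_freq1 lies above or below the control channel. Callers can then test individual bits (as the IS_CHAN_HT40() checks in calib.c now do) instead of matching one of many chanmode values. A sketch of how such tests compose, with illustrative flag values rather than the driver's real ones:

#define MY_CHANNEL_HT40PLUS	BIT(2)
#define MY_CHANNEL_HT40MINUS	BIT(3)

#define MY_IS_CHAN_HT40(c) \
	((c)->channelFlags & (MY_CHANNEL_HT40PLUS | MY_CHANNEL_HT40MINUS))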
diff --git a/drivers/net/wireless/ath/ath9k/common.h b/drivers/net/wireless/ath/ath9k/common.h
index e039bcb..eb85e1b 100644
--- a/drivers/net/wireless/ath/ath9k/common.h
+++ b/drivers/net/wireless/ath/ath9k/common.h
@@ -43,10 +43,9 @@
(((x) + ((mul)/2)) / (mul))
int ath9k_cmn_get_hw_crypto_keytype(struct sk_buff *skb);
-void ath9k_cmn_update_ichannel(struct ath9k_channel *ichan,
- struct cfg80211_chan_def *chandef);
-struct ath9k_channel *ath9k_cmn_get_curchannel(struct ieee80211_hw *hw,
- struct ath_hw *ah);
+struct ath9k_channel *ath9k_cmn_get_channel(struct ieee80211_hw *hw,
+ struct ath_hw *ah,
+ struct cfg80211_chan_def *chandef);
int ath9k_cmn_count_streams(unsigned int chainmask, int max);
void ath9k_cmn_btcoex_bt_stomp(struct ath_common *common,
enum ath_stomp_type stomp_type);
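The prototype change mirrors common.c: ath9k_cmn_get_curchannel(), which always read hw->conf.chandef internally, becomes ath9k_cmn_get_channel() with the channel definition passed in explicitly, so a caller can resolve any chandef rather than only the currently configured one. A hypothetical call site, passing the current chandef to keep the old behaviour:

struct ath9k_channel *chan;

chan = ath9k_cmn_get_channel(hw, ah, &hw->conf.chandef);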
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index c088744..1be2c78 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -104,37 +104,37 @@ static ssize_t read_file_ani(struct file *file, char __user *user_buf,
return -ENOMEM;
if (common->disable_ani) {
- len += snprintf(buf + len, size - len, "%s: %s\n",
- "ANI", "DISABLED");
+ len += scnprintf(buf + len, size - len, "%s: %s\n",
+ "ANI", "DISABLED");
goto exit;
}
- len += snprintf(buf + len, size - len, "%15s: %s\n",
- "ANI", "ENABLED");
- len += snprintf(buf + len, size - len, "%15s: %u\n",
- "ANI RESET", ah->stats.ast_ani_reset);
- len += snprintf(buf + len, size - len, "%15s: %u\n",
- "SPUR UP", ah->stats.ast_ani_spurup);
- len += snprintf(buf + len, size - len, "%15s: %u\n",
- "SPUR DOWN", ah->stats.ast_ani_spurup);
- len += snprintf(buf + len, size - len, "%15s: %u\n",
- "OFDM WS-DET ON", ah->stats.ast_ani_ofdmon);
- len += snprintf(buf + len, size - len, "%15s: %u\n",
- "OFDM WS-DET OFF", ah->stats.ast_ani_ofdmoff);
- len += snprintf(buf + len, size - len, "%15s: %u\n",
- "MRC-CCK ON", ah->stats.ast_ani_ccklow);
- len += snprintf(buf + len, size - len, "%15s: %u\n",
- "MRC-CCK OFF", ah->stats.ast_ani_cckhigh);
- len += snprintf(buf + len, size - len, "%15s: %u\n",
- "FIR-STEP UP", ah->stats.ast_ani_stepup);
- len += snprintf(buf + len, size - len, "%15s: %u\n",
- "FIR-STEP DOWN", ah->stats.ast_ani_stepdown);
- len += snprintf(buf + len, size - len, "%15s: %u\n",
- "INV LISTENTIME", ah->stats.ast_ani_lneg_or_lzero);
- len += snprintf(buf + len, size - len, "%15s: %u\n",
- "OFDM ERRORS", ah->stats.ast_ani_ofdmerrs);
- len += snprintf(buf + len, size - len, "%15s: %u\n",
- "CCK ERRORS", ah->stats.ast_ani_cckerrs);
+ len += scnprintf(buf + len, size - len, "%15s: %s\n",
+ "ANI", "ENABLED");
+ len += scnprintf(buf + len, size - len, "%15s: %u\n",
+ "ANI RESET", ah->stats.ast_ani_reset);
+ len += scnprintf(buf + len, size - len, "%15s: %u\n",
+ "SPUR UP", ah->stats.ast_ani_spurup);
+ len += scnprintf(buf + len, size - len, "%15s: %u\n",
+ "SPUR DOWN", ah->stats.ast_ani_spurup);
+ len += scnprintf(buf + len, size - len, "%15s: %u\n",
+ "OFDM WS-DET ON", ah->stats.ast_ani_ofdmon);
+ len += scnprintf(buf + len, size - len, "%15s: %u\n",
+ "OFDM WS-DET OFF", ah->stats.ast_ani_ofdmoff);
+ len += scnprintf(buf + len, size - len, "%15s: %u\n",
+ "MRC-CCK ON", ah->stats.ast_ani_ccklow);
+ len += scnprintf(buf + len, size - len, "%15s: %u\n",
+ "MRC-CCK OFF", ah->stats.ast_ani_cckhigh);
+ len += scnprintf(buf + len, size - len, "%15s: %u\n",
+ "FIR-STEP UP", ah->stats.ast_ani_stepup);
+ len += scnprintf(buf + len, size - len, "%15s: %u\n",
+ "FIR-STEP DOWN", ah->stats.ast_ani_stepdown);
+ len += scnprintf(buf + len, size - len, "%15s: %u\n",
+ "INV LISTENTIME", ah->stats.ast_ani_lneg_or_lzero);
+ len += scnprintf(buf + len, size - len, "%15s: %u\n",
+ "OFDM ERRORS", ah->stats.ast_ani_ofdmerrs);
+ len += scnprintf(buf + len, size - len, "%15s: %u\n",
+ "CCK ERRORS", ah->stats.ast_ani_cckerrs);
exit:
if (len > size)
len = size;
@@ -280,70 +280,70 @@ static ssize_t read_file_antenna_diversity(struct file *file,
return -ENOMEM;
if (!(pCap->hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB)) {
- len += snprintf(buf + len, size - len, "%s\n",
- "Antenna Diversity Combining is disabled");
+ len += scnprintf(buf + len, size - len, "%s\n",
+ "Antenna Diversity Combining is disabled");
goto exit;
}
ath9k_ps_wakeup(sc);
ath9k_hw_antdiv_comb_conf_get(ah, &div_ant_conf);
- len += snprintf(buf + len, size - len, "Current MAIN config : %s\n",
- lna_conf_str[div_ant_conf.main_lna_conf]);
- len += snprintf(buf + len, size - len, "Current ALT config : %s\n",
- lna_conf_str[div_ant_conf.alt_lna_conf]);
- len += snprintf(buf + len, size - len, "Average MAIN RSSI : %d\n",
- as_main->rssi_avg);
- len += snprintf(buf + len, size - len, "Average ALT RSSI : %d\n\n",
- as_alt->rssi_avg);
+ len += scnprintf(buf + len, size - len, "Current MAIN config : %s\n",
+ lna_conf_str[div_ant_conf.main_lna_conf]);
+ len += scnprintf(buf + len, size - len, "Current ALT config : %s\n",
+ lna_conf_str[div_ant_conf.alt_lna_conf]);
+ len += scnprintf(buf + len, size - len, "Average MAIN RSSI : %d\n",
+ as_main->rssi_avg);
+ len += scnprintf(buf + len, size - len, "Average ALT RSSI : %d\n\n",
+ as_alt->rssi_avg);
ath9k_ps_restore(sc);
- len += snprintf(buf + len, size - len, "Packet Receive Cnt:\n");
- len += snprintf(buf + len, size - len, "-------------------\n");
-
- len += snprintf(buf + len, size - len, "%30s%15s\n",
- "MAIN", "ALT");
- len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n",
- "TOTAL COUNT",
- as_main->recv_cnt,
- as_alt->recv_cnt);
- len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n",
- "LNA1",
- as_main->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1],
- as_alt->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1]);
- len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n",
- "LNA2",
- as_main->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA2],
- as_alt->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA2]);
- len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n",
- "LNA1 + LNA2",
- as_main->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2],
- as_alt->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2]);
- len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n",
- "LNA1 - LNA2",
- as_main->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2],
- as_alt->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2]);
-
- len += snprintf(buf + len, size - len, "\nLNA Config Attempts:\n");
- len += snprintf(buf + len, size - len, "--------------------\n");
-
- len += snprintf(buf + len, size - len, "%30s%15s\n",
- "MAIN", "ALT");
- len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n",
- "LNA1",
- as_main->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1],
- as_alt->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1]);
- len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n",
- "LNA2",
- as_main->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA2],
- as_alt->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA2]);
- len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n",
- "LNA1 + LNA2",
- as_main->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2],
- as_alt->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2]);
- len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n",
- "LNA1 - LNA2",
- as_main->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2],
- as_alt->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2]);
+ len += scnprintf(buf + len, size - len, "Packet Receive Cnt:\n");
+ len += scnprintf(buf + len, size - len, "-------------------\n");
+
+ len += scnprintf(buf + len, size - len, "%30s%15s\n",
+ "MAIN", "ALT");
+ len += scnprintf(buf + len, size - len, "%-14s:%15d%15d\n",
+ "TOTAL COUNT",
+ as_main->recv_cnt,
+ as_alt->recv_cnt);
+ len += scnprintf(buf + len, size - len, "%-14s:%15d%15d\n",
+ "LNA1",
+ as_main->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1],
+ as_alt->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1]);
+ len += scnprintf(buf + len, size - len, "%-14s:%15d%15d\n",
+ "LNA2",
+ as_main->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA2],
+ as_alt->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA2]);
+ len += scnprintf(buf + len, size - len, "%-14s:%15d%15d\n",
+ "LNA1 + LNA2",
+ as_main->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2],
+ as_alt->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2]);
+ len += scnprintf(buf + len, size - len, "%-14s:%15d%15d\n",
+ "LNA1 - LNA2",
+ as_main->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2],
+ as_alt->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2]);
+
+ len += scnprintf(buf + len, size - len, "\nLNA Config Attempts:\n");
+ len += scnprintf(buf + len, size - len, "--------------------\n");
+
+ len += scnprintf(buf + len, size - len, "%30s%15s\n",
+ "MAIN", "ALT");
+ len += scnprintf(buf + len, size - len, "%-14s:%15d%15d\n",
+ "LNA1",
+ as_main->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1],
+ as_alt->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1]);
+ len += scnprintf(buf + len, size - len, "%-14s:%15d%15d\n",
+ "LNA2",
+ as_main->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA2],
+ as_alt->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA2]);
+ len += scnprintf(buf + len, size - len, "%-14s:%15d%15d\n",
+ "LNA1 + LNA2",
+ as_main->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2],
+ as_alt->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2]);
+ len += scnprintf(buf + len, size - len, "%-14s:%15d%15d\n",
+ "LNA1 - LNA2",
+ as_main->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2],
+ as_alt->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2]);
exit:
if (len > size)
@@ -385,21 +385,21 @@ static ssize_t read_file_dma(struct file *file, char __user *user_buf,
(AR_MACMISC_MISC_OBS_BUS_1 <<
AR_MACMISC_MISC_OBS_BUS_MSB_S)));
- len += snprintf(buf + len, DMA_BUF_LEN - len,
- "Raw DMA Debug values:\n");
+ len += scnprintf(buf + len, DMA_BUF_LEN - len,
+ "Raw DMA Debug values:\n");
for (i = 0; i < ATH9K_NUM_DMA_DEBUG_REGS; i++) {
if (i % 4 == 0)
- len += snprintf(buf + len, DMA_BUF_LEN - len, "\n");
+ len += scnprintf(buf + len, DMA_BUF_LEN - len, "\n");
val[i] = REG_READ_D(ah, AR_DMADBG_0 + (i * sizeof(u32)));
- len += snprintf(buf + len, DMA_BUF_LEN - len, "%d: %08x ",
- i, val[i]);
+ len += scnprintf(buf + len, DMA_BUF_LEN - len, "%d: %08x ",
+ i, val[i]);
}
- len += snprintf(buf + len, DMA_BUF_LEN - len, "\n\n");
- len += snprintf(buf + len, DMA_BUF_LEN - len,
- "Num QCU: chain_st fsp_ok fsp_st DCU: chain_st\n");
+ len += scnprintf(buf + len, DMA_BUF_LEN - len, "\n\n");
+ len += scnprintf(buf + len, DMA_BUF_LEN - len,
+ "Num QCU: chain_st fsp_ok fsp_st DCU: chain_st\n");
for (i = 0; i < ATH9K_NUM_QUEUES; i++, qcuOffset += 4, dcuOffset += 5) {
if (i == 8) {
@@ -412,39 +412,39 @@ static ssize_t read_file_dma(struct file *file, char __user *user_buf,
dcuBase++;
}
- len += snprintf(buf + len, DMA_BUF_LEN - len,
- "%2d %2x %1x %2x %2x\n",
- i, (*qcuBase & (0x7 << qcuOffset)) >> qcuOffset,
- (*qcuBase & (0x8 << qcuOffset)) >> (qcuOffset + 3),
- val[2] & (0x7 << (i * 3)) >> (i * 3),
- (*dcuBase & (0x1f << dcuOffset)) >> dcuOffset);
+ len += scnprintf(buf + len, DMA_BUF_LEN - len,
+ "%2d %2x %1x %2x %2x\n",
+ i, (*qcuBase & (0x7 << qcuOffset)) >> qcuOffset,
+ (*qcuBase & (0x8 << qcuOffset)) >> (qcuOffset + 3),
+ val[2] & (0x7 << (i * 3)) >> (i * 3),
+ (*dcuBase & (0x1f << dcuOffset)) >> dcuOffset);
}
- len += snprintf(buf + len, DMA_BUF_LEN - len, "\n");
+ len += scnprintf(buf + len, DMA_BUF_LEN - len, "\n");
- len += snprintf(buf + len, DMA_BUF_LEN - len,
+ len += scnprintf(buf + len, DMA_BUF_LEN - len,
"qcu_stitch state: %2x qcu_fetch state: %2x\n",
(val[3] & 0x003c0000) >> 18, (val[3] & 0x03c00000) >> 22);
- len += snprintf(buf + len, DMA_BUF_LEN - len,
+ len += scnprintf(buf + len, DMA_BUF_LEN - len,
"qcu_complete state: %2x dcu_complete state: %2x\n",
(val[3] & 0x1c000000) >> 26, (val[6] & 0x3));
- len += snprintf(buf + len, DMA_BUF_LEN - len,
+ len += scnprintf(buf + len, DMA_BUF_LEN - len,
"dcu_arb state: %2x dcu_fp state: %2x\n",
(val[5] & 0x06000000) >> 25, (val[5] & 0x38000000) >> 27);
- len += snprintf(buf + len, DMA_BUF_LEN - len,
+ len += scnprintf(buf + len, DMA_BUF_LEN - len,
"chan_idle_dur: %3d chan_idle_dur_valid: %1d\n",
(val[6] & 0x000003fc) >> 2, (val[6] & 0x00000400) >> 10);
- len += snprintf(buf + len, DMA_BUF_LEN - len,
+ len += scnprintf(buf + len, DMA_BUF_LEN - len,
"txfifo_valid_0: %1d txfifo_valid_1: %1d\n",
(val[6] & 0x00000800) >> 11, (val[6] & 0x00001000) >> 12);
- len += snprintf(buf + len, DMA_BUF_LEN - len,
+ len += scnprintf(buf + len, DMA_BUF_LEN - len,
"txfifo_dcu_num_0: %2d txfifo_dcu_num_1: %2d\n",
(val[6] & 0x0001e000) >> 13, (val[6] & 0x001e0000) >> 17);
- len += snprintf(buf + len, DMA_BUF_LEN - len, "pcu observe: 0x%x\n",
- REG_READ_D(ah, AR_OBS_BUS_1));
- len += snprintf(buf + len, DMA_BUF_LEN - len,
- "AR_CR: 0x%x\n", REG_READ_D(ah, AR_CR));
+ len += scnprintf(buf + len, DMA_BUF_LEN - len, "pcu observe: 0x%x\n",
+ REG_READ_D(ah, AR_OBS_BUS_1));
+ len += scnprintf(buf + len, DMA_BUF_LEN - len,
+ "AR_CR: 0x%x\n", REG_READ_D(ah, AR_CR));
ath9k_ps_restore(sc);
@@ -530,9 +530,9 @@ static ssize_t read_file_interrupt(struct file *file, char __user *user_buf,
#define PR_IS(a, s) \
do { \
- len += snprintf(buf + len, mxlen - len, \
- "%21s: %10u\n", a, \
- sc->debug.stats.istats.s); \
+ len += scnprintf(buf + len, mxlen - len, \
+ "%21s: %10u\n", a, \
+ sc->debug.stats.istats.s); \
} while (0)
if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
@@ -563,8 +563,8 @@ static ssize_t read_file_interrupt(struct file *file, char __user *user_buf,
PR_IS("GENTIMER", gen_timer);
PR_IS("TOTAL", total);
- len += snprintf(buf + len, mxlen - len,
- "SYNC_CAUSE stats:\n");
+ len += scnprintf(buf + len, mxlen - len,
+ "SYNC_CAUSE stats:\n");
PR_IS("Sync-All", sync_cause_all);
PR_IS("RTC-IRQ", sync_rtc_irq);
@@ -655,16 +655,16 @@ static ssize_t print_queue(struct ath_softc *sc, struct ath_txq *txq,
ath_txq_lock(sc, txq);
- len += snprintf(buf + len, size - len, "%s: %d ",
- "qnum", txq->axq_qnum);
- len += snprintf(buf + len, size - len, "%s: %2d ",
- "qdepth", txq->axq_depth);
- len += snprintf(buf + len, size - len, "%s: %2d ",
- "ampdu-depth", txq->axq_ampdu_depth);
- len += snprintf(buf + len, size - len, "%s: %3d ",
- "pending", txq->pending_frames);
- len += snprintf(buf + len, size - len, "%s: %d\n",
- "stopped", txq->stopped);
+ len += scnprintf(buf + len, size - len, "%s: %d ",
+ "qnum", txq->axq_qnum);
+ len += scnprintf(buf + len, size - len, "%s: %2d ",
+ "qdepth", txq->axq_depth);
+ len += scnprintf(buf + len, size - len, "%s: %2d ",
+ "ampdu-depth", txq->axq_ampdu_depth);
+ len += scnprintf(buf + len, size - len, "%s: %3d ",
+ "pending", txq->pending_frames);
+ len += scnprintf(buf + len, size - len, "%s: %d\n",
+ "stopped", txq->stopped);
ath_txq_unlock(sc, txq);
return len;
@@ -687,11 +687,11 @@ static ssize_t read_file_queues(struct file *file, char __user *user_buf,
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
txq = sc->tx.txq_map[i];
- len += snprintf(buf + len, size - len, "(%s): ", qname[i]);
+ len += scnprintf(buf + len, size - len, "(%s): ", qname[i]);
len += print_queue(sc, txq, buf + len, size - len);
}
- len += snprintf(buf + len, size - len, "(CAB): ");
+ len += scnprintf(buf + len, size - len, "(CAB): ");
len += print_queue(sc, sc->beacon.cabq, buf + len, size - len);
if (len > size)
@@ -716,80 +716,82 @@ static ssize_t read_file_misc(struct file *file, char __user *user_buf,
unsigned int reg;
u32 rxfilter;
- len += snprintf(buf + len, sizeof(buf) - len,
- "BSSID: %pM\n", common->curbssid);
- len += snprintf(buf + len, sizeof(buf) - len,
- "BSSID-MASK: %pM\n", common->bssidmask);
- len += snprintf(buf + len, sizeof(buf) - len,
- "OPMODE: %s\n", ath_opmode_to_string(sc->sc_ah->opmode));
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "BSSID: %pM\n", common->curbssid);
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "BSSID-MASK: %pM\n", common->bssidmask);
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "OPMODE: %s\n",
+ ath_opmode_to_string(sc->sc_ah->opmode));
ath9k_ps_wakeup(sc);
rxfilter = ath9k_hw_getrxfilter(sc->sc_ah);
ath9k_ps_restore(sc);
- len += snprintf(buf + len, sizeof(buf) - len,
- "RXFILTER: 0x%x", rxfilter);
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "RXFILTER: 0x%x", rxfilter);
if (rxfilter & ATH9K_RX_FILTER_UCAST)
- len += snprintf(buf + len, sizeof(buf) - len, " UCAST");
+ len += scnprintf(buf + len, sizeof(buf) - len, " UCAST");
if (rxfilter & ATH9K_RX_FILTER_MCAST)
- len += snprintf(buf + len, sizeof(buf) - len, " MCAST");
+ len += scnprintf(buf + len, sizeof(buf) - len, " MCAST");
if (rxfilter & ATH9K_RX_FILTER_BCAST)
- len += snprintf(buf + len, sizeof(buf) - len, " BCAST");
+ len += scnprintf(buf + len, sizeof(buf) - len, " BCAST");
if (rxfilter & ATH9K_RX_FILTER_CONTROL)
- len += snprintf(buf + len, sizeof(buf) - len, " CONTROL");
+ len += scnprintf(buf + len, sizeof(buf) - len, " CONTROL");
if (rxfilter & ATH9K_RX_FILTER_BEACON)
- len += snprintf(buf + len, sizeof(buf) - len, " BEACON");
+ len += scnprintf(buf + len, sizeof(buf) - len, " BEACON");
if (rxfilter & ATH9K_RX_FILTER_PROM)
- len += snprintf(buf + len, sizeof(buf) - len, " PROM");
+ len += scnprintf(buf + len, sizeof(buf) - len, " PROM");
if (rxfilter & ATH9K_RX_FILTER_PROBEREQ)
- len += snprintf(buf + len, sizeof(buf) - len, " PROBEREQ");
+ len += scnprintf(buf + len, sizeof(buf) - len, " PROBEREQ");
if (rxfilter & ATH9K_RX_FILTER_PHYERR)
- len += snprintf(buf + len, sizeof(buf) - len, " PHYERR");
+ len += scnprintf(buf + len, sizeof(buf) - len, " PHYERR");
if (rxfilter & ATH9K_RX_FILTER_MYBEACON)
- len += snprintf(buf + len, sizeof(buf) - len, " MYBEACON");
+ len += scnprintf(buf + len, sizeof(buf) - len, " MYBEACON");
if (rxfilter & ATH9K_RX_FILTER_COMP_BAR)
- len += snprintf(buf + len, sizeof(buf) - len, " COMP_BAR");
+ len += scnprintf(buf + len, sizeof(buf) - len, " COMP_BAR");
if (rxfilter & ATH9K_RX_FILTER_PSPOLL)
- len += snprintf(buf + len, sizeof(buf) - len, " PSPOLL");
+ len += scnprintf(buf + len, sizeof(buf) - len, " PSPOLL");
if (rxfilter & ATH9K_RX_FILTER_PHYRADAR)
- len += snprintf(buf + len, sizeof(buf) - len, " PHYRADAR");
+ len += scnprintf(buf + len, sizeof(buf) - len, " PHYRADAR");
if (rxfilter & ATH9K_RX_FILTER_MCAST_BCAST_ALL)
- len += snprintf(buf + len, sizeof(buf) - len, " MCAST_BCAST_ALL");
+ len += scnprintf(buf + len, sizeof(buf) - len, " MCAST_BCAST_ALL");
if (rxfilter & ATH9K_RX_FILTER_CONTROL_WRAPPER)
- len += snprintf(buf + len, sizeof(buf) - len, " CONTROL_WRAPPER");
+ len += scnprintf(buf + len, sizeof(buf) - len, " CONTROL_WRAPPER");
- len += snprintf(buf + len, sizeof(buf) - len, "\n");
+ len += scnprintf(buf + len, sizeof(buf) - len, "\n");
reg = sc->sc_ah->imask;
- len += snprintf(buf + len, sizeof(buf) - len, "INTERRUPT-MASK: 0x%x", reg);
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "INTERRUPT-MASK: 0x%x", reg);
if (reg & ATH9K_INT_SWBA)
- len += snprintf(buf + len, sizeof(buf) - len, " SWBA");
+ len += scnprintf(buf + len, sizeof(buf) - len, " SWBA");
if (reg & ATH9K_INT_BMISS)
- len += snprintf(buf + len, sizeof(buf) - len, " BMISS");
+ len += scnprintf(buf + len, sizeof(buf) - len, " BMISS");
if (reg & ATH9K_INT_CST)
- len += snprintf(buf + len, sizeof(buf) - len, " CST");
+ len += scnprintf(buf + len, sizeof(buf) - len, " CST");
if (reg & ATH9K_INT_RX)
- len += snprintf(buf + len, sizeof(buf) - len, " RX");
+ len += scnprintf(buf + len, sizeof(buf) - len, " RX");
if (reg & ATH9K_INT_RXHP)
- len += snprintf(buf + len, sizeof(buf) - len, " RXHP");
+ len += scnprintf(buf + len, sizeof(buf) - len, " RXHP");
if (reg & ATH9K_INT_RXLP)
- len += snprintf(buf + len, sizeof(buf) - len, " RXLP");
+ len += scnprintf(buf + len, sizeof(buf) - len, " RXLP");
if (reg & ATH9K_INT_BB_WATCHDOG)
- len += snprintf(buf + len, sizeof(buf) - len, " BB_WATCHDOG");
+ len += scnprintf(buf + len, sizeof(buf) - len, " BB_WATCHDOG");
- len += snprintf(buf + len, sizeof(buf) - len, "\n");
+ len += scnprintf(buf + len, sizeof(buf) - len, "\n");
ath9k_calculate_iter_data(hw, NULL, &iter_data);
- len += snprintf(buf + len, sizeof(buf) - len,
- "VIF-COUNTS: AP: %i STA: %i MESH: %i WDS: %i"
- " ADHOC: %i TOTAL: %hi BEACON-VIF: %hi\n",
- iter_data.naps, iter_data.nstations, iter_data.nmeshes,
- iter_data.nwds, iter_data.nadhocs,
- sc->nvifs, sc->nbcnvifs);
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "VIF-COUNTS: AP: %i STA: %i MESH: %i WDS: %i"
+ " ADHOC: %i TOTAL: %hi BEACON-VIF: %hi\n",
+ iter_data.naps, iter_data.nstations, iter_data.nmeshes,
+ iter_data.nwds, iter_data.nadhocs,
+ sc->nvifs, sc->nbcnvifs);
if (len > sizeof(buf))
len = sizeof(buf);
@@ -805,27 +807,27 @@ static ssize_t read_file_reset(struct file *file, char __user *user_buf,
char buf[512];
unsigned int len = 0;
- len += snprintf(buf + len, sizeof(buf) - len,
- "%17s: %2d\n", "Baseband Hang",
- sc->debug.stats.reset[RESET_TYPE_BB_HANG]);
- len += snprintf(buf + len, sizeof(buf) - len,
- "%17s: %2d\n", "Baseband Watchdog",
- sc->debug.stats.reset[RESET_TYPE_BB_WATCHDOG]);
- len += snprintf(buf + len, sizeof(buf) - len,
- "%17s: %2d\n", "Fatal HW Error",
- sc->debug.stats.reset[RESET_TYPE_FATAL_INT]);
- len += snprintf(buf + len, sizeof(buf) - len,
- "%17s: %2d\n", "TX HW error",
- sc->debug.stats.reset[RESET_TYPE_TX_ERROR]);
- len += snprintf(buf + len, sizeof(buf) - len,
- "%17s: %2d\n", "TX Path Hang",
- sc->debug.stats.reset[RESET_TYPE_TX_HANG]);
- len += snprintf(buf + len, sizeof(buf) - len,
- "%17s: %2d\n", "PLL RX Hang",
- sc->debug.stats.reset[RESET_TYPE_PLL_HANG]);
- len += snprintf(buf + len, sizeof(buf) - len,
- "%17s: %2d\n", "MCI Reset",
- sc->debug.stats.reset[RESET_TYPE_MCI]);
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%17s: %2d\n", "Baseband Hang",
+ sc->debug.stats.reset[RESET_TYPE_BB_HANG]);
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%17s: %2d\n", "Baseband Watchdog",
+ sc->debug.stats.reset[RESET_TYPE_BB_WATCHDOG]);
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%17s: %2d\n", "Fatal HW Error",
+ sc->debug.stats.reset[RESET_TYPE_FATAL_INT]);
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%17s: %2d\n", "TX HW error",
+ sc->debug.stats.reset[RESET_TYPE_TX_ERROR]);
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%17s: %2d\n", "TX Path Hang",
+ sc->debug.stats.reset[RESET_TYPE_TX_HANG]);
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%17s: %2d\n", "PLL RX Hang",
+ sc->debug.stats.reset[RESET_TYPE_PLL_HANG]);
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%17s: %2d\n", "MCI Reset",
+ sc->debug.stats.reset[RESET_TYPE_MCI]);
if (len > sizeof(buf))
len = sizeof(buf);
@@ -902,14 +904,14 @@ static ssize_t read_file_recv(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
#define PHY_ERR(s, p) \
- len += snprintf(buf + len, size - len, "%22s : %10u\n", s, \
- sc->debug.stats.rxstats.phy_err_stats[p]);
+ len += scnprintf(buf + len, size - len, "%22s : %10u\n", s, \
+ sc->debug.stats.rxstats.phy_err_stats[p]);
#define RXS_ERR(s, e) \
do { \
- len += snprintf(buf + len, size - len, \
- "%22s : %10u\n", s, \
- sc->debug.stats.rxstats.e); \
+ len += scnprintf(buf + len, size - len, \
+ "%22s : %10u\n", s, \
+ sc->debug.stats.rxstats.e);\
} while (0)
struct ath_softc *sc = file->private_data;
@@ -1439,22 +1441,22 @@ static ssize_t read_file_dump_nfcal(struct file *file, char __user *user_buf,
if (!buf)
return -ENOMEM;
- len += snprintf(buf + len, size - len,
- "Channel Noise Floor : %d\n", ah->noise);
- len += snprintf(buf + len, size - len,
- "Chain | privNF | # Readings | NF Readings\n");
+ len += scnprintf(buf + len, size - len,
+ "Channel Noise Floor : %d\n", ah->noise);
+ len += scnprintf(buf + len, size - len,
+ "Chain | privNF | # Readings | NF Readings\n");
for (i = 0; i < NUM_NF_READINGS; i++) {
if (!(chainmask & (1 << i)) ||
((i >= AR5416_MAX_CHAINS) && !conf_is_ht40(conf)))
continue;
nread = AR_PHY_CCA_FILTERWINDOW_LENGTH - h[i].invalidNFcount;
- len += snprintf(buf + len, size - len, " %d\t %d\t %d\t\t",
- i, h[i].privNF, nread);
+ len += scnprintf(buf + len, size - len, " %d\t %d\t %d\t\t",
+ i, h[i].privNF, nread);
for (j = 0; j < nread; j++)
- len += snprintf(buf + len, size - len,
- " %d", h[i].nfCalBuffer[j]);
- len += snprintf(buf + len, size - len, "\n");
+ len += scnprintf(buf + len, size - len,
+ " %d", h[i].nfCalBuffer[j]);
+ len += scnprintf(buf + len, size - len, "\n");
}
if (len > size)
@@ -1543,8 +1545,8 @@ static ssize_t read_file_btcoex(struct file *file, char __user *user_buf,
return -ENOMEM;
if (!sc->sc_ah->common.btcoex_enabled) {
- len = snprintf(buf, size, "%s\n",
- "BTCOEX is disabled");
+ len = scnprintf(buf, size, "%s\n",
+ "BTCOEX is disabled");
goto exit;
}
@@ -1582,43 +1584,43 @@ static ssize_t read_file_node_stat(struct file *file, char __user *user_buf,
return -ENOMEM;
if (!an->sta->ht_cap.ht_supported) {
- len = snprintf(buf, size, "%s\n",
- "HT not supported");
+ len = scnprintf(buf, size, "%s\n",
+ "HT not supported");
goto exit;
}
- len = snprintf(buf, size, "Max-AMPDU: %d\n",
- an->maxampdu);
- len += snprintf(buf + len, size - len, "MPDU Density: %d\n\n",
- an->mpdudensity);
+ len = scnprintf(buf, size, "Max-AMPDU: %d\n",
+ an->maxampdu);
+ len += scnprintf(buf + len, size - len, "MPDU Density: %d\n\n",
+ an->mpdudensity);
- len += snprintf(buf + len, size - len,
- "%2s%7s\n", "AC", "SCHED");
+ len += scnprintf(buf + len, size - len,
+ "%2s%7s\n", "AC", "SCHED");
for (acno = 0, ac = &an->ac[acno];
acno < IEEE80211_NUM_ACS; acno++, ac++) {
txq = ac->txq;
ath_txq_lock(sc, txq);
- len += snprintf(buf + len, size - len,
- "%2d%7d\n",
- acno, ac->sched);
+ len += scnprintf(buf + len, size - len,
+ "%2d%7d\n",
+ acno, ac->sched);
ath_txq_unlock(sc, txq);
}
- len += snprintf(buf + len, size - len,
- "\n%3s%11s%10s%10s%10s%10s%9s%6s%8s\n",
- "TID", "SEQ_START", "SEQ_NEXT", "BAW_SIZE",
- "BAW_HEAD", "BAW_TAIL", "BAR_IDX", "SCHED", "PAUSED");
+ len += scnprintf(buf + len, size - len,
+ "\n%3s%11s%10s%10s%10s%10s%9s%6s%8s\n",
+ "TID", "SEQ_START", "SEQ_NEXT", "BAW_SIZE",
+ "BAW_HEAD", "BAW_TAIL", "BAR_IDX", "SCHED", "PAUSED");
for (tidno = 0, tid = &an->tid[tidno];
tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {
txq = tid->ac->txq;
ath_txq_lock(sc, txq);
- len += snprintf(buf + len, size - len,
- "%3d%11d%10d%10d%10d%10d%9d%6d%8d\n",
- tid->tidno, tid->seq_start, tid->seq_next,
- tid->baw_size, tid->baw_head, tid->baw_tail,
- tid->bar_index, tid->sched, tid->paused);
+ len += scnprintf(buf + len, size - len,
+ "%3d%11d%10d%10d%10d%10d%9d%6d%8d\n",
+ tid->tidno, tid->seq_start, tid->seq_next,
+ tid->baw_size, tid->baw_head, tid->baw_tail,
+ tid->bar_index, tid->sched, tid->paused);
ath_txq_unlock(sc, txq);
}
exit:
diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h
index 6e1556f..d6e3fa4 100644
--- a/drivers/net/wireless/ath/ath9k/debug.h
+++ b/drivers/net/wireless/ath/ath9k/debug.h
@@ -193,12 +193,12 @@ struct ath_tx_stats {
#define TXSTATS sc->debug.stats.txstats
#define PR(str, elem) \
do { \
- len += snprintf(buf + len, size - len, \
- "%s%13u%11u%10u%10u\n", str, \
- TXSTATS[PR_QNUM(IEEE80211_AC_BE)].elem, \
- TXSTATS[PR_QNUM(IEEE80211_AC_BK)].elem, \
- TXSTATS[PR_QNUM(IEEE80211_AC_VI)].elem, \
- TXSTATS[PR_QNUM(IEEE80211_AC_VO)].elem); \
+ len += scnprintf(buf + len, size - len, \
+ "%s%13u%11u%10u%10u\n", str, \
+ TXSTATS[PR_QNUM(IEEE80211_AC_BE)].elem,\
+ TXSTATS[PR_QNUM(IEEE80211_AC_BK)].elem,\
+ TXSTATS[PR_QNUM(IEEE80211_AC_VI)].elem,\
+ TXSTATS[PR_QNUM(IEEE80211_AC_VO)].elem); \
} while(0)
#define RX_STAT_INC(c) (sc->debug.stats.rxstats.c++)
diff --git a/drivers/net/wireless/ath/ath9k/dfs_debug.c b/drivers/net/wireless/ath/ath9k/dfs_debug.c
index 3c6e413..8215991 100644
--- a/drivers/net/wireless/ath/ath9k/dfs_debug.c
+++ b/drivers/net/wireless/ath/ath9k/dfs_debug.c
@@ -25,11 +25,11 @@
struct ath_dfs_pool_stats global_dfs_pool_stats = { 0 };
#define ATH9K_DFS_STAT(s, p) \
- len += snprintf(buf + len, size - len, "%28s : %10u\n", s, \
- sc->debug.stats.dfs_stats.p);
+ len += scnprintf(buf + len, size - len, "%28s : %10u\n", s, \
+ sc->debug.stats.dfs_stats.p);
#define ATH9K_DFS_POOL_STAT(s, p) \
- len += snprintf(buf + len, size - len, "%28s : %10u\n", s, \
- global_dfs_pool_stats.p);
+ len += scnprintf(buf + len, size - len, "%28s : %10u\n", s, \
+ global_dfs_pool_stats.p);
static ssize_t read_file_dfs(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
@@ -44,12 +44,12 @@ static ssize_t read_file_dfs(struct file *file, char __user *user_buf,
if (buf == NULL)
return -ENOMEM;
- len += snprintf(buf + len, size - len, "DFS support for "
- "macVersion = 0x%x, macRev = 0x%x: %s\n",
- hw_ver->macVersion, hw_ver->macRev,
- (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_DFS) ?
+ len += scnprintf(buf + len, size - len, "DFS support for "
+ "macVersion = 0x%x, macRev = 0x%x: %s\n",
+ hw_ver->macVersion, hw_ver->macRev,
+ (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_DFS) ?
"enabled" : "disabled");
- len += snprintf(buf + len, size - len, "Pulse detector statistics:\n");
+ len += scnprintf(buf + len, size - len, "Pulse detector statistics:\n");
ATH9K_DFS_STAT("pulse events reported ", pulses_total);
ATH9K_DFS_STAT("invalid pulse events ", pulses_no_dfs);
ATH9K_DFS_STAT("DFS pulses detected ", pulses_detected);
@@ -59,11 +59,12 @@ static ssize_t read_file_dfs(struct file *file, char __user *user_buf,
ATH9K_DFS_STAT("Primary channel pulses ", pri_phy_errors);
ATH9K_DFS_STAT("Secondary channel pulses", ext_phy_errors);
ATH9K_DFS_STAT("Dual channel pulses ", dc_phy_errors);
- len += snprintf(buf + len, size - len, "Radar detector statistics "
- "(current DFS region: %d)\n", sc->dfs_detector->region);
+ len += scnprintf(buf + len, size - len, "Radar detector statistics "
+ "(current DFS region: %d)\n",
+ sc->dfs_detector->region);
ATH9K_DFS_STAT("Pulse events processed ", pulses_processed);
ATH9K_DFS_STAT("Radars detected ", radar_detected);
- len += snprintf(buf + len, size - len, "Global Pool statistics:\n");
+ len += scnprintf(buf + len, size - len, "Global Pool statistics:\n");
ATH9K_DFS_POOL_STAT("Pool references ", pool_reference);
ATH9K_DFS_POOL_STAT("Pulses allocated ", pulse_allocated);
ATH9K_DFS_POOL_STAT("Pulses alloc error ", pulse_alloc_error);
diff --git a/drivers/net/wireless/ath/ath9k/dfs_pri_detector.c b/drivers/net/wireless/ath/ath9k/dfs_pri_detector.c
index 5ba4b6f..c718fc3 100644
--- a/drivers/net/wireless/ath/ath9k/dfs_pri_detector.c
+++ b/drivers/net/wireless/ath/ath9k/dfs_pri_detector.c
@@ -392,7 +392,7 @@ static struct pri_sequence *pri_detector_add_pulse(struct pri_detector *de,
if (!pseq_handler_create_sequences(de, ts, max_updated_seq)) {
pri_detector_reset(de, ts);
- return false;
+ return NULL;
}
ps = pseq_handler_check_detection(de);
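pri_detector_add_pulse() returns a struct pri_sequence pointer, so the bare "return false" above only worked because C's false expands to the integer 0; returning NULL states the intent and avoids "plain integer as NULL pointer" style warnings from tools like sparse. A minimal sketch of the rule, self-contained for illustration:

static struct pri_sequence *my_check_seq(struct pri_sequence *candidate)
{
	if (!candidate)
		return NULL;	/* not 'false': this function returns a pointer */

	return candidate;
}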
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_4k.c b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
index 9ea8e4b..b409171 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_4k.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
@@ -129,10 +129,10 @@ static u32 ath9k_hw_4k_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
struct base_eep_header_4k *pBase = &eep->baseEepHeader;
if (!dump_base_hdr) {
- len += snprintf(buf + len, size - len,
- "%20s :\n", "2GHz modal Header");
+ len += scnprintf(buf + len, size - len,
+ "%20s :\n", "2GHz modal Header");
len = ath9k_dump_4k_modal_eeprom(buf, len, size,
- &eep->modalHeader);
+ &eep->modalHeader);
goto out;
}
@@ -160,8 +160,8 @@ static u32 ath9k_hw_4k_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
PR_EEP("Cal Bin Build", (pBase->binBuildNumber >> 8) & 0xFF);
PR_EEP("TX Gain type", pBase->txGainType);
- len += snprintf(buf + len, size - len, "%20s : %pM\n", "MacAddress",
- pBase->macAddr);
+ len += scnprintf(buf + len, size - len, "%20s : %pM\n", "MacAddress",
+ pBase->macAddr);
out:
if (len > size)
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_9287.c b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
index 3ae1f3d..e1d0c21 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_9287.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
@@ -125,8 +125,8 @@ static u32 ath9k_hw_ar9287_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
struct base_eep_ar9287_header *pBase = &eep->baseEepHeader;
if (!dump_base_hdr) {
- len += snprintf(buf + len, size - len,
- "%20s :\n", "2GHz modal Header");
+ len += scnprintf(buf + len, size - len,
+ "%20s :\n", "2GHz modal Header");
len = ar9287_dump_modal_eeprom(buf, len, size,
&eep->modalHeader);
goto out;
@@ -157,8 +157,8 @@ static u32 ath9k_hw_ar9287_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
PR_EEP("Power Table Offset", pBase->pwrTableOffset);
PR_EEP("OpenLoop Power Ctrl", pBase->openLoopPwrCntl);
- len += snprintf(buf + len, size - len, "%20s : %pM\n", "MacAddress",
- pBase->macAddr);
+ len += scnprintf(buf + len, size - len, "%20s : %pM\n", "MacAddress",
+ pBase->macAddr);
out:
if (len > size)
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_def.c b/drivers/net/wireless/ath/ath9k/eeprom_def.c
index 1c25368..39107e3 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_def.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_def.c
@@ -205,12 +205,12 @@ static u32 ath9k_hw_def_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
struct base_eep_header *pBase = &eep->baseEepHeader;
if (!dump_base_hdr) {
- len += snprintf(buf + len, size - len,
- "%20s :\n", "2GHz modal Header");
+ len += scnprintf(buf + len, size - len,
+ "%20s :\n", "2GHz modal Header");
len = ath9k_def_dump_modal_eeprom(buf, len, size,
&eep->modalHeader[0]);
- len += snprintf(buf + len, size - len,
- "%20s :\n", "5GHz modal Header");
+ len += scnprintf(buf + len, size - len,
+ "%20s :\n", "5GHz modal Header");
len = ath9k_def_dump_modal_eeprom(buf, len, size,
&eep->modalHeader[1]);
goto out;
@@ -240,8 +240,8 @@ static u32 ath9k_hw_def_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
PR_EEP("Cal Bin Build", (pBase->binBuildNumber >> 8) & 0xFF);
PR_EEP("OpenLoop Power Ctrl", pBase->openLoopPwrCntl);
- len += snprintf(buf + len, size - len, "%20s : %pM\n", "MacAddress",
- pBase->macAddr);
+ len += scnprintf(buf + len, size - len, "%20s : %pM\n", "MacAddress",
+ pBase->macAddr);
out:
if (len > size)
diff --git a/drivers/net/wireless/ath/ath9k/gpio.c b/drivers/net/wireless/ath/ath9k/gpio.c
index 4b412aa..c34f212 100644
--- a/drivers/net/wireless/ath/ath9k/gpio.c
+++ b/drivers/net/wireless/ath/ath9k/gpio.c
@@ -522,22 +522,22 @@ static int ath9k_dump_mci_btcoex(struct ath_softc *sc, u8 *buf, u32 size)
ATH_DUMP_BTCOEX("Concurrent Tx", btcoex_hw->mci.concur_tx);
ATH_DUMP_BTCOEX("Concurrent RSSI cnt", btcoex->rssi_count);
- len += snprintf(buf + len, size - len, "BT Weights: ");
+ len += scnprintf(buf + len, size - len, "BT Weights: ");
for (i = 0; i < AR9300_NUM_BT_WEIGHTS; i++)
- len += snprintf(buf + len, size - len, "%08x ",
- btcoex_hw->bt_weight[i]);
- len += snprintf(buf + len, size - len, "\n");
- len += snprintf(buf + len, size - len, "WLAN Weights: ");
+ len += scnprintf(buf + len, size - len, "%08x ",
+ btcoex_hw->bt_weight[i]);
+ len += scnprintf(buf + len, size - len, "\n");
+ len += scnprintf(buf + len, size - len, "WLAN Weights: ");
for (i = 0; i < AR9300_NUM_BT_WEIGHTS; i++)
- len += snprintf(buf + len, size - len, "%08x ",
- btcoex_hw->wlan_weight[i]);
- len += snprintf(buf + len, size - len, "\n");
- len += snprintf(buf + len, size - len, "Tx Priorities: ");
+ len += scnprintf(buf + len, size - len, "%08x ",
+ btcoex_hw->wlan_weight[i]);
+ len += scnprintf(buf + len, size - len, "\n");
+ len += scnprintf(buf + len, size - len, "Tx Priorities: ");
for (i = 0; i < ATH_BTCOEX_STOMP_MAX; i++)
- len += snprintf(buf + len, size - len, "%08x ",
+ len += scnprintf(buf + len, size - len, "%08x ",
btcoex_hw->tx_prio[i]);
- len += snprintf(buf + len, size - len, "\n");
+ len += scnprintf(buf + len, size - len, "\n");
return len;
}
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_debug.c b/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
index c1b45e2..fb071ee 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
@@ -37,29 +37,29 @@ static ssize_t read_file_tgt_int_stats(struct file *file, char __user *user_buf,
ath9k_htc_ps_restore(priv);
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "RX",
- be32_to_cpu(cmd_rsp.rx));
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "RX",
+ be32_to_cpu(cmd_rsp.rx));
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "RXORN",
- be32_to_cpu(cmd_rsp.rxorn));
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "RXORN",
+ be32_to_cpu(cmd_rsp.rxorn));
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "RXEOL",
- be32_to_cpu(cmd_rsp.rxeol));
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "RXEOL",
+ be32_to_cpu(cmd_rsp.rxeol));
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "TXURN",
- be32_to_cpu(cmd_rsp.txurn));
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "TXURN",
+ be32_to_cpu(cmd_rsp.txurn));
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "TXTO",
- be32_to_cpu(cmd_rsp.txto));
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "TXTO",
+ be32_to_cpu(cmd_rsp.txto));
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "CST",
- be32_to_cpu(cmd_rsp.cst));
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "CST",
+ be32_to_cpu(cmd_rsp.cst));
if (len > sizeof(buf))
len = sizeof(buf);
@@ -95,41 +95,41 @@ static ssize_t read_file_tgt_tx_stats(struct file *file, char __user *user_buf,
ath9k_htc_ps_restore(priv);
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "Xretries",
- be32_to_cpu(cmd_rsp.xretries));
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "Xretries",
+ be32_to_cpu(cmd_rsp.xretries));
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "FifoErr",
- be32_to_cpu(cmd_rsp.fifoerr));
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "FifoErr",
+ be32_to_cpu(cmd_rsp.fifoerr));
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "Filtered",
- be32_to_cpu(cmd_rsp.filtered));
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "Filtered",
+ be32_to_cpu(cmd_rsp.filtered));
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "TimerExp",
- be32_to_cpu(cmd_rsp.timer_exp));
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "TimerExp",
+ be32_to_cpu(cmd_rsp.timer_exp));
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "ShortRetries",
- be32_to_cpu(cmd_rsp.shortretries));
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "ShortRetries",
+ be32_to_cpu(cmd_rsp.shortretries));
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "LongRetries",
- be32_to_cpu(cmd_rsp.longretries));
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "LongRetries",
+ be32_to_cpu(cmd_rsp.longretries));
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "QueueNull",
- be32_to_cpu(cmd_rsp.qnull));
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "QueueNull",
+ be32_to_cpu(cmd_rsp.qnull));
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "EncapFail",
- be32_to_cpu(cmd_rsp.encap_fail));
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "EncapFail",
+ be32_to_cpu(cmd_rsp.encap_fail));
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "NoBuf",
- be32_to_cpu(cmd_rsp.nobuf));
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "NoBuf",
+ be32_to_cpu(cmd_rsp.nobuf));
if (len > sizeof(buf))
len = sizeof(buf);
@@ -165,17 +165,17 @@ static ssize_t read_file_tgt_rx_stats(struct file *file, char __user *user_buf,
ath9k_htc_ps_restore(priv);
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "NoBuf",
- be32_to_cpu(cmd_rsp.nobuf));
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "NoBuf",
+ be32_to_cpu(cmd_rsp.nobuf));
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "HostSend",
- be32_to_cpu(cmd_rsp.host_send));
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "HostSend",
+ be32_to_cpu(cmd_rsp.host_send));
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "HostDone",
- be32_to_cpu(cmd_rsp.host_done));
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "HostDone",
+ be32_to_cpu(cmd_rsp.host_done));
if (len > sizeof(buf))
len = sizeof(buf);
@@ -197,37 +197,37 @@ static ssize_t read_file_xmit(struct file *file, char __user *user_buf,
char buf[512];
unsigned int len = 0;
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "Buffers queued",
- priv->debug.tx_stats.buf_queued);
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "Buffers completed",
- priv->debug.tx_stats.buf_completed);
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "SKBs queued",
- priv->debug.tx_stats.skb_queued);
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "SKBs success",
- priv->debug.tx_stats.skb_success);
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "SKBs failed",
- priv->debug.tx_stats.skb_failed);
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "CAB queued",
- priv->debug.tx_stats.cab_queued);
-
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "BE queued",
- priv->debug.tx_stats.queue_stats[IEEE80211_AC_BE]);
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "BK queued",
- priv->debug.tx_stats.queue_stats[IEEE80211_AC_BK]);
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "VI queued",
- priv->debug.tx_stats.queue_stats[IEEE80211_AC_VI]);
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "VO queued",
- priv->debug.tx_stats.queue_stats[IEEE80211_AC_VO]);
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "Buffers queued",
+ priv->debug.tx_stats.buf_queued);
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "Buffers completed",
+ priv->debug.tx_stats.buf_completed);
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "SKBs queued",
+ priv->debug.tx_stats.skb_queued);
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "SKBs success",
+ priv->debug.tx_stats.skb_success);
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "SKBs failed",
+ priv->debug.tx_stats.skb_failed);
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "CAB queued",
+ priv->debug.tx_stats.cab_queued);
+
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "BE queued",
+ priv->debug.tx_stats.queue_stats[IEEE80211_AC_BE]);
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "BK queued",
+ priv->debug.tx_stats.queue_stats[IEEE80211_AC_BK]);
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "VI queued",
+ priv->debug.tx_stats.queue_stats[IEEE80211_AC_VI]);
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "VO queued",
+ priv->debug.tx_stats.queue_stats[IEEE80211_AC_VO]);
if (len > sizeof(buf))
len = sizeof(buf);
@@ -273,8 +273,8 @@ static ssize_t read_file_recv(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
#define PHY_ERR(s, p) \
- len += snprintf(buf + len, size - len, "%20s : %10u\n", s, \
- priv->debug.rx_stats.err_phy_stats[p]);
+ len += scnprintf(buf + len, size - len, "%20s : %10u\n", s, \
+ priv->debug.rx_stats.err_phy_stats[p]);
struct ath9k_htc_priv *priv = file->private_data;
char *buf;
@@ -285,37 +285,37 @@ static ssize_t read_file_recv(struct file *file, char __user *user_buf,
if (buf == NULL)
return -ENOMEM;
- len += snprintf(buf + len, size - len,
- "%20s : %10u\n", "SKBs allocated",
- priv->debug.rx_stats.skb_allocated);
- len += snprintf(buf + len, size - len,
- "%20s : %10u\n", "SKBs completed",
- priv->debug.rx_stats.skb_completed);
- len += snprintf(buf + len, size - len,
- "%20s : %10u\n", "SKBs Dropped",
- priv->debug.rx_stats.skb_dropped);
-
- len += snprintf(buf + len, size - len,
- "%20s : %10u\n", "CRC ERR",
- priv->debug.rx_stats.err_crc);
- len += snprintf(buf + len, size - len,
- "%20s : %10u\n", "DECRYPT CRC ERR",
- priv->debug.rx_stats.err_decrypt_crc);
- len += snprintf(buf + len, size - len,
- "%20s : %10u\n", "MIC ERR",
- priv->debug.rx_stats.err_mic);
- len += snprintf(buf + len, size - len,
- "%20s : %10u\n", "PRE-DELIM CRC ERR",
- priv->debug.rx_stats.err_pre_delim);
- len += snprintf(buf + len, size - len,
- "%20s : %10u\n", "POST-DELIM CRC ERR",
- priv->debug.rx_stats.err_post_delim);
- len += snprintf(buf + len, size - len,
- "%20s : %10u\n", "DECRYPT BUSY ERR",
- priv->debug.rx_stats.err_decrypt_busy);
- len += snprintf(buf + len, size - len,
- "%20s : %10u\n", "TOTAL PHY ERR",
- priv->debug.rx_stats.err_phy);
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10u\n", "SKBs allocated",
+ priv->debug.rx_stats.skb_allocated);
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10u\n", "SKBs completed",
+ priv->debug.rx_stats.skb_completed);
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10u\n", "SKBs Dropped",
+ priv->debug.rx_stats.skb_dropped);
+
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10u\n", "CRC ERR",
+ priv->debug.rx_stats.err_crc);
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10u\n", "DECRYPT CRC ERR",
+ priv->debug.rx_stats.err_decrypt_crc);
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10u\n", "MIC ERR",
+ priv->debug.rx_stats.err_mic);
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10u\n", "PRE-DELIM CRC ERR",
+ priv->debug.rx_stats.err_pre_delim);
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10u\n", "POST-DELIM CRC ERR",
+ priv->debug.rx_stats.err_post_delim);
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10u\n", "DECRYPT BUSY ERR",
+ priv->debug.rx_stats.err_decrypt_busy);
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10u\n", "TOTAL PHY ERR",
+ priv->debug.rx_stats.err_phy);
PHY_ERR("UNDERRUN", ATH9K_PHYERR_UNDERRUN);
@@ -372,16 +372,16 @@ static ssize_t read_file_slot(struct file *file, char __user *user_buf,
spin_lock_bh(&priv->tx.tx_lock);
- len += snprintf(buf + len, sizeof(buf) - len, "TX slot bitmap : ");
+ len += scnprintf(buf + len, sizeof(buf) - len, "TX slot bitmap : ");
len += bitmap_scnprintf(buf + len, sizeof(buf) - len,
priv->tx.tx_slot, MAX_TX_BUF_NUM);
- len += snprintf(buf + len, sizeof(buf) - len, "\n");
+ len += scnprintf(buf + len, sizeof(buf) - len, "\n");
- len += snprintf(buf + len, sizeof(buf) - len,
- "Used slots : %d\n",
- bitmap_weight(priv->tx.tx_slot, MAX_TX_BUF_NUM));
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "Used slots : %d\n",
+ bitmap_weight(priv->tx.tx_slot, MAX_TX_BUF_NUM));
spin_unlock_bh(&priv->tx.tx_lock);
@@ -405,30 +405,30 @@ static ssize_t read_file_queue(struct file *file, char __user *user_buf,
char buf[512];
unsigned int len = 0;
- len += snprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
- "Mgmt endpoint", skb_queue_len(&priv->tx.mgmt_ep_queue));
+ len += scnprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
+ "Mgmt endpoint", skb_queue_len(&priv->tx.mgmt_ep_queue));
- len += snprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
- "Cab endpoint", skb_queue_len(&priv->tx.cab_ep_queue));
+ len += scnprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
+ "Cab endpoint", skb_queue_len(&priv->tx.cab_ep_queue));
- len += snprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
- "Data BE endpoint", skb_queue_len(&priv->tx.data_be_queue));
+ len += scnprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
+ "Data BE endpoint", skb_queue_len(&priv->tx.data_be_queue));
- len += snprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
- "Data BK endpoint", skb_queue_len(&priv->tx.data_bk_queue));
+ len += scnprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
+ "Data BK endpoint", skb_queue_len(&priv->tx.data_bk_queue));
- len += snprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
- "Data VI endpoint", skb_queue_len(&priv->tx.data_vi_queue));
+ len += scnprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
+ "Data VI endpoint", skb_queue_len(&priv->tx.data_vi_queue));
- len += snprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
- "Data VO endpoint", skb_queue_len(&priv->tx.data_vo_queue));
+ len += scnprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
+ "Data VO endpoint", skb_queue_len(&priv->tx.data_vo_queue));
- len += snprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
- "Failed queue", skb_queue_len(&priv->tx.tx_failed));
+ len += scnprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
+ "Failed queue", skb_queue_len(&priv->tx.tx_failed));
spin_lock_bh(&priv->tx.tx_lock);
- len += snprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
- "Queued count", priv->tx.queued_cnt);
+ len += scnprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
+ "Queued count", priv->tx.queued_cnt);
spin_unlock_bh(&priv->tx.tx_lock);
if (len > sizeof(buf))
@@ -507,70 +507,70 @@ static ssize_t read_file_base_eeprom(struct file *file, char __user *user_buf,
if (buf == NULL)
return -ENOMEM;
- len += snprintf(buf + len, size - len,
- "%20s : %10d\n", "Major Version",
- pBase->version >> 12);
- len += snprintf(buf + len, size - len,
- "%20s : %10d\n", "Minor Version",
- pBase->version & 0xFFF);
- len += snprintf(buf + len, size - len,
- "%20s : %10d\n", "Checksum",
- pBase->checksum);
- len += snprintf(buf + len, size - len,
- "%20s : %10d\n", "Length",
- pBase->length);
- len += snprintf(buf + len, size - len,
- "%20s : %10d\n", "RegDomain1",
- pBase->regDmn[0]);
- len += snprintf(buf + len, size - len,
- "%20s : %10d\n", "RegDomain2",
- pBase->regDmn[1]);
- len += snprintf(buf + len, size - len,
- "%20s : %10d\n",
- "TX Mask", pBase->txMask);
- len += snprintf(buf + len, size - len,
- "%20s : %10d\n",
- "RX Mask", pBase->rxMask);
- len += snprintf(buf + len, size - len,
- "%20s : %10d\n",
- "Allow 5GHz",
- !!(pBase->opCapFlags & AR5416_OPFLAGS_11A));
- len += snprintf(buf + len, size - len,
- "%20s : %10d\n",
- "Allow 2GHz",
- !!(pBase->opCapFlags & AR5416_OPFLAGS_11G));
- len += snprintf(buf + len, size - len,
- "%20s : %10d\n",
- "Disable 2GHz HT20",
- !!(pBase->opCapFlags & AR5416_OPFLAGS_N_2G_HT20));
- len += snprintf(buf + len, size - len,
- "%20s : %10d\n",
- "Disable 2GHz HT40",
- !!(pBase->opCapFlags & AR5416_OPFLAGS_N_2G_HT40));
- len += snprintf(buf + len, size - len,
- "%20s : %10d\n",
- "Disable 5Ghz HT20",
- !!(pBase->opCapFlags & AR5416_OPFLAGS_N_5G_HT20));
- len += snprintf(buf + len, size - len,
- "%20s : %10d\n",
- "Disable 5Ghz HT40",
- !!(pBase->opCapFlags & AR5416_OPFLAGS_N_5G_HT40));
- len += snprintf(buf + len, size - len,
- "%20s : %10d\n",
- "Big Endian",
- !!(pBase->eepMisc & 0x01));
- len += snprintf(buf + len, size - len,
- "%20s : %10d\n",
- "Cal Bin Major Ver",
- (pBase->binBuildNumber >> 24) & 0xFF);
- len += snprintf(buf + len, size - len,
- "%20s : %10d\n",
- "Cal Bin Minor Ver",
- (pBase->binBuildNumber >> 16) & 0xFF);
- len += snprintf(buf + len, size - len,
- "%20s : %10d\n",
- "Cal Bin Build",
- (pBase->binBuildNumber >> 8) & 0xFF);
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10d\n", "Major Version",
+ pBase->version >> 12);
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10d\n", "Minor Version",
+ pBase->version & 0xFFF);
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10d\n", "Checksum",
+ pBase->checksum);
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10d\n", "Length",
+ pBase->length);
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10d\n", "RegDomain1",
+ pBase->regDmn[0]);
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10d\n", "RegDomain2",
+ pBase->regDmn[1]);
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10d\n",
+ "TX Mask", pBase->txMask);
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10d\n",
+ "RX Mask", pBase->rxMask);
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10d\n",
+ "Allow 5GHz",
+ !!(pBase->opCapFlags & AR5416_OPFLAGS_11A));
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10d\n",
+ "Allow 2GHz",
+ !!(pBase->opCapFlags & AR5416_OPFLAGS_11G));
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10d\n",
+ "Disable 2GHz HT20",
+ !!(pBase->opCapFlags & AR5416_OPFLAGS_N_2G_HT20));
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10d\n",
+ "Disable 2GHz HT40",
+ !!(pBase->opCapFlags & AR5416_OPFLAGS_N_2G_HT40));
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10d\n",
+ "Disable 5Ghz HT20",
+ !!(pBase->opCapFlags & AR5416_OPFLAGS_N_5G_HT20));
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10d\n",
+ "Disable 5Ghz HT40",
+ !!(pBase->opCapFlags & AR5416_OPFLAGS_N_5G_HT40));
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10d\n",
+ "Big Endian",
+ !!(pBase->eepMisc & 0x01));
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10d\n",
+ "Cal Bin Major Ver",
+ (pBase->binBuildNumber >> 24) & 0xFF);
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10d\n",
+ "Cal Bin Minor Ver",
+ (pBase->binBuildNumber >> 16) & 0xFF);
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10d\n",
+ "Cal Bin Build",
+ (pBase->binBuildNumber >> 8) & 0xFF);
/*
* UB91 specific data.
@@ -579,10 +579,10 @@ static ssize_t read_file_base_eeprom(struct file *file, char __user *user_buf,
struct base_eep_header_4k *pBase4k =
&priv->ah->eeprom.map4k.baseEepHeader;
- len += snprintf(buf + len, size - len,
- "%20s : %10d\n",
- "TX Gain type",
- pBase4k->txGainType);
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10d\n",
+ "TX Gain type",
+ pBase4k->txGainType);
}
/*
@@ -592,19 +592,19 @@ static ssize_t read_file_base_eeprom(struct file *file, char __user *user_buf,
struct base_eep_ar9287_header *pBase9287 =
&priv->ah->eeprom.map9287.baseEepHeader;
- len += snprintf(buf + len, size - len,
- "%20s : %10ddB\n",
- "Power Table Offset",
- pBase9287->pwrTableOffset);
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10ddB\n",
+ "Power Table Offset",
+ pBase9287->pwrTableOffset);
- len += snprintf(buf + len, size - len,
- "%20s : %10d\n",
- "OpenLoop Power Ctrl",
- pBase9287->openLoopPwrCntl);
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10d\n",
+ "OpenLoop Power Ctrl",
+ pBase9287->openLoopPwrCntl);
}
- len += snprintf(buf + len, size - len, "%20s : %pM\n", "MacAddress",
- pBase->macAddr);
+ len += scnprintf(buf + len, size - len, "%20s : %pM\n", "MacAddress",
+ pBase->macAddr);
if (len > size)
len = size;
@@ -627,8 +627,8 @@ static ssize_t read_4k_modal_eeprom(struct file *file,
{
#define PR_EEP(_s, _val) \
do { \
- len += snprintf(buf + len, size - len, "%20s : %10d\n", \
- _s, (_val)); \
+ len += scnprintf(buf + len, size - len, "%20s : %10d\n",\
+ _s, (_val)); \
} while (0)
struct ath9k_htc_priv *priv = file->private_data;
@@ -708,12 +708,12 @@ static ssize_t read_def_modal_eeprom(struct file *file,
do { \
if (pBase->opCapFlags & AR5416_OPFLAGS_11G) { \
pModal = &priv->ah->eeprom.def.modalHeader[1]; \
- len += snprintf(buf + len, size - len, "%20s : %8d%7s", \
- _s, (_val), "|"); \
+ len += scnprintf(buf + len, size - len, "%20s : %8d%7s", \
+ _s, (_val), "|"); \
} \
if (pBase->opCapFlags & AR5416_OPFLAGS_11A) { \
pModal = &priv->ah->eeprom.def.modalHeader[0]; \
- len += snprintf(buf + len, size - len, "%9d\n", \
+ len += scnprintf(buf + len, size - len, "%9d\n",\
(_val)); \
} \
} while (0)
@@ -729,10 +729,10 @@ static ssize_t read_def_modal_eeprom(struct file *file,
if (buf == NULL)
return -ENOMEM;
- len += snprintf(buf + len, size - len,
- "%31s %15s\n", "2G", "5G");
- len += snprintf(buf + len, size - len,
- "%32s %16s\n", "====", "====\n");
+ len += scnprintf(buf + len, size - len,
+ "%31s %15s\n", "2G", "5G");
+ len += scnprintf(buf + len, size - len,
+ "%32s %16s\n", "====", "====\n");
PR_EEP("Chain0 Ant. Control", pModal->antCtrlChain[0]);
PR_EEP("Chain1 Ant. Control", pModal->antCtrlChain[1]);
@@ -814,8 +814,8 @@ static ssize_t read_9287_modal_eeprom(struct file *file,
{
#define PR_EEP(_s, _val) \
do { \
- len += snprintf(buf + len, size - len, "%20s : %10d\n", \
- _s, (_val)); \
+ len += scnprintf(buf + len, size - len, "%20s : %10d\n",\
+ _s, (_val)); \
} while (0)
struct ath9k_htc_priv *priv = file->private_data;
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index d442581..9a2657f 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -24,30 +24,10 @@
static enum htc_phymode ath9k_htc_get_curmode(struct ath9k_htc_priv *priv,
struct ath9k_channel *ichan)
{
- enum htc_phymode mode;
-
- mode = -EINVAL;
-
- switch (ichan->chanmode) {
- case CHANNEL_G:
- case CHANNEL_G_HT20:
- case CHANNEL_G_HT40PLUS:
- case CHANNEL_G_HT40MINUS:
- mode = HTC_MODE_11NG;
- break;
- case CHANNEL_A:
- case CHANNEL_A_HT20:
- case CHANNEL_A_HT40PLUS:
- case CHANNEL_A_HT40MINUS:
- mode = HTC_MODE_11NA;
- break;
- default:
- break;
- }
+ if (IS_CHAN_5GHZ(ichan))
+ return HTC_MODE_11NA;
- WARN_ON(mode < 0);
-
- return mode;
+ return HTC_MODE_11NG;
}
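
The deleted switch enumerated every 2 GHz and 5 GHz chanmode only to choose between two return values, and the -EINVAL/WARN_ON path was unreachable once all chanmodes were covered; with chanmode gone, the band flag alone decides. A trivial stand-alone check of the collapsed mapping, where the enum values and helpers are illustrative stand-ins for the HTC definitions:

#include <stdio.h>

enum htc_phymode { HTC_MODE_11NA, HTC_MODE_11NG };

struct chan { unsigned int flags; };

#define CHANNEL_5GHZ	0x1
#define IS_CHAN_5GHZ(c)	((c)->flags & CHANNEL_5GHZ)

static enum htc_phymode get_curmode(const struct chan *c)
{
	/* Band alone decides: 5 GHz -> 11NA, everything else -> 11NG. */
	return IS_CHAN_5GHZ(c) ? HTC_MODE_11NA : HTC_MODE_11NG;
}

int main(void)
{
	struct chan g = { 0 }, a = { CHANNEL_5GHZ };

	printf("2GHz -> %d, 5GHz -> %d\n", get_curmode(&g), get_curmode(&a));
	return 0;
}
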
bool ath9k_htc_setpower(struct ath9k_htc_priv *priv,
@@ -926,7 +906,7 @@ static int ath9k_htc_start(struct ieee80211_hw *hw)
WMI_CMD(WMI_FLUSH_RECV_CMDID);
/* setup initial channel */
- init_channel = ath9k_cmn_get_curchannel(hw, ah);
+ init_channel = ath9k_cmn_get_channel(hw, ah, &hw->conf.chandef);
ret = ath9k_hw_reset(ah, init_channel, ah->caldata, false);
if (ret) {
@@ -1208,9 +1188,7 @@ static int ath9k_htc_config(struct ieee80211_hw *hw, u32 changed)
ath_dbg(common, CONFIG, "Set channel: %d MHz\n",
curchan->center_freq);
- ath9k_cmn_update_ichannel(&priv->ah->channels[pos],
- &hw->conf.chandef);
-
+ ath9k_cmn_get_channel(hw, priv->ah, &hw->conf.chandef);
if (ath9k_htc_set_channel(priv, hw, &priv->ah->channels[pos]) < 0) {
ath_err(common, "Unable to set channel\n");
ret = -EINVAL;
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index ecc6ec4..dcdbab4 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -130,29 +130,29 @@ void ath9k_debug_sync_cause(struct ath_common *common, u32 sync_cause)
static void ath9k_hw_set_clockrate(struct ath_hw *ah)
{
- struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_channel *chan = ah->curchan;
unsigned int clockrate;
/* AR9287 v1.3+ uses async FIFO and runs the MAC at 117 MHz */
if (AR_SREV_9287(ah) && AR_SREV_9287_13_OR_LATER(ah))
clockrate = 117;
- else if (!ah->curchan) /* should really check for CCK instead */
+ else if (!chan) /* should really check for CCK instead */
clockrate = ATH9K_CLOCK_RATE_CCK;
- else if (conf->chandef.chan->band == IEEE80211_BAND_2GHZ)
+ else if (IS_CHAN_2GHZ(chan))
clockrate = ATH9K_CLOCK_RATE_2GHZ_OFDM;
else if (ah->caps.hw_caps & ATH9K_HW_CAP_FASTCLOCK)
clockrate = ATH9K_CLOCK_FAST_RATE_5GHZ_OFDM;
else
clockrate = ATH9K_CLOCK_RATE_5GHZ_OFDM;
- if (conf_is_ht40(conf))
+ if (IS_CHAN_HT40(chan))
clockrate *= 2;
if (ah->curchan) {
- if (IS_CHAN_HALF_RATE(ah->curchan))
+ if (IS_CHAN_HALF_RATE(chan))
clockrate /= 2;
- if (IS_CHAN_QUARTER_RATE(ah->curchan))
+ if (IS_CHAN_QUARTER_RATE(chan))
clockrate /= 4;
}
@@ -190,10 +190,7 @@ EXPORT_SYMBOL(ath9k_hw_wait);
void ath9k_hw_synth_delay(struct ath_hw *ah, struct ath9k_channel *chan,
int hw_delay)
{
- if (IS_CHAN_B(chan))
- hw_delay = (4 * hw_delay) / 22;
- else
- hw_delay /= 10;
+ hw_delay /= 10;
if (IS_CHAN_HALF_RATE(chan))
hw_delay *= 2;
@@ -294,8 +291,7 @@ void ath9k_hw_get_channel_centers(struct ath_hw *ah,
return;
}
- if ((chan->chanmode == CHANNEL_A_HT40PLUS) ||
- (chan->chanmode == CHANNEL_G_HT40PLUS)) {
+ if (IS_CHAN_HT40PLUS(chan)) {
centers->synth_center =
chan->channel + HT40_CHANNEL_CENTER_SHIFT;
extoff = 1;
@@ -549,6 +545,18 @@ static int ath9k_hw_post_init(struct ath_hw *ah)
ath9k_hw_ani_init(ah);
+ /*
+ * EEPROM needs to be initialized before we do this.
+ * This is required for regulatory compliance.
+ */
+ if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
+ u16 regdmn = ah->eep_ops->get_eeprom(ah, EEP_REG_0);
+ if ((regdmn & 0xF0) == CTL_FCC) {
+ ah->nf_2g.max = AR_PHY_CCA_MAX_GOOD_VAL_9462_FCC_2GHZ;
+ ah->nf_5g.max = AR_PHY_CCA_MAX_GOOD_VAL_9462_FCC_5GHZ;
+ }
+ }
+
return 0;
}
@@ -1030,7 +1038,6 @@ static bool ath9k_hw_set_global_txtimeout(struct ath_hw *ah, u32 tu)
void ath9k_hw_init_global_settings(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
- struct ieee80211_conf *conf = &common->hw->conf;
const struct ath9k_channel *chan = ah->curchan;
int acktimeout, ctstimeout, ack_offset = 0;
int slottime;
@@ -1105,8 +1112,7 @@ void ath9k_hw_init_global_settings(struct ath_hw *ah)
* BA frames in some implementations, but it has been found to fix ACK
* timeout issues in other cases as well.
*/
- if (conf->chandef.chan &&
- conf->chandef.chan->band == IEEE80211_BAND_2GHZ &&
+ if (IS_CHAN_2GHZ(chan) &&
!IS_CHAN_HALF_RATE(chan) && !IS_CHAN_QUARTER_RATE(chan)) {
acktimeout += 64 - sifstime - ah->slottime;
ctstimeout += 48 - sifstime - ah->slottime;
@@ -1148,9 +1154,7 @@ u32 ath9k_regd_get_ctl(struct ath_regulatory *reg, struct ath9k_channel *chan)
{
u32 ctl = ath_regd_get_band_ctl(reg, chan->chan->band);
- if (IS_CHAN_B(chan))
- ctl |= CTL_11B;
- else if (IS_CHAN_G(chan))
+ if (IS_CHAN_2GHZ(chan))
ctl |= CTL_11G;
else
ctl |= CTL_11A;
@@ -1498,10 +1502,8 @@ static bool ath9k_hw_channel_change(struct ath_hw *ah,
int r;
if (pCap->hw_caps & ATH9K_HW_CAP_FCC_BAND_SWITCH) {
- u32 cur = ah->curchan->channelFlags & (CHANNEL_2GHZ | CHANNEL_5GHZ);
- u32 new = chan->channelFlags & (CHANNEL_2GHZ | CHANNEL_5GHZ);
- band_switch = (cur != new);
- mode_diff = (chan->chanmode != ah->curchan->chanmode);
+ band_switch = IS_CHAN_5GHZ(ah->curchan) != IS_CHAN_5GHZ(chan);
+ mode_diff = (chan->channelFlags != ah->curchan->channelFlags);
}
for (qnum = 0; qnum < AR_NUM_QCU; qnum++) {
@@ -1540,9 +1542,7 @@ static bool ath9k_hw_channel_change(struct ath_hw *ah,
ath9k_hw_set_clockrate(ah);
ath9k_hw_apply_txpower(ah, chan, false);
- if (IS_CHAN_OFDM(chan) || IS_CHAN_HT(chan))
- ath9k_hw_set_delta_slope(ah, chan);
-
+ ath9k_hw_set_delta_slope(ah, chan);
ath9k_hw_spur_mitigate_freq(ah, chan);
if (band_switch || ini_reloaded)
@@ -1644,6 +1644,19 @@ hang_check_iter:
return true;
}
+void ath9k_hw_check_nav(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ u32 val;
+
+ val = REG_READ(ah, AR_NAV);
+ if (val != 0xdeadbeef && val > 0x7fff) {
+ ath_dbg(common, BSTUCK, "Abnormal NAV: 0x%x\n", val);
+ REG_WRITE(ah, AR_NAV, 0);
+ }
+}
+EXPORT_SYMBOL(ath9k_hw_check_nav);
+
bool ath9k_hw_check_alive(struct ath_hw *ah)
{
int count = 50;
@@ -1799,20 +1812,11 @@ static int ath9k_hw_do_fastcc(struct ath_hw *ah, struct ath9k_channel *chan)
goto fail;
/*
- * If cross-band fcc is not supoprted, bail out if
- * either channelFlags or chanmode differ.
- *
- * chanmode will be different if the HT operating mode
- * changes because of CSA.
+ * If cross-band fcc is not supported, bail out if channelFlags differ.
*/
- if (!(pCap->hw_caps & ATH9K_HW_CAP_FCC_BAND_SWITCH)) {
- if ((chan->channelFlags & CHANNEL_ALL) !=
- (ah->curchan->channelFlags & CHANNEL_ALL))
- goto fail;
-
- if (chan->chanmode != ah->curchan->chanmode)
- goto fail;
- }
+ if (!(pCap->hw_caps & ATH9K_HW_CAP_FCC_BAND_SWITCH) &&
+ chan->channelFlags != ah->curchan->channelFlags)
+ goto fail;
if (!ath9k_hw_check_alive(ah))
goto fail;
@@ -1822,9 +1826,9 @@ static int ath9k_hw_do_fastcc(struct ath_hw *ah, struct ath9k_channel *chan)
* re-using are present.
*/
if (AR_SREV_9462(ah) && (ah->caldata &&
- (!ah->caldata->done_txiqcal_once ||
- !ah->caldata->done_txclcal_once ||
- !ah->caldata->rtt_done)))
+ (!test_bit(TXIQCAL_DONE, &ah->caldata->cal_flags) ||
+ !test_bit(TXCLCAL_DONE, &ah->caldata->cal_flags) ||
+ !test_bit(RTT_DONE, &ah->caldata->cal_flags))))
goto fail;
ath_dbg(common, RESET, "FastChannelChange for %d -> %d\n",
@@ -1874,13 +1878,12 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
ah->caldata = caldata;
if (caldata && (chan->channel != caldata->channel ||
- chan->channelFlags != caldata->channelFlags ||
- chan->chanmode != caldata->chanmode)) {
+ chan->channelFlags != caldata->channelFlags)) {
/* Operating channel changed, reset channel calibration data */
memset(caldata, 0, sizeof(*caldata));
ath9k_init_nfcal_hist_buffer(ah, chan);
} else if (caldata) {
- caldata->paprd_packet_sent = false;
+ clear_bit(PAPRD_PACKET_SENT, &caldata->cal_flags);
}
ah->noise = ath9k_hw_getchan_noise(ah, chan);
@@ -1964,9 +1967,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
ath9k_hw_init_mfp(ah);
- if (IS_CHAN_OFDM(chan) || IS_CHAN_HT(chan))
- ath9k_hw_set_delta_slope(ah, chan);
-
+ ath9k_hw_set_delta_slope(ah, chan);
ath9k_hw_spur_mitigate_freq(ah, chan);
ah->eep_ops->set_board_values(ah, chan);
@@ -2017,8 +2018,8 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
ath9k_hw_init_bb(ah, chan);
if (caldata) {
- caldata->done_txiqcal_once = false;
- caldata->done_txclcal_once = false;
+ clear_bit(TXIQCAL_DONE, &caldata->cal_flags);
+ clear_bit(TXCLCAL_DONE, &caldata->cal_flags);
}
if (!ath9k_hw_init_cal(ah, chan))
return -EIO;
@@ -2943,12 +2944,11 @@ void ath9k_hw_set_tsfadjust(struct ath_hw *ah, bool set)
}
EXPORT_SYMBOL(ath9k_hw_set_tsfadjust);
-void ath9k_hw_set11nmac2040(struct ath_hw *ah)
+void ath9k_hw_set11nmac2040(struct ath_hw *ah, struct ath9k_channel *chan)
{
- struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
u32 macmode;
- if (conf_is_ht40(conf) && !ah->config.cwm_ignore_extcca)
+ if (IS_CHAN_HT40(chan) && !ah->config.cwm_ignore_extcca)
macmode = AR_2040_JOINED_RX_CLEAR;
else
macmode = 0;
@@ -3240,19 +3240,19 @@ void ath9k_hw_name(struct ath_hw *ah, char *hw_name, size_t len)
/* chipsets >= AR9280 are single-chip */
if (AR_SREV_9280_20_OR_LATER(ah)) {
- used = snprintf(hw_name, len,
- "Atheros AR%s Rev:%x",
- ath9k_hw_mac_bb_name(ah->hw_version.macVersion),
- ah->hw_version.macRev);
+ used = scnprintf(hw_name, len,
+ "Atheros AR%s Rev:%x",
+ ath9k_hw_mac_bb_name(ah->hw_version.macVersion),
+ ah->hw_version.macRev);
}
else {
- used = snprintf(hw_name, len,
- "Atheros AR%s MAC/BB Rev:%x AR%s RF Rev:%x",
- ath9k_hw_mac_bb_name(ah->hw_version.macVersion),
- ah->hw_version.macRev,
- ath9k_hw_rf_name((ah->hw_version.analog5GhzRev &
- AR_RADIO_SREV_MAJOR)),
- ah->hw_version.phyRev);
+ used = scnprintf(hw_name, len,
+ "Atheros AR%s MAC/BB Rev:%x AR%s RF Rev:%x",
+ ath9k_hw_mac_bb_name(ah->hw_version.macVersion),
+ ah->hw_version.macRev,
+ ath9k_hw_rf_name((ah->hw_version.analog5GhzRev
+ & AR_RADIO_SREV_MAJOR)),
+ ah->hw_version.phyRev);
}
hw_name[used] = '\0';
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index 69a907b..81fcbc7 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -98,8 +98,8 @@
#define PR_EEP(_s, _val) \
do { \
- len += snprintf(buf + len, size - len, "%20s : %10d\n", \
- _s, (_val)); \
+ len += scnprintf(buf + len, size - len, "%20s : %10d\n",\
+ _s, (_val)); \
} while (0)
#define SM(_v, _f) (((_v) << _f##_S) & _f)
@@ -369,55 +369,30 @@ enum ath9k_int {
ATH9K_INT_NOCARD = 0xffffffff
};
-#define CHANNEL_CCK 0x00020
-#define CHANNEL_OFDM 0x00040
-#define CHANNEL_2GHZ 0x00080
-#define CHANNEL_5GHZ 0x00100
-#define CHANNEL_PASSIVE 0x00200
-#define CHANNEL_DYN 0x00400
-#define CHANNEL_HALF 0x04000
-#define CHANNEL_QUARTER 0x08000
-#define CHANNEL_HT20 0x10000
-#define CHANNEL_HT40PLUS 0x20000
-#define CHANNEL_HT40MINUS 0x40000
-
-#define CHANNEL_A (CHANNEL_5GHZ|CHANNEL_OFDM)
-#define CHANNEL_B (CHANNEL_2GHZ|CHANNEL_CCK)
-#define CHANNEL_G (CHANNEL_2GHZ|CHANNEL_OFDM)
-#define CHANNEL_G_HT20 (CHANNEL_2GHZ|CHANNEL_HT20)
-#define CHANNEL_A_HT20 (CHANNEL_5GHZ|CHANNEL_HT20)
-#define CHANNEL_G_HT40PLUS (CHANNEL_2GHZ|CHANNEL_HT40PLUS)
-#define CHANNEL_G_HT40MINUS (CHANNEL_2GHZ|CHANNEL_HT40MINUS)
-#define CHANNEL_A_HT40PLUS (CHANNEL_5GHZ|CHANNEL_HT40PLUS)
-#define CHANNEL_A_HT40MINUS (CHANNEL_5GHZ|CHANNEL_HT40MINUS)
-#define CHANNEL_ALL \
- (CHANNEL_OFDM| \
- CHANNEL_CCK| \
- CHANNEL_2GHZ | \
- CHANNEL_5GHZ | \
- CHANNEL_HT20 | \
- CHANNEL_HT40PLUS | \
- CHANNEL_HT40MINUS)
-
#define MAX_RTT_TABLE_ENTRY 6
#define MAX_IQCAL_MEASUREMENT 8
#define MAX_CL_TAB_ENTRY 16
#define CL_TAB_ENTRY(reg_base) (reg_base + (4 * j))
+enum ath9k_cal_flags {
+ RTT_DONE,
+ PAPRD_PACKET_SENT,
+ PAPRD_DONE,
+ NFCAL_PENDING,
+ NFCAL_INTF,
+ TXIQCAL_DONE,
+ TXCLCAL_DONE,
+ SW_PKDET_DONE,
+};
+
struct ath9k_hw_cal_data {
u16 channel;
- u32 channelFlags;
- u32 chanmode;
+ u16 channelFlags;
+ unsigned long cal_flags;
int32_t CalValid;
int8_t iCoff;
int8_t qCoff;
- bool rtt_done;
- bool paprd_packet_sent;
- bool paprd_done;
- bool nfcal_pending;
- bool nfcal_interference;
- bool done_txiqcal_once;
- bool done_txclcal_once;
+ u8 caldac[2];
u16 small_signal_gain[AR9300_MAX_CHAINS];
u32 pa_table[AR9300_MAX_CHAINS][PAPRD_TABLE_SZ];
u32 num_measures[AR9300_MAX_CHAINS];
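
Seven independent bools collapse into one unsigned long driven by the generic bitops, which both shrinks the struct and makes flag updates atomic. A userspace model of the new pattern; set_bit()/clear_bit()/test_bit() are re-implemented (non-atomically) here only so the sketch compiles outside the kernel, where <linux/bitops.h> provides the real, atomic versions:

#include <stdbool.h>
#include <stdio.h>

enum ath9k_cal_flags {
	RTT_DONE,
	PAPRD_PACKET_SENT,
	PAPRD_DONE,
	NFCAL_PENDING,
	NFCAL_INTF,
	TXIQCAL_DONE,
	TXCLCAL_DONE,
	SW_PKDET_DONE,
};

static void set_bit(int nr, unsigned long *addr)   { *addr |=  1UL << nr; }
static void clear_bit(int nr, unsigned long *addr) { *addr &= ~(1UL << nr); }
static bool test_bit(int nr, const unsigned long *addr)
{
	return *addr & (1UL << nr);
}

int main(void)
{
	unsigned long cal_flags = 0;

	set_bit(PAPRD_DONE, &cal_flags);    /* was: caldata->paprd_done = true */
	set_bit(TXIQCAL_DONE, &cal_flags);  /* was: caldata->done_txiqcal_once = true */
	clear_bit(PAPRD_DONE, &cal_flags);  /* was: caldata->paprd_done = false */

	printf("PAPRD done: %d, TXIQCAL done: %d\n",
	       test_bit(PAPRD_DONE, &cal_flags),
	       test_bit(TXIQCAL_DONE, &cal_flags));
	return 0;
}
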
@@ -430,33 +405,34 @@ struct ath9k_hw_cal_data {
struct ath9k_channel {
struct ieee80211_channel *chan;
u16 channel;
- u32 channelFlags;
- u32 chanmode;
+ u16 channelFlags;
s16 noisefloor;
};
-#define IS_CHAN_G(_c) ((((_c)->channelFlags & (CHANNEL_G)) == CHANNEL_G) || \
- (((_c)->channelFlags & CHANNEL_G_HT20) == CHANNEL_G_HT20) || \
- (((_c)->channelFlags & CHANNEL_G_HT40PLUS) == CHANNEL_G_HT40PLUS) || \
- (((_c)->channelFlags & CHANNEL_G_HT40MINUS) == CHANNEL_G_HT40MINUS))
-#define IS_CHAN_OFDM(_c) (((_c)->channelFlags & CHANNEL_OFDM) != 0)
-#define IS_CHAN_5GHZ(_c) (((_c)->channelFlags & CHANNEL_5GHZ) != 0)
-#define IS_CHAN_2GHZ(_c) (((_c)->channelFlags & CHANNEL_2GHZ) != 0)
-#define IS_CHAN_HALF_RATE(_c) (((_c)->channelFlags & CHANNEL_HALF) != 0)
-#define IS_CHAN_QUARTER_RATE(_c) (((_c)->channelFlags & CHANNEL_QUARTER) != 0)
+#define CHANNEL_5GHZ BIT(0)
+#define CHANNEL_HALF BIT(1)
+#define CHANNEL_QUARTER BIT(2)
+#define CHANNEL_HT BIT(3)
+#define CHANNEL_HT40PLUS BIT(4)
+#define CHANNEL_HT40MINUS BIT(5)
+
+#define IS_CHAN_5GHZ(_c) (!!((_c)->channelFlags & CHANNEL_5GHZ))
+#define IS_CHAN_2GHZ(_c) (!IS_CHAN_5GHZ(_c))
+
+#define IS_CHAN_HALF_RATE(_c) (!!((_c)->channelFlags & CHANNEL_HALF))
+#define IS_CHAN_QUARTER_RATE(_c) (!!((_c)->channelFlags & CHANNEL_QUARTER))
#define IS_CHAN_A_FAST_CLOCK(_ah, _c) \
- ((((_c)->channelFlags & CHANNEL_5GHZ) != 0) && \
- ((_ah)->caps.hw_caps & ATH9K_HW_CAP_FASTCLOCK))
-
-/* These macros check chanmode and not channelFlags */
-#define IS_CHAN_B(_c) ((_c)->chanmode == CHANNEL_B)
-#define IS_CHAN_HT20(_c) (((_c)->chanmode == CHANNEL_A_HT20) || \
- ((_c)->chanmode == CHANNEL_G_HT20))
-#define IS_CHAN_HT40(_c) (((_c)->chanmode == CHANNEL_A_HT40PLUS) || \
- ((_c)->chanmode == CHANNEL_A_HT40MINUS) || \
- ((_c)->chanmode == CHANNEL_G_HT40PLUS) || \
- ((_c)->chanmode == CHANNEL_G_HT40MINUS))
-#define IS_CHAN_HT(_c) (IS_CHAN_HT20((_c)) || IS_CHAN_HT40((_c)))
+ (IS_CHAN_5GHZ(_c) && ((_ah)->caps.hw_caps & ATH9K_HW_CAP_FASTCLOCK))
+
+#define IS_CHAN_HT(_c) ((_c)->channelFlags & CHANNEL_HT)
+
+#define IS_CHAN_HT20(_c) (IS_CHAN_HT(_c) && !IS_CHAN_HT40(_c))
+
+#define IS_CHAN_HT40(_c) \
+ (!!((_c)->channelFlags & (CHANNEL_HT40PLUS | CHANNEL_HT40MINUS)))
+
+#define IS_CHAN_HT40PLUS(_c) ((_c)->channelFlags & CHANNEL_HT40PLUS)
+#define IS_CHAN_HT40MINUS(_c) ((_c)->channelFlags & CHANNEL_HT40MINUS)
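
With chanmode gone, band and HT configuration are expressed purely through these six bits: 2 GHz is the absence of CHANNEL_5GHZ, HT20 is HT without either 40 MHz sideband, and the old CHANNEL_A/B/G composites disappear entirely. A compilable userspace rendering of the new predicates (BIT() is reproduced locally; in-kernel it comes from <linux/bitops.h>):

#include <stdio.h>

#define BIT(n)			(1U << (n))

#define CHANNEL_5GHZ		BIT(0)
#define CHANNEL_HALF		BIT(1)
#define CHANNEL_QUARTER		BIT(2)
#define CHANNEL_HT		BIT(3)
#define CHANNEL_HT40PLUS	BIT(4)
#define CHANNEL_HT40MINUS	BIT(5)

struct chan { unsigned short channelFlags; };

#define IS_CHAN_5GHZ(_c)	(!!((_c)->channelFlags & CHANNEL_5GHZ))
#define IS_CHAN_2GHZ(_c)	(!IS_CHAN_5GHZ(_c))
#define IS_CHAN_HT(_c)		((_c)->channelFlags & CHANNEL_HT)
#define IS_CHAN_HT40(_c) \
	(!!((_c)->channelFlags & (CHANNEL_HT40PLUS | CHANNEL_HT40MINUS)))
#define IS_CHAN_HT20(_c)	(IS_CHAN_HT(_c) && !IS_CHAN_HT40(_c))

int main(void)
{
	/* The old CHANNEL_G_HT40PLUS composite becomes two orthogonal bits. */
	struct chan g_ht40plus = { CHANNEL_HT | CHANNEL_HT40PLUS };

	printf("2GHz=%d HT20=%d HT40=%d\n",
	       IS_CHAN_2GHZ(&g_ht40plus),
	       IS_CHAN_HT20(&g_ht40plus),
	       IS_CHAN_HT40(&g_ht40plus));
	return 0;
}
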
enum ath9k_power_mode {
ATH9K_PM_AWAKE = 0,
@@ -558,6 +534,7 @@ struct ath_hw_antcomb_conf {
u8 main_gaintb;
u8 alt_gaintb;
int lna1_lna2_delta;
+ int lna1_lna2_switch_delta;
u8 div_group;
};
@@ -1026,10 +1003,11 @@ void ath9k_hw_reset_tsf(struct ath_hw *ah);
void ath9k_hw_set_tsfadjust(struct ath_hw *ah, bool set);
void ath9k_hw_init_global_settings(struct ath_hw *ah);
u32 ar9003_get_pll_sqsum_dvc(struct ath_hw *ah);
-void ath9k_hw_set11nmac2040(struct ath_hw *ah);
+void ath9k_hw_set11nmac2040(struct ath_hw *ah, struct ath9k_channel *chan);
void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period);
void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah,
const struct ath9k_beacon_state *bs);
+void ath9k_hw_check_nav(struct ath_hw *ah);
bool ath9k_hw_check_alive(struct ath_hw *ah);
bool ath9k_hw_setpower(struct ath_hw *ah, enum ath9k_power_mode mode);
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index 9a1f349..7df728f 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -347,7 +347,6 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
{
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
u8 *ds;
- struct ath_buf *bf;
int i, bsize, desc_len;
ath_dbg(common, CONFIG, "%s DMA: %u buffers %u desc/buf\n",
@@ -399,33 +398,68 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);
/* allocate buffers */
- bsize = sizeof(struct ath_buf) * nbuf;
- bf = devm_kzalloc(sc->dev, bsize, GFP_KERNEL);
- if (!bf)
- return -ENOMEM;
+ if (is_tx) {
+ struct ath_buf *bf;
+
+ bsize = sizeof(struct ath_buf) * nbuf;
+ bf = devm_kzalloc(sc->dev, bsize, GFP_KERNEL);
+ if (!bf)
+ return -ENOMEM;
+
+ for (i = 0; i < nbuf; i++, bf++, ds += (desc_len * ndesc)) {
+ bf->bf_desc = ds;
+ bf->bf_daddr = DS2PHYS(dd, ds);
+
+ if (!(sc->sc_ah->caps.hw_caps &
+ ATH9K_HW_CAP_4KB_SPLITTRANS)) {
+ /*
+ * Skip descriptor addresses which can cause 4KB
+ * boundary crossing (addr + length) with a 32 dword
+ * descriptor fetch.
+ */
+ while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
+ BUG_ON((caddr_t) bf->bf_desc >=
+ ((caddr_t) dd->dd_desc +
+ dd->dd_desc_len));
+
+ ds += (desc_len * ndesc);
+ bf->bf_desc = ds;
+ bf->bf_daddr = DS2PHYS(dd, ds);
+ }
+ }
+ list_add_tail(&bf->list, head);
+ }
+ } else {
+ struct ath_rxbuf *bf;
+
+ bsize = sizeof(struct ath_rxbuf) * nbuf;
+ bf = devm_kzalloc(sc->dev, bsize, GFP_KERNEL);
+ if (!bf)
+ return -ENOMEM;
- for (i = 0; i < nbuf; i++, bf++, ds += (desc_len * ndesc)) {
- bf->bf_desc = ds;
- bf->bf_daddr = DS2PHYS(dd, ds);
-
- if (!(sc->sc_ah->caps.hw_caps &
- ATH9K_HW_CAP_4KB_SPLITTRANS)) {
- /*
- * Skip descriptor addresses which can cause 4KB
- * boundary crossing (addr + length) with a 32 dword
- * descriptor fetch.
- */
- while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
- BUG_ON((caddr_t) bf->bf_desc >=
- ((caddr_t) dd->dd_desc +
- dd->dd_desc_len));
-
- ds += (desc_len * ndesc);
- bf->bf_desc = ds;
- bf->bf_daddr = DS2PHYS(dd, ds);
+ for (i = 0; i < nbuf; i++, bf++, ds += (desc_len * ndesc)) {
+ bf->bf_desc = ds;
+ bf->bf_daddr = DS2PHYS(dd, ds);
+
+ if (!(sc->sc_ah->caps.hw_caps &
+ ATH9K_HW_CAP_4KB_SPLITTRANS)) {
+ /*
+ * Skip descriptor addresses which can cause 4KB
+ * boundary crossing (addr + length) with a 32 dword
+ * descriptor fetch.
+ */
+ while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
+ BUG_ON((caddr_t) bf->bf_desc >=
+ ((caddr_t) dd->dd_desc +
+ dd->dd_desc_len));
+
+ ds += (desc_len * ndesc);
+ bf->bf_desc = ds;
+ bf->bf_daddr = DS2PHYS(dd, ds);
+ }
}
+ list_add_tail(&bf->list, head);
}
- list_add_tail(&bf->list, head);
}
return 0;
}
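
The split duplicates the placement walk because TX now uses struct ath_buf while RX uses the slimmer struct ath_rxbuf; the invariant both branches preserve is the 4KB skip described in the comment above. ATH_DESC_4KB_BOUND_CHECK() is defined elsewhere in ath9k and not shown in this hunk, so bound_check() below is only an illustrative assumption of such a test: a 32-dword descriptor fetch must not straddle a 4KB page.

#include <stdio.h>

#define DESC_LEN	(4 * 32)	/* bytes in one 32-dword descriptor fetch */

/* Illustrative stand-in for ATH_DESC_4KB_BOUND_CHECK(): true when a
 * DESC_LEN fetch starting at daddr would cross a 4KB page boundary. */
static int bound_check(unsigned int daddr)
{
	return (daddr & 0xFFF) > 0x1000 - DESC_LEN;
}

int main(void)
{
	unsigned int daddr = 0x1F90;	/* hypothetical descriptor DMA address */

	/* Mirror of the while () loop in both branches above: step past
	 * slots whose fetch would straddle the boundary. */
	while (bound_check(daddr)) {
		printf("skipping  0x%04x\n", daddr);
		daddr += DESC_LEN;
	}
	printf("placing at 0x%04x\n", daddr);
	return 0;
}
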
@@ -437,7 +471,6 @@ static int ath9k_init_queues(struct ath_softc *sc)
sc->beacon.beaconq = ath9k_hw_beaconq_setup(sc->sc_ah);
sc->beacon.cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);
- sc->config.cabqReadytime = ATH_CABQ_READY_TIME;
ath_cabq_update(sc);
sc->tx.uapsdq = ath_txq_setup(sc, ATH9K_TX_QUEUE_UAPSD, 0);
@@ -547,6 +580,26 @@ static void ath9k_init_platform(struct ath_softc *sc)
if (sc->driver_data & ATH9K_PCI_CUS217)
ath_info(common, "CUS217 card detected\n");
+ if (sc->driver_data & ATH9K_PCI_CUS252)
+ ath_info(common, "CUS252 card detected\n");
+
+ if (sc->driver_data & ATH9K_PCI_AR9565_1ANT)
+ ath_info(common, "WB335 1-ANT card detected\n");
+
+ if (sc->driver_data & ATH9K_PCI_AR9565_2ANT)
+ ath_info(common, "WB335 2-ANT card detected\n");
+
+ /*
+ * Some WB335 cards do not support antenna diversity. Since
+ * we use a hardcoded value for AR9565 instead of using the
+ * EEPROM/OTP data, remove the combining feature from
+ * the HW capabilities bitmap.
+ */
+ if (sc->driver_data & (ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_AR9565_2ANT)) {
+ if (!(sc->driver_data & ATH9K_PCI_BT_ANT_DIV))
+ pCap->hw_caps &= ~ATH9K_HW_CAP_ANT_DIV_COMB;
+ }
+
if (sc->driver_data & ATH9K_PCI_BT_ANT_DIV) {
pCap->hw_caps |= ATH9K_HW_CAP_BT_ANT_DIV;
ath_info(common, "Set BT/WLAN RX diversity capability\n");
@@ -748,7 +801,7 @@ static void ath9k_init_band_txpower(struct ath_softc *sc, int band)
chan = &sband->channels[i];
ah->curchan = &ah->channels[chan->hw_value];
cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_HT20);
- ath9k_cmn_update_ichannel(ah->curchan, &chandef);
+ ath9k_cmn_get_channel(sc->hw, ah, &chandef);
ath9k_hw_set_txpowerlimit(ah, MAX_RATE_POWER, true);
}
}
diff --git a/drivers/net/wireless/ath/ath9k/link.c b/drivers/net/wireless/ath/ath9k/link.c
index 2f831db..84a6064 100644
--- a/drivers/net/wireless/ath/ath9k/link.c
+++ b/drivers/net/wireless/ath/ath9k/link.c
@@ -184,7 +184,7 @@ static void ath_paprd_activate(struct ath_softc *sc)
struct ath9k_hw_cal_data *caldata = ah->caldata;
int chain;
- if (!caldata || !caldata->paprd_done) {
+ if (!caldata || !test_bit(PAPRD_DONE, &caldata->cal_flags)) {
ath_dbg(common, CALIBRATE, "Failed to activate PAPRD\n");
return;
}
@@ -256,7 +256,9 @@ void ath_paprd_calibrate(struct work_struct *work)
int len = 1800;
int ret;
- if (!caldata || !caldata->paprd_packet_sent || caldata->paprd_done) {
+ if (!caldata ||
+ !test_bit(PAPRD_PACKET_SENT, &caldata->cal_flags) ||
+ test_bit(PAPRD_DONE, &caldata->cal_flags)) {
ath_dbg(common, CALIBRATE, "Skipping PAPRD calibration\n");
return;
}
@@ -316,7 +318,7 @@ void ath_paprd_calibrate(struct work_struct *work)
kfree_skb(skb);
if (chain_ok) {
- caldata->paprd_done = true;
+ set_bit(PAPRD_DONE, &caldata->cal_flags);
ath_paprd_activate(sc);
}
@@ -343,7 +345,7 @@ void ath_ani_calibrate(unsigned long data)
u32 cal_interval, short_cal_interval, long_cal_interval;
unsigned long flags;
- if (ah->caldata && ah->caldata->nfcal_interference)
+ if (ah->caldata && test_bit(NFCAL_INTF, &ah->caldata->cal_flags))
long_cal_interval = ATH_LONG_CALINTERVAL_INT;
else
long_cal_interval = ATH_LONG_CALINTERVAL;
@@ -432,7 +434,7 @@ set_timer:
mod_timer(&common->ani.timer, jiffies + msecs_to_jiffies(cal_interval));
if (ar9003_is_paprd_enabled(ah) && ah->caldata) {
- if (!ah->caldata->paprd_done) {
+ if (!test_bit(PAPRD_DONE, &ah->caldata->cal_flags)) {
ieee80211_queue_work(sc->hw, &sc->paprd_work);
} else if (!ah->paprd_table_write_done) {
ath9k_ps_wakeup(sc);
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
index a3eff09..6a18f9d 100644
--- a/drivers/net/wireless/ath/ath9k/mac.c
+++ b/drivers/net/wireless/ath/ath9k/mac.c
@@ -374,7 +374,6 @@ EXPORT_SYMBOL(ath9k_hw_releasetxqueue);
bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
{
struct ath_common *common = ath9k_hw_common(ah);
- struct ath9k_channel *chan = ah->curchan;
struct ath9k_tx_queue_info *qi;
u32 cwMin, chanCwMin, value;
@@ -387,10 +386,7 @@ bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
ath_dbg(common, QUEUE, "Reset TX queue: %u\n", q);
if (qi->tqi_cwmin == ATH9K_TXQ_USEDEFAULT) {
- if (chan && IS_CHAN_B(chan))
- chanCwMin = INIT_CWMIN_11B;
- else
- chanCwMin = INIT_CWMIN;
+ chanCwMin = INIT_CWMIN;
for (cwMin = 1; cwMin < chanCwMin; cwMin = (cwMin << 1) | 1);
} else
diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h
index bfccace..e3eed81 100644
--- a/drivers/net/wireless/ath/ath9k/mac.h
+++ b/drivers/net/wireless/ath/ath9k/mac.h
@@ -603,8 +603,6 @@ enum ath9k_tx_queue_flags {
#define ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS 0x00000001
#define ATH9K_DECOMP_MASK_SIZE 128
-#define ATH9K_READY_TIME_LO_BOUND 50
-#define ATH9K_READY_TIME_HI_BOUND 96
enum ath9k_pkt_type {
ATH9K_PKT_TYPE_NORMAL = 0,
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index e4f6590..c42b55c 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -302,17 +302,91 @@ out:
* by resetting the chip. To accomplish this we must first clean up any pending
* DMA, then restart stuff.
*/
-static int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
- struct ath9k_channel *hchan)
+static int ath_set_channel(struct ath_softc *sc, struct cfg80211_chan_def *chandef)
{
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ieee80211_hw *hw = sc->hw;
+ struct ath9k_channel *hchan;
+ struct ieee80211_channel *chan = chandef->chan;
+ unsigned long flags;
+ bool offchannel;
+ int pos = chan->hw_value;
+ int old_pos = -1;
int r;
if (test_bit(SC_OP_INVALID, &sc->sc_flags))
return -EIO;
+ offchannel = !!(hw->conf.flags & IEEE80211_CONF_OFFCHANNEL);
+
+ if (ah->curchan)
+ old_pos = ah->curchan - &ah->channels[0];
+
+ ath_dbg(common, CONFIG, "Set channel: %d MHz width: %d\n",
+ chan->center_freq, chandef->width);
+
+ /* update survey stats for the old channel before switching */
+ spin_lock_irqsave(&common->cc_lock, flags);
+ ath_update_survey_stats(sc);
+ spin_unlock_irqrestore(&common->cc_lock, flags);
+
+ ath9k_cmn_get_channel(hw, ah, chandef);
+
+ /*
+ * If the operating channel changes, change the survey in-use flags
+ * along with it.
+ * Reset the survey data for the new channel, unless we're switching
+ * back to the operating channel from an off-channel operation.
+ */
+ if (!offchannel && sc->cur_survey != &sc->survey[pos]) {
+ if (sc->cur_survey)
+ sc->cur_survey->filled &= ~SURVEY_INFO_IN_USE;
+
+ sc->cur_survey = &sc->survey[pos];
+
+ memset(sc->cur_survey, 0, sizeof(struct survey_info));
+ sc->cur_survey->filled |= SURVEY_INFO_IN_USE;
+ } else if (!(sc->survey[pos].filled & SURVEY_INFO_IN_USE)) {
+ memset(&sc->survey[pos], 0, sizeof(struct survey_info));
+ }
+
+ hchan = &sc->sc_ah->channels[pos];
r = ath_reset_internal(sc, hchan);
+ if (r)
+ return r;
- return r;
+ /*
+ * The most recent snapshot of channel->noisefloor for the old
+ * channel is only available after the hardware reset. Copy it to
+ * the survey stats now.
+ */
+ if (old_pos >= 0)
+ ath_update_survey_nf(sc, old_pos);
+
+ /*
+ * Enable radar pulse detection if on a DFS channel. Spectral
+ * scanning and radar detection cannot be used concurrently.
+ */
+ if (hw->conf.radar_enabled) {
+ u32 rxfilter;
+
+ /* set HW specific DFS configuration */
+ ath9k_hw_set_radar_params(ah);
+ rxfilter = ath9k_hw_getrxfilter(ah);
+ rxfilter |= ATH9K_RX_FILTER_PHYRADAR |
+ ATH9K_RX_FILTER_PHYERR;
+ ath9k_hw_setrxfilter(ah, rxfilter);
+ ath_dbg(common, DFS, "DFS enabled at freq %d\n",
+ chan->center_freq);
+ } else {
+ /* perform spectral scan if requested. */
+ if (test_bit(SC_OP_SCANNING, &sc->sc_flags) &&
+ sc->spectral_mode == SPECTRAL_CHANSCAN)
+ ath9k_spectral_scan_trigger(hw);
+ }
+
+ return 0;
}
static void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta,
@@ -362,6 +436,13 @@ void ath9k_tasklet(unsigned long data)
type = RESET_TYPE_BB_WATCHDOG;
ath9k_queue_reset(sc, type);
+
+ /*
+ * Increment the ref. counter here so that
+ * interrupts are enabled in the reset routine.
+ */
+ atomic_inc(&ah->intr_ref_cnt);
+ ath_dbg(common, ANY, "FATAL: Skipping interrupts\n");
goto out;
}
@@ -400,10 +481,9 @@ void ath9k_tasklet(unsigned long data)
ath9k_btcoex_handle_interrupt(sc, status);
-out:
/* re-enable hardware interrupt */
ath9k_hw_enable_interrupts(ah);
-
+out:
spin_unlock(&sc->sc_pcu_lock);
ath9k_ps_restore(sc);
}
@@ -595,7 +675,7 @@ static int ath9k_start(struct ieee80211_hw *hw)
ath9k_ps_wakeup(sc);
mutex_lock(&sc->mutex);
- init_channel = ath9k_cmn_get_curchannel(hw, ah);
+ init_channel = ath9k_cmn_get_channel(hw, ah, &hw->conf.chandef);
/* Reset SERDES registers */
ath9k_hw_configpcipowersave(ah, false);
@@ -798,7 +878,7 @@ static void ath9k_stop(struct ieee80211_hw *hw)
}
if (!ah->curchan)
- ah->curchan = ath9k_cmn_get_curchannel(hw, ah);
+ ah->curchan = ath9k_cmn_get_channel(hw, ah, &hw->conf.chandef);
ath9k_hw_reset(ah, ah->curchan, ah->caldata, false);
ath9k_hw_phy_disable(ah);
@@ -817,7 +897,7 @@ static void ath9k_stop(struct ieee80211_hw *hw)
ath_dbg(common, CONFIG, "Driver halt\n");
}
-bool ath9k_uses_beacons(int type)
+static bool ath9k_uses_beacons(int type)
{
switch (type) {
case NL80211_IFTYPE_AP:
@@ -1202,81 +1282,12 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
}
if ((changed & IEEE80211_CONF_CHANGE_CHANNEL) || reset_channel) {
- struct ieee80211_channel *curchan = hw->conf.chandef.chan;
- int pos = curchan->hw_value;
- int old_pos = -1;
- unsigned long flags;
-
- if (ah->curchan)
- old_pos = ah->curchan - &ah->channels[0];
-
- ath_dbg(common, CONFIG, "Set channel: %d MHz width: %d\n",
- curchan->center_freq, hw->conf.chandef.width);
-
- /* update survey stats for the old channel before switching */
- spin_lock_irqsave(&common->cc_lock, flags);
- ath_update_survey_stats(sc);
- spin_unlock_irqrestore(&common->cc_lock, flags);
-
- ath9k_cmn_update_ichannel(&sc->sc_ah->channels[pos],
- &conf->chandef);
-
- /*
- * If the operating channel changes, change the survey in-use flags
- * along with it.
- * Reset the survey data for the new channel, unless we're switching
- * back to the operating channel from an off-channel operation.
- */
- if (!(hw->conf.flags & IEEE80211_CONF_OFFCHANNEL) &&
- sc->cur_survey != &sc->survey[pos]) {
-
- if (sc->cur_survey)
- sc->cur_survey->filled &= ~SURVEY_INFO_IN_USE;
-
- sc->cur_survey = &sc->survey[pos];
-
- memset(sc->cur_survey, 0, sizeof(struct survey_info));
- sc->cur_survey->filled |= SURVEY_INFO_IN_USE;
- } else if (!(sc->survey[pos].filled & SURVEY_INFO_IN_USE)) {
- memset(&sc->survey[pos], 0, sizeof(struct survey_info));
- }
-
- if (ath_set_channel(sc, hw, &sc->sc_ah->channels[pos]) < 0) {
+ if (ath_set_channel(sc, &hw->conf.chandef) < 0) {
ath_err(common, "Unable to set channel\n");
mutex_unlock(&sc->mutex);
ath9k_ps_restore(sc);
return -EINVAL;
}
-
- /*
- * The most recent snapshot of channel->noisefloor for the old
- * channel is only available after the hardware reset. Copy it to
- * the survey stats now.
- */
- if (old_pos >= 0)
- ath_update_survey_nf(sc, old_pos);
-
- /*
- * Enable radar pulse detection if on a DFS channel. Spectral
- * scanning and radar detection can not be used concurrently.
- */
- if (hw->conf.radar_enabled) {
- u32 rxfilter;
-
- /* set HW specific DFS configuration */
- ath9k_hw_set_radar_params(ah);
- rxfilter = ath9k_hw_getrxfilter(ah);
- rxfilter |= ATH9K_RX_FILTER_PHYRADAR |
- ATH9K_RX_FILTER_PHYERR;
- ath9k_hw_setrxfilter(ah, rxfilter);
- ath_dbg(common, DFS, "DFS enabled at freq %d\n",
- curchan->center_freq);
- } else {
- /* perform spectral scan if requested. */
- if (test_bit(SC_OP_SCANNING, &sc->sc_flags) &&
- sc->spectral_mode == SPECTRAL_CHANSCAN)
- ath9k_spectral_scan_trigger(hw);
- }
}
if (changed & IEEE80211_CONF_CHANGE_POWER) {
diff --git a/drivers/net/wireless/ath/ath9k/mci.c b/drivers/net/wireless/ath/ath9k/mci.c
index 815bee2..0ac1b5f 100644
--- a/drivers/net/wireless/ath/ath9k/mci.c
+++ b/drivers/net/wireless/ath/ath9k/mci.c
@@ -661,9 +661,9 @@ void ath9k_mci_update_wlan_channels(struct ath_softc *sc, bool allow_all)
chan_start = wlan_chan - 10;
chan_end = wlan_chan + 10;
- if (chan->chanmode == CHANNEL_G_HT40PLUS)
+ if (IS_CHAN_HT40PLUS(chan))
chan_end += 20;
- else if (chan->chanmode == CHANNEL_G_HT40MINUS)
+ else if (IS_CHAN_HT40MINUS(chan))
chan_start -= 20;
/* adjust side band */
@@ -707,11 +707,11 @@ void ath9k_mci_set_txpower(struct ath_softc *sc, bool setchannel,
if (setchannel) {
struct ath9k_hw_cal_data *caldata = &sc->caldata;
- if ((caldata->chanmode == CHANNEL_G_HT40PLUS) &&
+ if (IS_CHAN_HT40PLUS(ah->curchan) &&
(ah->curchan->channel > caldata->channel) &&
(ah->curchan->channel <= caldata->channel + 20))
return;
- if ((caldata->chanmode == CHANNEL_G_HT40MINUS) &&
+ if (IS_CHAN_HT40MINUS(ah->curchan) &&
(ah->curchan->channel < caldata->channel) &&
(ah->curchan->channel >= caldata->channel - 20))
return;
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index d089a7c..7e4c252 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -269,7 +269,200 @@ static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = {
{ PCI_VDEVICE(ATHEROS, 0x0034) }, /* PCI-E AR9462 */
{ PCI_VDEVICE(ATHEROS, 0x0037) }, /* PCI-E AR1111/AR9485 */
- { PCI_VDEVICE(ATHEROS, 0x0036) }, /* PCI-E AR9565 */
+
+ /* CUS252 */
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_ATHEROS,
+ 0x3028),
+ .driver_data = ATH9K_PCI_CUS252 |
+ ATH9K_PCI_AR9565_2ANT |
+ ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_AZWAVE,
+ 0x2176),
+ .driver_data = ATH9K_PCI_CUS252 |
+ ATH9K_PCI_AR9565_2ANT |
+ ATH9K_PCI_BT_ANT_DIV },
+
+ /* WB335 1-ANT */
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_FOXCONN,
+ 0xE068),
+ .driver_data = ATH9K_PCI_AR9565_1ANT },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x185F, /* WNC */
+ 0xA119),
+ .driver_data = ATH9K_PCI_AR9565_1ANT },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x11AD, /* LITEON */
+ 0x0632),
+ .driver_data = ATH9K_PCI_AR9565_1ANT },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x11AD, /* LITEON */
+ 0x6671),
+ .driver_data = ATH9K_PCI_AR9565_1ANT },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x1B9A, /* XAVI */
+ 0x2811),
+ .driver_data = ATH9K_PCI_AR9565_1ANT },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x1B9A, /* XAVI */
+ 0x2812),
+ .driver_data = ATH9K_PCI_AR9565_1ANT },
+
+ /* WB335 1-ANT / Antenna Diversity */
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_ATHEROS,
+ 0x3025),
+ .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_ATHEROS,
+ 0x3026),
+ .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_ATHEROS,
+ 0x302B),
+ .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_FOXCONN,
+ 0xE069),
+ .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x185F, /* WNC */
+ 0x3028),
+ .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x11AD, /* LITEON */
+ 0x0622),
+ .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x11AD, /* LITEON */
+ 0x0672),
+ .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x11AD, /* LITEON */
+ 0x0662),
+ .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_AZWAVE,
+ 0x213A),
+ .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_LENOVO,
+ 0x3026),
+ .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_HP,
+ 0x18E3),
+ .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_HP,
+ 0x217F),
+ .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_DELL,
+ 0x020E),
+ .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
+
+ /* WB335 2-ANT */
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_SAMSUNG,
+ 0x411A),
+ .driver_data = ATH9K_PCI_AR9565_2ANT },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_SAMSUNG,
+ 0x411B),
+ .driver_data = ATH9K_PCI_AR9565_2ANT },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_SAMSUNG,
+ 0x411C),
+ .driver_data = ATH9K_PCI_AR9565_2ANT },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_SAMSUNG,
+ 0x411D),
+ .driver_data = ATH9K_PCI_AR9565_2ANT },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_SAMSUNG,
+ 0x411E),
+ .driver_data = ATH9K_PCI_AR9565_2ANT },
+
+ /* WB335 2-ANT / Antenna Diversity */
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_ATHEROS,
+ 0x3027),
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_ATHEROS,
+ 0x302C),
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x11AD, /* LITEON */
+ 0x0642),
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x11AD, /* LITEON */
+ 0x0652),
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x11AD, /* LITEON */
+ 0x0612),
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_AZWAVE,
+ 0x2130),
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x144F, /* ASKEY */
+ 0x7202),
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x1B9A, /* XAVI */
+ 0x2810),
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x185F, /* WNC */
+ 0x3027),
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+
+ /* PCI-E AR9565 (WB335) */
+ { PCI_VDEVICE(ATHEROS, 0x0036),
+ .driver_data = ATH9K_PCI_BT_ANT_DIV },
+
{ 0 }
};
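The entries above hand per-board capability flags to the driver through the .driver_data field of each pci_device_id. As a reference, here is a minimal sketch of how a PCI probe callback typically reads those flags back (illustrative only — ath9k's actual probe path is not part of this hunk):

        /* The id passed to probe() is the table entry that matched,
         * so the flags stored in .driver_data above come back directly.
         */
        static int example_probe(struct pci_dev *pdev,
                                 const struct pci_device_id *id)
        {
                unsigned long driver_data = id->driver_data;

                if (driver_data & ATH9K_PCI_BT_ANT_DIV)
                        ;       /* enable BT antenna-diversity handling */

                return 0;
        }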
diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c
index d3d7c51..d829bb6 100644
--- a/drivers/net/wireless/ath/ath9k/rc.c
+++ b/drivers/net/wireless/ath/ath9k/rc.c
@@ -1387,31 +1387,31 @@ static ssize_t read_file_rcstat(struct file *file, char __user *user_buf,
int used_mcs = 0, used_htmode = 0;
if (WLAN_RC_PHY_HT(rc->rate_table->info[i].phy)) {
- used_mcs = snprintf(mcs, 5, "%d",
- rc->rate_table->info[i].ratecode);
+ used_mcs = scnprintf(mcs, 5, "%d",
+ rc->rate_table->info[i].ratecode);
if (WLAN_RC_PHY_40(rc->rate_table->info[i].phy))
- used_htmode = snprintf(htmode, 5, "HT40");
+ used_htmode = scnprintf(htmode, 5, "HT40");
else if (WLAN_RC_PHY_20(rc->rate_table->info[i].phy))
- used_htmode = snprintf(htmode, 5, "HT20");
+ used_htmode = scnprintf(htmode, 5, "HT20");
else
- used_htmode = snprintf(htmode, 5, "????");
+ used_htmode = scnprintf(htmode, 5, "????");
}
mcs[used_mcs] = '\0';
htmode[used_htmode] = '\0';
- len += snprintf(buf + len, max - len,
- "%6s %6s %3u.%d: "
- "%10u %10u %10u %10u\n",
- htmode,
- mcs,
- ratekbps / 1000,
- (ratekbps % 1000) / 100,
- stats->success,
- stats->retries,
- stats->xretries,
- stats->per);
+ len += scnprintf(buf + len, max - len,
+ "%6s %6s %3u.%d: "
+ "%10u %10u %10u %10u\n",
+ htmode,
+ mcs,
+ ratekbps / 1000,
+ (ratekbps % 1000) / 100,
+ stats->success,
+ stats->retries,
+ stats->xretries,
+ stats->per);
}
if (len > max)
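The snprintf() to scnprintf() conversion above is not cosmetic: snprintf() returns the length the output would have taken even when it was truncated, while scnprintf() returns the number of bytes actually written, excluding the trailing NUL. When the return value is accumulated into an offset, as with len here, the snprintf() form can push the offset past the end of the buffer. A minimal illustration:

        char buf[8];
        int a, b;

        a = snprintf(buf, sizeof(buf), "0123456789");   /* a == 10 */
        b = scnprintf(buf, sizeof(buf), "0123456789");  /* b == 7  */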
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index 4ee472a..a051641 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -19,7 +19,7 @@
#include "ath9k.h"
#include "ar9003_mac.h"
-#define SKB_CB_ATHBUF(__skb) (*((struct ath_buf **)__skb->cb))
+#define SKB_CB_ATHBUF(__skb) (*((struct ath_rxbuf **)__skb->cb))
static inline bool ath9k_check_auto_sleep(struct ath_softc *sc)
{
@@ -35,7 +35,7 @@ static inline bool ath9k_check_auto_sleep(struct ath_softc *sc)
* buffer (or rx fifo). This can incorrectly acknowledge packets
* to a sender if last desc is self-linked.
*/
-static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
+static void ath_rx_buf_link(struct ath_softc *sc, struct ath_rxbuf *bf)
{
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
@@ -68,7 +68,7 @@ static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
sc->rx.rxlink = &ds->ds_link;
}
-static void ath_rx_buf_relink(struct ath_softc *sc, struct ath_buf *bf)
+static void ath_rx_buf_relink(struct ath_softc *sc, struct ath_rxbuf *bf)
{
if (sc->rx.buf_hold)
ath_rx_buf_link(sc, sc->rx.buf_hold);
@@ -112,13 +112,13 @@ static bool ath_rx_edma_buf_link(struct ath_softc *sc,
struct ath_hw *ah = sc->sc_ah;
struct ath_rx_edma *rx_edma;
struct sk_buff *skb;
- struct ath_buf *bf;
+ struct ath_rxbuf *bf;
rx_edma = &sc->rx.rx_edma[qtype];
if (skb_queue_len(&rx_edma->rx_fifo) >= rx_edma->rx_fifo_hwsize)
return false;
- bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
+ bf = list_first_entry(&sc->rx.rxbuf, struct ath_rxbuf, list);
list_del_init(&bf->list);
skb = bf->bf_mpdu;
@@ -138,7 +138,7 @@ static void ath_rx_addbuffer_edma(struct ath_softc *sc,
enum ath9k_rx_qtype qtype)
{
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
- struct ath_buf *bf, *tbf;
+ struct ath_rxbuf *bf, *tbf;
if (list_empty(&sc->rx.rxbuf)) {
ath_dbg(common, QUEUE, "No free rx buf available\n");
@@ -154,7 +154,7 @@ static void ath_rx_addbuffer_edma(struct ath_softc *sc,
static void ath_rx_remove_buffer(struct ath_softc *sc,
enum ath9k_rx_qtype qtype)
{
- struct ath_buf *bf;
+ struct ath_rxbuf *bf;
struct ath_rx_edma *rx_edma;
struct sk_buff *skb;
@@ -171,7 +171,7 @@ static void ath_rx_edma_cleanup(struct ath_softc *sc)
{
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
- struct ath_buf *bf;
+ struct ath_rxbuf *bf;
ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);
@@ -199,7 +199,7 @@ static int ath_rx_edma_init(struct ath_softc *sc, int nbufs)
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath_hw *ah = sc->sc_ah;
struct sk_buff *skb;
- struct ath_buf *bf;
+ struct ath_rxbuf *bf;
int error = 0, i;
u32 size;
@@ -211,7 +211,7 @@ static int ath_rx_edma_init(struct ath_softc *sc, int nbufs)
ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_HP],
ah->caps.rx_hp_qdepth);
- size = sizeof(struct ath_buf) * nbufs;
+ size = sizeof(struct ath_rxbuf) * nbufs;
bf = devm_kzalloc(sc->dev, size, GFP_KERNEL);
if (!bf)
return -ENOMEM;
@@ -271,7 +271,7 @@ int ath_rx_init(struct ath_softc *sc, int nbufs)
{
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct sk_buff *skb;
- struct ath_buf *bf;
+ struct ath_rxbuf *bf;
int error = 0;
spin_lock_init(&sc->sc_pcu_lock);
@@ -332,7 +332,7 @@ void ath_rx_cleanup(struct ath_softc *sc)
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
struct sk_buff *skb;
- struct ath_buf *bf;
+ struct ath_rxbuf *bf;
if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
ath_rx_edma_cleanup(sc);
@@ -427,7 +427,7 @@ u32 ath_calcrxfilter(struct ath_softc *sc)
int ath_startrecv(struct ath_softc *sc)
{
struct ath_hw *ah = sc->sc_ah;
- struct ath_buf *bf, *tbf;
+ struct ath_rxbuf *bf, *tbf;
if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
ath_edma_start_recv(sc);
@@ -447,7 +447,7 @@ int ath_startrecv(struct ath_softc *sc)
if (list_empty(&sc->rx.rxbuf))
goto start_recv;
- bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
+ bf = list_first_entry(&sc->rx.rxbuf, struct ath_rxbuf, list);
ath9k_hw_putrxbuf(ah, bf->bf_daddr);
ath9k_hw_rxena(ah);
@@ -603,13 +603,13 @@ static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb, bool mybeacon)
static bool ath_edma_get_buffers(struct ath_softc *sc,
enum ath9k_rx_qtype qtype,
struct ath_rx_status *rs,
- struct ath_buf **dest)
+ struct ath_rxbuf **dest)
{
struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype];
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
struct sk_buff *skb;
- struct ath_buf *bf;
+ struct ath_rxbuf *bf;
int ret;
skb = skb_peek(&rx_edma->rx_fifo);
@@ -653,11 +653,11 @@ static bool ath_edma_get_buffers(struct ath_softc *sc,
return true;
}
-static struct ath_buf *ath_edma_get_next_rx_buf(struct ath_softc *sc,
+static struct ath_rxbuf *ath_edma_get_next_rx_buf(struct ath_softc *sc,
struct ath_rx_status *rs,
enum ath9k_rx_qtype qtype)
{
- struct ath_buf *bf = NULL;
+ struct ath_rxbuf *bf = NULL;
while (ath_edma_get_buffers(sc, qtype, rs, &bf)) {
if (!bf)
@@ -668,13 +668,13 @@ static struct ath_buf *ath_edma_get_next_rx_buf(struct ath_softc *sc,
return NULL;
}
-static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc,
+static struct ath_rxbuf *ath_get_next_rx_buf(struct ath_softc *sc,
struct ath_rx_status *rs)
{
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
struct ath_desc *ds;
- struct ath_buf *bf;
+ struct ath_rxbuf *bf;
int ret;
if (list_empty(&sc->rx.rxbuf)) {
@@ -682,7 +682,7 @@ static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc,
return NULL;
}
- bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
+ bf = list_first_entry(&sc->rx.rxbuf, struct ath_rxbuf, list);
if (bf == sc->rx.buf_hold)
return NULL;
@@ -702,7 +702,7 @@ static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc,
ret = ath9k_hw_rxprocdesc(ah, ds, rs);
if (ret == -EINPROGRESS) {
struct ath_rx_status trs;
- struct ath_buf *tbf;
+ struct ath_rxbuf *tbf;
struct ath_desc *tds;
memset(&trs, 0, sizeof(trs));
@@ -711,7 +711,7 @@ static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc,
return NULL;
}
- tbf = list_entry(bf->list.next, struct ath_buf, list);
+ tbf = list_entry(bf->list.next, struct ath_rxbuf, list);
/*
* On some hardware the descriptor status words could
@@ -1315,7 +1315,7 @@ static void ath9k_apply_ampdu_details(struct ath_softc *sc,
int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
{
- struct ath_buf *bf;
+ struct ath_rxbuf *bf;
struct sk_buff *skb = NULL, *requeue_skb, *hdr_skb;
struct ieee80211_rx_status *rxs;
struct ath_hw *ah = sc->sc_ah;
diff --git a/drivers/net/wireless/ath/ath9k/wmi.h b/drivers/net/wireless/ath/ath9k/wmi.h
index fde6da6..0db37f2 100644
--- a/drivers/net/wireless/ath/ath9k/wmi.h
+++ b/drivers/net/wireless/ath/ath9k/wmi.h
@@ -39,7 +39,7 @@ struct wmi_fw_version {
struct wmi_event_swba {
__be64 tsf;
u8 beacon_pending;
-};
+} __packed;
/*
* 64 - HTC header - WMI header - 1 / txstatus
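Marking wmi_event_swba as __packed removes compiler-inserted padding, so the in-memory layout matches the event exactly as the firmware emits it; without the attribute, most ABIs pad the structure out to the 8-byte alignment of the __be64 member. An illustration of the difference (sizes assume a typical ABI):

        struct swba_natural {
                __be64 tsf;             /* offset 0 */
                u8 beacon_pending;      /* offset 8 */
        };                              /* sizeof() == 16: 7 bytes of tail padding */

        struct swba_packed {
                __be64 tsf;
                u8 beacon_pending;
        } __packed;                     /* sizeof() == 9: matches the wire format */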
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 35b515f..fc76052 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -1695,16 +1695,9 @@ int ath_cabq_update(struct ath_softc *sc)
int qnum = sc->beacon.cabq->axq_qnum;
ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
- /*
- * Ensure the readytime % is within the bounds.
- */
- if (sc->config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
- sc->config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
- else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
- sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;
qi.tqi_readyTime = (cur_conf->beacon_interval *
- sc->config.cabqReadytime) / 100;
+ ATH_CABQ_READY_TIME) / 100;
ath_txq_update(sc, qnum, &qi);
return 0;
@@ -2023,8 +2016,7 @@ u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
struct ath_hw *ah = sc->sc_ah;
struct ath9k_channel *curchan = ah->curchan;
- if ((ah->caps.hw_caps & ATH9K_HW_CAP_APM) &&
- (curchan->channelFlags & CHANNEL_5GHZ) &&
+ if ((ah->caps.hw_caps & ATH9K_HW_CAP_APM) && IS_CHAN_5GHZ(curchan) &&
(chainmask == 0x7) && (rate < 0x90))
return 0x3;
else if (AR_SREV_9462(ah) && ath9k_hw_btcoex_is_enabled(ah) &&
@@ -2315,7 +2307,7 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
ath_dbg(common, XMIT, "TX complete: skb: %p\n", skb);
if (sc->sc_ah->caldata)
- sc->sc_ah->caldata->paprd_packet_sent = true;
+ set_bit(PAPRD_PACKET_SENT, &sc->sc_ah->caldata->cal_flags);
if (!(tx_flags & ATH_TX_ERROR))
/* Frame was ACKed */
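The last hunk replaces a plain bool with set_bit() on the cal_flags word, making the update atomic with respect to other flag bits sharing the same unsigned long. A short sketch of the usual producer/consumer pairing, assuming PAPRD_PACKET_SENT is a bit index into caldata->cal_flags as the hunk suggests:

        #include <linux/bitops.h>

        /* producer: mark the event (atomic read-modify-write) */
        set_bit(PAPRD_PACKET_SENT, &caldata->cal_flags);

        /* consumer: observe and clear the event exactly once */
        if (test_and_clear_bit(PAPRD_PACKET_SENT, &caldata->cal_flags))
                ;       /* handle the completed PAPRD frame */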
diff --git a/drivers/net/wireless/ath/wcn36xx/Kconfig b/drivers/net/wireless/ath/wcn36xx/Kconfig
new file mode 100644
index 0000000..591ebae
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/Kconfig
@@ -0,0 +1,16 @@
+config WCN36XX
+ tristate "Qualcomm Atheros WCN3660/3680 support"
+ depends on MAC80211 && HAS_DMA
+ ---help---
+ This module adds support for wireless adapters based on
+ Qualcomm Atheros WCN3660 and WCN3680 mobile chipsets.
+
+ If you choose to build a module, it'll be called wcn36xx.
+
+config WCN36XX_DEBUGFS
+ bool "WCN36XX debugfs support"
+ depends on WCN36XX
+ ---help---
+ Enable debugfs support
+
+ If unsure, say Y to make it easier to debug problems.
diff --git a/drivers/net/wireless/ath/wcn36xx/Makefile b/drivers/net/wireless/ath/wcn36xx/Makefile
new file mode 100644
index 0000000..50c43b4
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/Makefile
@@ -0,0 +1,7 @@
+obj-$(CONFIG_WCN36XX) := wcn36xx.o
+wcn36xx-y += main.o \
+ dxe.o \
+ txrx.o \
+ smd.o \
+ pmc.o \
+ debug.o
diff --git a/drivers/net/wireless/ath/wcn36xx/debug.c b/drivers/net/wireless/ath/wcn36xx/debug.c
new file mode 100644
index 0000000..5b84f7a
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/debug.c
@@ -0,0 +1,181 @@
+/*
+ * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include "wcn36xx.h"
+#include "debug.h"
+#include "pmc.h"
+
+#ifdef CONFIG_WCN36XX_DEBUGFS
+
+static ssize_t read_file_bool_bmps(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct wcn36xx *wcn = file->private_data;
+ struct wcn36xx_vif *vif_priv = NULL;
+ struct ieee80211_vif *vif = NULL;
+ char buf[3];
+
+ list_for_each_entry(vif_priv, &wcn->vif_list, list) {
+ vif = container_of((void *)vif_priv,
+ struct ieee80211_vif,
+ drv_priv);
+ if (NL80211_IFTYPE_STATION == vif->type) {
+ if (vif_priv->pw_state == WCN36XX_BMPS)
+ buf[0] = '1';
+ else
+ buf[0] = '0';
+ break;
+ }
+ }
+ buf[1] = '\n';
+ buf[2] = 0x00;
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
+}
+
+static ssize_t write_file_bool_bmps(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct wcn36xx *wcn = file->private_data;
+ struct wcn36xx_vif *vif_priv = NULL;
+ struct ieee80211_vif *vif = NULL;
+
+ char buf[32];
+ int buf_size;
+
+ buf_size = min(count, (sizeof(buf)-1));
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+
+ switch (buf[0]) {
+ case 'y':
+ case 'Y':
+ case '1':
+ list_for_each_entry(vif_priv, &wcn->vif_list, list) {
+ vif = container_of((void *)vif_priv,
+ struct ieee80211_vif,
+ drv_priv);
+ if (NL80211_IFTYPE_STATION == vif->type) {
+ wcn36xx_enable_keep_alive_null_packet(wcn, vif);
+ wcn36xx_pmc_enter_bmps_state(wcn, vif);
+ }
+ }
+ break;
+ case 'n':
+ case 'N':
+ case '0':
+ list_for_each_entry(vif_priv, &wcn->vif_list, list) {
+ vif = container_of((void *)vif_priv,
+ struct ieee80211_vif,
+ drv_priv);
+ if (NL80211_IFTYPE_STATION == vif->type)
+ wcn36xx_pmc_exit_bmps_state(wcn, vif);
+ }
+ break;
+ }
+
+ return count;
+}
+
+static const struct file_operations fops_wcn36xx_bmps = {
+ .open = simple_open,
+ .read = read_file_bool_bmps,
+ .write = write_file_bool_bmps,
+};
+
+static ssize_t write_file_dump(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct wcn36xx *wcn = file->private_data;
+ char buf[255], *tmp;
+ int buf_size;
+ u32 arg[WCN36xx_MAX_DUMP_ARGS];
+ int i;
+
+ memset(buf, 0, sizeof(buf));
+ memset(arg, 0, sizeof(arg));
+
+ buf_size = min(count, (sizeof(buf) - 1));
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+
+ tmp = buf;
+
+ for (i = 0; i < WCN36xx_MAX_DUMP_ARGS; i++) {
+ char *begin;
+ begin = strsep(&tmp, " ");
+ if (begin == NULL)
+ break;
+
+ if (kstrtoul(begin, 0, (unsigned long *)(arg + i)) != 0)
+ break;
+ }
+
+ wcn36xx_info("DUMP args is %d %d %d %d %d\n", arg[0], arg[1], arg[2],
+ arg[3], arg[4]);
+ wcn36xx_smd_dump_cmd_req(wcn, arg[0], arg[1], arg[2], arg[3], arg[4]);
+
+ return count;
+}
+
+static const struct file_operations fops_wcn36xx_dump = {
+ .open = simple_open,
+ .write = write_file_dump,
+};
+
+#define ADD_FILE(name, mode, fop, priv_data) \
+ do { \
+ struct dentry *d; \
+ d = debugfs_create_file(__stringify(name), \
+ mode, dfs->rootdir, \
+ priv_data, fop); \
+ dfs->file_##name.dentry = d; \
+ if (IS_ERR(d)) { \
+ wcn36xx_warn("Create the debugfs entry failed");\
+ dfs->file_##name.dentry = NULL; \
+ } \
+ } while (0)
+
+
+void wcn36xx_debugfs_init(struct wcn36xx *wcn)
+{
+ struct wcn36xx_dfs_entry *dfs = &wcn->dfs;
+
+ dfs->rootdir = debugfs_create_dir(KBUILD_MODNAME,
+ wcn->hw->wiphy->debugfsdir);
+ if (IS_ERR(dfs->rootdir)) {
+ wcn36xx_warn("Create the debugfs failed\n");
+ dfs->rootdir = NULL;
+ }
+
+ ADD_FILE(bmps_switcher, S_IRUSR | S_IWUSR,
+ &fops_wcn36xx_bmps, wcn);
+ ADD_FILE(dump, S_IWUSR, &fops_wcn36xx_dump, wcn);
+}
+
+void wcn36xx_debugfs_exit(struct wcn36xx *wcn)
+{
+ struct wcn36xx_dfs_entry *dfs = &wcn->dfs;
+ debugfs_remove_recursive(dfs->rootdir);
+}
+
+#endif /* CONFIG_WCN36XX_DEBUGFS */
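For reference, the ADD_FILE() macro above pairs __stringify() with token pasting so one argument names both the debugfs file and the struct member tracking it; ADD_FILE(dump, S_IWUSR, &fops_wcn36xx_dump, wcn) expands to roughly the following (do/while wrapper omitted):

        struct dentry *d;

        d = debugfs_create_file("dump", S_IWUSR, dfs->rootdir,
                                wcn, &fops_wcn36xx_dump);
        dfs->file_dump.dentry = d;
        if (IS_ERR(d)) {
                wcn36xx_warn("Create the debugfs entry failed\n");
                dfs->file_dump.dentry = NULL;
        }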
diff --git a/drivers/net/wireless/ath/wcn36xx/debug.h b/drivers/net/wireless/ath/wcn36xx/debug.h
new file mode 100644
index 0000000..46307aa
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/debug.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _WCN36XX_DEBUG_H_
+#define _WCN36XX_DEBUG_H_
+
+#include <linux/kernel.h>
+
+#define WCN36xx_MAX_DUMP_ARGS 5
+
+#ifdef CONFIG_WCN36XX_DEBUGFS
+struct wcn36xx_dfs_file {
+ struct dentry *dentry;
+ u32 value;
+};
+
+struct wcn36xx_dfs_entry {
+ struct dentry *rootdir;
+ struct wcn36xx_dfs_file file_bmps_switcher;
+ struct wcn36xx_dfs_file file_dump;
+};
+
+void wcn36xx_debugfs_init(struct wcn36xx *wcn);
+void wcn36xx_debugfs_exit(struct wcn36xx *wcn);
+
+#else
+static inline void wcn36xx_debugfs_init(struct wcn36xx *wcn)
+{
+}
+static inline void wcn36xx_debugfs_exit(struct wcn36xx *wcn)
+{
+}
+
+#endif /* CONFIG_WCN36XX_DEBUGFS */
+
+#endif /* _WCN36XX_DEBUG_H_ */
diff --git a/drivers/net/wireless/ath/wcn36xx/dxe.c b/drivers/net/wireless/ath/wcn36xx/dxe.c
new file mode 100644
index 0000000..ee25786
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/dxe.c
@@ -0,0 +1,805 @@
+/*
+ * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/* DXE - DMA transfer engine
+ * We have 2 channels (high prio and low prio) for TX and 2 channels for RX.
+ * Data packets are transferred through the low-priority channels and
+ * management packets through the high-priority ones.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/interrupt.h>
+#include "wcn36xx.h"
+#include "txrx.h"
+
+void *wcn36xx_dxe_get_next_bd(struct wcn36xx *wcn, bool is_low)
+{
+ struct wcn36xx_dxe_ch *ch = is_low ?
+ &wcn->dxe_tx_l_ch :
+ &wcn->dxe_tx_h_ch;
+
+ return ch->head_blk_ctl->bd_cpu_addr;
+}
+
+static void wcn36xx_dxe_write_register(struct wcn36xx *wcn, int addr, int data)
+{
+ wcn36xx_dbg(WCN36XX_DBG_DXE,
+ "wcn36xx_dxe_write_register: addr=%x, data=%x\n",
+ addr, data);
+
+ writel(data, wcn->mmio + addr);
+}
+
+static void wcn36xx_dxe_read_register(struct wcn36xx *wcn, int addr, int *data)
+{
+ *data = readl(wcn->mmio + addr);
+
+ wcn36xx_dbg(WCN36XX_DBG_DXE,
+ "wcn36xx_dxe_read_register: addr=%x, data=%x\n",
+ addr, *data);
+}
+
+static void wcn36xx_dxe_free_ctl_block(struct wcn36xx_dxe_ch *ch)
+{
+ struct wcn36xx_dxe_ctl *ctl = ch->head_blk_ctl, *next;
+ int i;
+
+ for (i = 0; i < ch->desc_num && ctl; i++) {
+ next = ctl->next;
+ kfree(ctl);
+ ctl = next;
+ }
+}
+
+static int wcn36xx_dxe_allocate_ctl_block(struct wcn36xx_dxe_ch *ch)
+{
+ struct wcn36xx_dxe_ctl *prev_ctl = NULL;
+ struct wcn36xx_dxe_ctl *cur_ctl = NULL;
+ int i;
+
+ for (i = 0; i < ch->desc_num; i++) {
+ cur_ctl = kzalloc(sizeof(*cur_ctl), GFP_KERNEL);
+ if (!cur_ctl)
+ goto out_fail;
+
+ cur_ctl->ctl_blk_order = i;
+ if (i == 0) {
+ ch->head_blk_ctl = cur_ctl;
+ ch->tail_blk_ctl = cur_ctl;
+ } else if (ch->desc_num - 1 == i) {
+ prev_ctl->next = cur_ctl;
+ cur_ctl->next = ch->head_blk_ctl;
+ } else {
+ prev_ctl->next = cur_ctl;
+ }
+ prev_ctl = cur_ctl;
+ }
+
+ return 0;
+
+out_fail:
+ wcn36xx_dxe_free_ctl_block(ch);
+ return -ENOMEM;
+}
+
+int wcn36xx_dxe_alloc_ctl_blks(struct wcn36xx *wcn)
+{
+ int ret;
+
+ wcn->dxe_tx_l_ch.ch_type = WCN36XX_DXE_CH_TX_L;
+ wcn->dxe_tx_h_ch.ch_type = WCN36XX_DXE_CH_TX_H;
+ wcn->dxe_rx_l_ch.ch_type = WCN36XX_DXE_CH_RX_L;
+ wcn->dxe_rx_h_ch.ch_type = WCN36XX_DXE_CH_RX_H;
+
+ wcn->dxe_tx_l_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_TX_L;
+ wcn->dxe_tx_h_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_TX_H;
+ wcn->dxe_rx_l_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_RX_L;
+ wcn->dxe_rx_h_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_RX_H;
+
+ wcn->dxe_tx_l_ch.dxe_wq = WCN36XX_DXE_WQ_TX_L;
+ wcn->dxe_tx_h_ch.dxe_wq = WCN36XX_DXE_WQ_TX_H;
+
+ wcn->dxe_tx_l_ch.ctrl_bd = WCN36XX_DXE_CTRL_TX_L_BD;
+ wcn->dxe_tx_h_ch.ctrl_bd = WCN36XX_DXE_CTRL_TX_H_BD;
+
+ wcn->dxe_tx_l_ch.ctrl_skb = WCN36XX_DXE_CTRL_TX_L_SKB;
+ wcn->dxe_tx_h_ch.ctrl_skb = WCN36XX_DXE_CTRL_TX_H_SKB;
+
+ wcn->dxe_tx_l_ch.reg_ctrl = WCN36XX_DXE_REG_CTL_TX_L;
+ wcn->dxe_tx_h_ch.reg_ctrl = WCN36XX_DXE_REG_CTL_TX_H;
+
+ wcn->dxe_tx_l_ch.def_ctrl = WCN36XX_DXE_CH_DEFAULT_CTL_TX_L;
+ wcn->dxe_tx_h_ch.def_ctrl = WCN36XX_DXE_CH_DEFAULT_CTL_TX_H;
+
+ /* DXE control block allocation */
+ ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_tx_l_ch);
+ if (ret)
+ goto out_err;
+ ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_tx_h_ch);
+ if (ret)
+ goto out_err;
+ ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_rx_l_ch);
+ if (ret)
+ goto out_err;
+ ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_rx_h_ch);
+ if (ret)
+ goto out_err;
+
+ /* Initialize SMSM state: clear TX_ENABLE, set TX_RINGS_EMPTY */
+ ret = wcn->ctrl_ops->smsm_change_state(
+ WCN36XX_SMSM_WLAN_TX_ENABLE,
+ WCN36XX_SMSM_WLAN_TX_RINGS_EMPTY);
+
+ return 0;
+
+out_err:
+ wcn36xx_err("Failed to allocate DXE control blocks\n");
+ wcn36xx_dxe_free_ctl_blks(wcn);
+ return -ENOMEM;
+}
+
+void wcn36xx_dxe_free_ctl_blks(struct wcn36xx *wcn)
+{
+ wcn36xx_dxe_free_ctl_block(&wcn->dxe_tx_l_ch);
+ wcn36xx_dxe_free_ctl_block(&wcn->dxe_tx_h_ch);
+ wcn36xx_dxe_free_ctl_block(&wcn->dxe_rx_l_ch);
+ wcn36xx_dxe_free_ctl_block(&wcn->dxe_rx_h_ch);
+}
+
+static int wcn36xx_dxe_init_descs(struct wcn36xx_dxe_ch *wcn_ch)
+{
+ struct wcn36xx_dxe_desc *cur_dxe = NULL;
+ struct wcn36xx_dxe_desc *prev_dxe = NULL;
+ struct wcn36xx_dxe_ctl *cur_ctl = NULL;
+ size_t size;
+ int i;
+
+ size = wcn_ch->desc_num * sizeof(struct wcn36xx_dxe_desc);
+ wcn_ch->cpu_addr = dma_alloc_coherent(NULL, size, &wcn_ch->dma_addr,
+ GFP_KERNEL);
+ if (!wcn_ch->cpu_addr)
+ return -ENOMEM;
+
+ memset(wcn_ch->cpu_addr, 0, size);
+
+ cur_dxe = (struct wcn36xx_dxe_desc *)wcn_ch->cpu_addr;
+ cur_ctl = wcn_ch->head_blk_ctl;
+
+ for (i = 0; i < wcn_ch->desc_num; i++) {
+ cur_ctl->desc = cur_dxe;
+ cur_ctl->desc_phy_addr = wcn_ch->dma_addr +
+ i * sizeof(struct wcn36xx_dxe_desc);
+
+ switch (wcn_ch->ch_type) {
+ case WCN36XX_DXE_CH_TX_L:
+ cur_dxe->ctrl = WCN36XX_DXE_CTRL_TX_L;
+ cur_dxe->dst_addr_l = WCN36XX_DXE_WQ_TX_L;
+ break;
+ case WCN36XX_DXE_CH_TX_H:
+ cur_dxe->ctrl = WCN36XX_DXE_CTRL_TX_H;
+ cur_dxe->dst_addr_l = WCN36XX_DXE_WQ_TX_H;
+ break;
+ case WCN36XX_DXE_CH_RX_L:
+ cur_dxe->ctrl = WCN36XX_DXE_CTRL_RX_L;
+ cur_dxe->src_addr_l = WCN36XX_DXE_WQ_RX_L;
+ break;
+ case WCN36XX_DXE_CH_RX_H:
+ cur_dxe->ctrl = WCN36XX_DXE_CTRL_RX_H;
+ cur_dxe->src_addr_l = WCN36XX_DXE_WQ_RX_H;
+ break;
+ }
+ if (0 == i) {
+ cur_dxe->phy_next_l = 0;
+ } else if ((0 < i) && (i < wcn_ch->desc_num - 1)) {
+ prev_dxe->phy_next_l =
+ cur_ctl->desc_phy_addr;
+ } else if (i == (wcn_ch->desc_num - 1)) {
+ prev_dxe->phy_next_l =
+ cur_ctl->desc_phy_addr;
+ cur_dxe->phy_next_l =
+ wcn_ch->head_blk_ctl->desc_phy_addr;
+ }
+ cur_ctl = cur_ctl->next;
+ prev_dxe = cur_dxe;
+ cur_dxe++;
+ }
+
+ return 0;
+}
+
+static void wcn36xx_dxe_init_tx_bd(struct wcn36xx_dxe_ch *ch,
+ struct wcn36xx_dxe_mem_pool *pool)
+{
+ int i, chunk_size = pool->chunk_size;
+ dma_addr_t bd_phy_addr = pool->phy_addr;
+ void *bd_cpu_addr = pool->virt_addr;
+ struct wcn36xx_dxe_ctl *cur = ch->head_blk_ctl;
+
+ for (i = 0; i < ch->desc_num; i++) {
+ /* Only every second dxe needs a bd pointer; the others
+ * will point to the skb data */
+ if (!(i & 1)) {
+ cur->bd_phy_addr = bd_phy_addr;
+ cur->bd_cpu_addr = bd_cpu_addr;
+ bd_phy_addr += chunk_size;
+ bd_cpu_addr += chunk_size;
+ } else {
+ cur->bd_phy_addr = 0;
+ cur->bd_cpu_addr = NULL;
+ }
+ cur = cur->next;
+ }
+}
+
+static int wcn36xx_dxe_enable_ch_int(struct wcn36xx *wcn, u16 wcn_ch)
+{
+ int reg_data = 0;
+
+ wcn36xx_dxe_read_register(wcn,
+ WCN36XX_DXE_INT_MASK_REG,
+ &reg_data);
+
+ reg_data |= wcn_ch;
+
+ wcn36xx_dxe_write_register(wcn,
+ WCN36XX_DXE_INT_MASK_REG,
+ (int)reg_data);
+ return 0;
+}
+
+static int wcn36xx_dxe_fill_skb(struct wcn36xx_dxe_ctl *ctl)
+{
+ struct wcn36xx_dxe_desc *dxe = ctl->desc;
+ struct sk_buff *skb;
+
+ skb = alloc_skb(WCN36XX_PKT_SIZE, GFP_ATOMIC);
+ if (skb == NULL)
+ return -ENOMEM;
+
+ dxe->dst_addr_l = dma_map_single(NULL,
+ skb_tail_pointer(skb),
+ WCN36XX_PKT_SIZE,
+ DMA_FROM_DEVICE);
+ ctl->skb = skb;
+
+ return 0;
+}
+
+static int wcn36xx_dxe_ch_alloc_skb(struct wcn36xx *wcn,
+ struct wcn36xx_dxe_ch *wcn_ch)
+{
+ int i;
+ struct wcn36xx_dxe_ctl *cur_ctl = NULL;
+
+ cur_ctl = wcn_ch->head_blk_ctl;
+
+ for (i = 0; i < wcn_ch->desc_num; i++) {
+ wcn36xx_dxe_fill_skb(cur_ctl);
+ cur_ctl = cur_ctl->next;
+ }
+
+ return 0;
+}
+
+static void wcn36xx_dxe_ch_free_skbs(struct wcn36xx *wcn,
+ struct wcn36xx_dxe_ch *wcn_ch)
+{
+ struct wcn36xx_dxe_ctl *cur = wcn_ch->head_blk_ctl;
+ int i;
+
+ for (i = 0; i < wcn_ch->desc_num; i++) {
+ kfree_skb(cur->skb);
+ cur = cur->next;
+ }
+}
+
+void wcn36xx_dxe_tx_ack_ind(struct wcn36xx *wcn, u32 status)
+{
+ struct ieee80211_tx_info *info;
+ struct sk_buff *skb;
+ unsigned long flags;
+
+ spin_lock_irqsave(&wcn->dxe_lock, flags);
+ skb = wcn->tx_ack_skb;
+ wcn->tx_ack_skb = NULL;
+ spin_unlock_irqrestore(&wcn->dxe_lock, flags);
+
+ if (!skb) {
+ wcn36xx_warn("Spurious TX complete indication\n");
+ return;
+ }
+
+ info = IEEE80211_SKB_CB(skb);
+
+ if (status == 1)
+ info->flags |= IEEE80211_TX_STAT_ACK;
+
+ wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ack status: %d\n", status);
+
+ ieee80211_tx_status_irqsafe(wcn->hw, skb);
+ ieee80211_wake_queues(wcn->hw);
+}
+
+static void reap_tx_dxes(struct wcn36xx *wcn, struct wcn36xx_dxe_ch *ch)
+{
+ struct wcn36xx_dxe_ctl *ctl = ch->tail_blk_ctl;
+ struct ieee80211_tx_info *info;
+ unsigned long flags;
+
+ /*
+ * Run the do-while loop at least once: when the ring is completely
+ * full, head and tail point to the same element, and a plain while
+ * loop would make no iterations at all.
+ */
+ do {
+ if (ctl->skb) {
+ dma_unmap_single(NULL, ctl->desc->src_addr_l,
+ ctl->skb->len, DMA_TO_DEVICE);
+ info = IEEE80211_SKB_CB(ctl->skb);
+ if (!(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS)) {
+ /* Keep frame until TX status comes */
+ ieee80211_free_txskb(wcn->hw, ctl->skb);
+ }
+ spin_lock_irqsave(&ctl->skb_lock, flags);
+ if (wcn->queues_stopped) {
+ wcn->queues_stopped = false;
+ ieee80211_wake_queues(wcn->hw);
+ }
+ spin_unlock_irqrestore(&ctl->skb_lock, flags);
+
+ ctl->skb = NULL;
+ }
+ ctl = ctl->next;
+ } while (ctl != ch->head_blk_ctl &&
+ !(ctl->desc->ctrl & WCN36XX_DXE_CTRL_VALID_MASK));
+
+ ch->tail_blk_ctl = ctl;
+}
+
+static irqreturn_t wcn36xx_irq_tx_complete(int irq, void *dev)
+{
+ struct wcn36xx *wcn = (struct wcn36xx *)dev;
+ int int_src, int_reason;
+
+ wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_INT_SRC_RAW_REG, &int_src);
+
+ if (int_src & WCN36XX_INT_MASK_CHAN_TX_H) {
+ wcn36xx_dxe_read_register(wcn,
+ WCN36XX_DXE_CH_STATUS_REG_ADDR_TX_H,
+ &int_reason);
+
+ /* TODO: Check int_reason */
+
+ wcn36xx_dxe_write_register(wcn,
+ WCN36XX_DXE_0_INT_CLR,
+ WCN36XX_INT_MASK_CHAN_TX_H);
+
+ wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_ED_CLR,
+ WCN36XX_INT_MASK_CHAN_TX_H);
+ wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ready high\n");
+ reap_tx_dxes(wcn, &wcn->dxe_tx_h_ch);
+ }
+
+ if (int_src & WCN36XX_INT_MASK_CHAN_TX_L) {
+ wcn36xx_dxe_read_register(wcn,
+ WCN36XX_DXE_CH_STATUS_REG_ADDR_TX_L,
+ &int_reason);
+ /* TODO: Check int_reason */
+
+ wcn36xx_dxe_write_register(wcn,
+ WCN36XX_DXE_0_INT_CLR,
+ WCN36XX_INT_MASK_CHAN_TX_L);
+
+ wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_ED_CLR,
+ WCN36XX_INT_MASK_CHAN_TX_L);
+ wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ready low\n");
+ reap_tx_dxes(wcn, &wcn->dxe_tx_l_ch);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t wcn36xx_irq_rx_ready(int irq, void *dev)
+{
+ struct wcn36xx *wcn = (struct wcn36xx *)dev;
+
+ disable_irq_nosync(wcn->rx_irq);
+ wcn36xx_dxe_rx_frame(wcn);
+ enable_irq(wcn->rx_irq);
+ return IRQ_HANDLED;
+}
+
+static int wcn36xx_dxe_request_irqs(struct wcn36xx *wcn)
+{
+ int ret;
+
+ ret = request_irq(wcn->tx_irq, wcn36xx_irq_tx_complete,
+ IRQF_TRIGGER_HIGH, "wcn36xx_tx", wcn);
+ if (ret) {
+ wcn36xx_err("failed to alloc tx irq\n");
+ goto out_err;
+ }
+
+ ret = request_irq(wcn->rx_irq, wcn36xx_irq_rx_ready, IRQF_TRIGGER_HIGH,
+ "wcn36xx_rx", wcn);
+ if (ret) {
+ wcn36xx_err("failed to alloc rx irq\n");
+ goto out_txirq;
+ }
+
+ enable_irq_wake(wcn->rx_irq);
+
+ return 0;
+
+out_txirq:
+ free_irq(wcn->tx_irq, wcn);
+out_err:
+ return ret;
+
+}
+
+static int wcn36xx_rx_handle_packets(struct wcn36xx *wcn,
+ struct wcn36xx_dxe_ch *ch)
+{
+ struct wcn36xx_dxe_ctl *ctl = ch->head_blk_ctl;
+ struct wcn36xx_dxe_desc *dxe = ctl->desc;
+ dma_addr_t dma_addr;
+ struct sk_buff *skb;
+
+ while (!(dxe->ctrl & WCN36XX_DXE_CTRL_VALID_MASK)) {
+ skb = ctl->skb;
+ dma_addr = dxe->dst_addr_l;
+ wcn36xx_dxe_fill_skb(ctl);
+
+ switch (ch->ch_type) {
+ case WCN36XX_DXE_CH_RX_L:
+ dxe->ctrl = WCN36XX_DXE_CTRL_RX_L;
+ wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_ENCH_ADDR,
+ WCN36XX_DXE_INT_CH1_MASK);
+ break;
+ case WCN36XX_DXE_CH_RX_H:
+ dxe->ctrl = WCN36XX_DXE_CTRL_RX_H;
+ wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_ENCH_ADDR,
+ WCN36XX_DXE_INT_CH3_MASK);
+ break;
+ default:
+ wcn36xx_warn("Unknown channel\n");
+ }
+
+ dma_unmap_single(NULL, dma_addr, WCN36XX_PKT_SIZE,
+ DMA_FROM_DEVICE);
+ wcn36xx_rx_skb(wcn, skb);
+ ctl = ctl->next;
+ dxe = ctl->desc;
+ }
+
+ ch->head_blk_ctl = ctl;
+
+ return 0;
+}
+
+void wcn36xx_dxe_rx_frame(struct wcn36xx *wcn)
+{
+ int int_src;
+
+ wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_INT_SRC_RAW_REG, &int_src);
+
+ /* RX_LOW_PRI */
+ if (int_src & WCN36XX_DXE_INT_CH1_MASK) {
+ wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_CLR,
+ WCN36XX_DXE_INT_CH1_MASK);
+ wcn36xx_rx_handle_packets(wcn, &(wcn->dxe_rx_l_ch));
+ }
+
+ /* RX_HIGH_PRI */
+ if (int_src & WCN36XX_DXE_INT_CH3_MASK) {
+ /* Clean up all the INT within this channel */
+ wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_CLR,
+ WCN36XX_DXE_INT_CH3_MASK);
+ wcn36xx_rx_handle_packets(wcn, &(wcn->dxe_rx_h_ch));
+ }
+
+ if (!int_src)
+ wcn36xx_warn("No DXE interrupt pending\n");
+}
+
+int wcn36xx_dxe_allocate_mem_pools(struct wcn36xx *wcn)
+{
+ size_t s;
+ void *cpu_addr;
+
+ /* Allocate BD headers for MGMT frames */
+
+ /* Where this comes from, ask QC */
+ wcn->mgmt_mem_pool.chunk_size = WCN36XX_BD_CHUNK_SIZE +
+ 16 - (WCN36XX_BD_CHUNK_SIZE % 8);
+
+ s = wcn->mgmt_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_H;
+ cpu_addr = dma_alloc_coherent(NULL, s, &wcn->mgmt_mem_pool.phy_addr,
+ GFP_KERNEL);
+ if (!cpu_addr)
+ goto out_err;
+
+ wcn->mgmt_mem_pool.virt_addr = cpu_addr;
+ memset(cpu_addr, 0, s);
+
+ /* Allocate BD headers for DATA frames */
+
+ /* Where this comes from, ask QC */
+ wcn->data_mem_pool.chunk_size = WCN36XX_BD_CHUNK_SIZE +
+ 16 - (WCN36XX_BD_CHUNK_SIZE % 8);
+
+ s = wcn->data_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_L;
+ cpu_addr = dma_alloc_coherent(NULL, s, &wcn->data_mem_pool.phy_addr,
+ GFP_KERNEL);
+ if (!cpu_addr)
+ goto out_err;
+
+ wcn->data_mem_pool.virt_addr = cpu_addr;
+ memset(cpu_addr, 0, s);
+
+ return 0;
+
+out_err:
+ wcn36xx_dxe_free_mem_pools(wcn);
+ wcn36xx_err("Failed to allocate BD mempool\n");
+ return -ENOMEM;
+}
+
+void wcn36xx_dxe_free_mem_pools(struct wcn36xx *wcn)
+{
+ if (wcn->mgmt_mem_pool.virt_addr)
+ dma_free_coherent(NULL, wcn->mgmt_mem_pool.chunk_size *
+ WCN36XX_DXE_CH_DESC_NUMB_TX_H,
+ wcn->mgmt_mem_pool.virt_addr,
+ wcn->mgmt_mem_pool.phy_addr);
+
+ if (wcn->data_mem_pool.virt_addr) {
+ dma_free_coherent(NULL, wcn->data_mem_pool.chunk_size *
+ WCN36XX_DXE_CH_DESC_NUMB_TX_L,
+ wcn->data_mem_pool.virt_addr,
+ wcn->data_mem_pool.phy_addr);
+ }
+}
+
+int wcn36xx_dxe_tx_frame(struct wcn36xx *wcn,
+ struct wcn36xx_vif *vif_priv,
+ struct sk_buff *skb,
+ bool is_low)
+{
+ struct wcn36xx_dxe_ctl *ctl = NULL;
+ struct wcn36xx_dxe_desc *desc = NULL;
+ struct wcn36xx_dxe_ch *ch = NULL;
+ unsigned long flags;
+
+ ch = is_low ? &wcn->dxe_tx_l_ch : &wcn->dxe_tx_h_ch;
+
+ ctl = ch->head_blk_ctl;
+
+ spin_lock_irqsave(&ctl->next->skb_lock, flags);
+
+ /*
+ * If the skb is not NULL, we have reached the tail of the ring and
+ * the ring is full. Stop the queues so mac80211 backs off until the
+ * ring has an empty slot again.
+ */
+ if (NULL != ctl->next->skb) {
+ ieee80211_stop_queues(wcn->hw);
+ wcn->queues_stopped = true;
+ spin_unlock_irqrestore(&ctl->next->skb_lock, flags);
+ return -EBUSY;
+ }
+ spin_unlock_irqrestore(&ctl->next->skb_lock, flags);
+
+ ctl->skb = NULL;
+ desc = ctl->desc;
+
+ /* Set source address of the BD we send */
+ desc->src_addr_l = ctl->bd_phy_addr;
+
+ desc->dst_addr_l = ch->dxe_wq;
+ desc->fr_len = sizeof(struct wcn36xx_tx_bd);
+ desc->ctrl = ch->ctrl_bd;
+
+ wcn36xx_dbg(WCN36XX_DBG_DXE, "DXE TX\n");
+
+ wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "DESC1 >>> ",
+ (char *)desc, sizeof(*desc));
+ wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP,
+ "BD >>> ", (char *)ctl->bd_cpu_addr,
+ sizeof(struct wcn36xx_tx_bd));
+
+ /* Set source address of the SKB we send */
+ ctl = ctl->next;
+ ctl->skb = skb;
+ desc = ctl->desc;
+ if (ctl->bd_cpu_addr) {
+ wcn36xx_err("bd_cpu_addr cannot be NULL for skb DXE\n");
+ return -EINVAL;
+ }
+
+ desc->src_addr_l = dma_map_single(NULL,
+ ctl->skb->data,
+ ctl->skb->len,
+ DMA_TO_DEVICE);
+
+ desc->dst_addr_l = ch->dxe_wq;
+ desc->fr_len = ctl->skb->len;
+
+ /* set dxe descriptor to VALID */
+ desc->ctrl = ch->ctrl_skb;
+
+ wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "DESC2 >>> ",
+ (char *)desc, sizeof(*desc));
+ wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "SKB >>> ",
+ (char *)ctl->skb->data, ctl->skb->len);
+
+ /* Move the head of the ring to the next empty descriptor */
+ ch->head_blk_ctl = ctl->next;
+
+ /*
+ * When connected and trying to send a data frame, the chip may be in
+ * sleep mode and writing to the register will not wake it up. Instead,
+ * notify the chip about the new frame through the SMSM bus.
+ */
+ if (is_low && vif_priv->pw_state == WCN36XX_BMPS) {
+ wcn->ctrl_ops->smsm_change_state(
+ 0,
+ WCN36XX_SMSM_WLAN_TX_ENABLE);
+ } else {
+ /* indicate End Of Packet and generate interrupt on descriptor
+ * done.
+ */
+ wcn36xx_dxe_write_register(wcn,
+ ch->reg_ctrl, ch->def_ctrl);
+ }
+
+ return 0;
+}
+
+int wcn36xx_dxe_init(struct wcn36xx *wcn)
+{
+ int reg_data = 0, ret;
+
+ reg_data = WCN36XX_DXE_REG_RESET;
+ wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_REG_CSR_RESET, reg_data);
+
+ /* Setting interrupt path */
+ reg_data = WCN36XX_DXE_CCU_INT;
+ wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_REG_CCU_INT, reg_data);
+
+ /***************************************/
+ /* Init descriptors for TX LOW channel */
+ /***************************************/
+ wcn36xx_dxe_init_descs(&wcn->dxe_tx_l_ch);
+ wcn36xx_dxe_init_tx_bd(&wcn->dxe_tx_l_ch, &wcn->data_mem_pool);
+
+ /* Write channel head to a NEXT register */
+ wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_TX_L,
+ wcn->dxe_tx_l_ch.head_blk_ctl->desc_phy_addr);
+
+ /* Program DMA destination addr for TX LOW */
+ wcn36xx_dxe_write_register(wcn,
+ WCN36XX_DXE_CH_DEST_ADDR_TX_L,
+ WCN36XX_DXE_WQ_TX_L);
+
+ wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_REG_CH_EN, &reg_data);
+ wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_L);
+
+ /***************************************/
+ /* Init descriptors for TX HIGH channel */
+ /***************************************/
+ wcn36xx_dxe_init_descs(&wcn->dxe_tx_h_ch);
+ wcn36xx_dxe_init_tx_bd(&wcn->dxe_tx_h_ch, &wcn->mgmt_mem_pool);
+
+ /* Write channel head to a NEXT register */
+ wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_TX_H,
+ wcn->dxe_tx_h_ch.head_blk_ctl->desc_phy_addr);
+
+ /* Program DMA destination addr for TX HIGH */
+ wcn36xx_dxe_write_register(wcn,
+ WCN36XX_DXE_CH_DEST_ADDR_TX_H,
+ WCN36XX_DXE_WQ_TX_H);
+
+ wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_REG_CH_EN, &reg_data);
+
+ /* Enable channel interrupts */
+ wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_H);
+
+ /***************************************/
+ /* Init descriptors for RX LOW channel */
+ /***************************************/
+ wcn36xx_dxe_init_descs(&wcn->dxe_rx_l_ch);
+
+ /* For RX we need to preallocate buffers */
+ wcn36xx_dxe_ch_alloc_skb(wcn, &wcn->dxe_rx_l_ch);
+
+ /* Write channel head to a NEXT register */
+ wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_RX_L,
+ wcn->dxe_rx_l_ch.head_blk_ctl->desc_phy_addr);
+
+ /* Write DMA source address */
+ wcn36xx_dxe_write_register(wcn,
+ WCN36XX_DXE_CH_SRC_ADDR_RX_L,
+ WCN36XX_DXE_WQ_RX_L);
+
+ /* Program preallocated destination address */
+ wcn36xx_dxe_write_register(wcn,
+ WCN36XX_DXE_CH_DEST_ADDR_RX_L,
+ wcn->dxe_rx_l_ch.head_blk_ctl->desc->phy_next_l);
+
+ /* Enable default control registers */
+ wcn36xx_dxe_write_register(wcn,
+ WCN36XX_DXE_REG_CTL_RX_L,
+ WCN36XX_DXE_CH_DEFAULT_CTL_RX_L);
+
+ /* Enable channel interrupts */
+ wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_L);
+
+ /***************************************/
+ /* Init descriptors for RX HIGH channel */
+ /***************************************/
+ wcn36xx_dxe_init_descs(&wcn->dxe_rx_h_ch);
+
+ /* For RX we need to preallocate buffers */
+ wcn36xx_dxe_ch_alloc_skb(wcn, &wcn->dxe_rx_h_ch);
+
+ /* Write channel head to a NEXT register */
+ wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_RX_H,
+ wcn->dxe_rx_h_ch.head_blk_ctl->desc_phy_addr);
+
+ /* Write DMA source address */
+ wcn36xx_dxe_write_register(wcn,
+ WCN36XX_DXE_CH_SRC_ADDR_RX_H,
+ WCN36XX_DXE_WQ_RX_H);
+
+ /* Program preallocated destination address */
+ wcn36xx_dxe_write_register(wcn,
+ WCN36XX_DXE_CH_DEST_ADDR_RX_H,
+ wcn->dxe_rx_h_ch.head_blk_ctl->desc->phy_next_l);
+
+ /* Enable default control registers */
+ wcn36xx_dxe_write_register(wcn,
+ WCN36XX_DXE_REG_CTL_RX_H,
+ WCN36XX_DXE_CH_DEFAULT_CTL_RX_H);
+
+ /* Enable channel interrupts */
+ wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_H);
+
+ ret = wcn36xx_dxe_request_irqs(wcn);
+ if (ret < 0)
+ goto out_err;
+
+ return 0;
+
+out_err:
+ return ret;
+}
+
+void wcn36xx_dxe_deinit(struct wcn36xx *wcn)
+{
+ free_irq(wcn->tx_irq, wcn);
+ free_irq(wcn->rx_irq, wcn);
+
+ if (wcn->tx_ack_skb) {
+ ieee80211_tx_status_irqsafe(wcn->hw, wcn->tx_ack_skb);
+ wcn->tx_ack_skb = NULL;
+ }
+
+ wcn36xx_dxe_ch_free_skbs(wcn, &wcn->dxe_rx_l_ch);
+ wcn36xx_dxe_ch_free_skbs(wcn, &wcn->dxe_rx_h_ch);
+}
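A structural note on dxe.c: wcn36xx_dxe_allocate_ctl_block() links the control blocks into a circular singly linked list — the last block's next pointer goes back to the head — which is why reap_tx_dxes() and wcn36xx_rx_handle_packets() can simply keep advancing with ctl = ctl->next. A stripped-down sketch of the same allocation pattern, with a generic node type standing in for struct wcn36xx_dxe_ctl:

        #include <linux/slab.h>

        struct ring_node {
                struct ring_node *next;
                int order;
        };

        /* Allocate n nodes linked into a ring; returns the head or NULL.
         * (Unwinding a partially built ring on failure is omitted here;
         * the driver handles that in wcn36xx_dxe_free_ctl_block().)
         */
        static struct ring_node *alloc_ring(int n)
        {
                struct ring_node *head = NULL, *prev = NULL, *cur;
                int i;

                if (n <= 0)
                        return NULL;

                for (i = 0; i < n; i++) {
                        cur = kzalloc(sizeof(*cur), GFP_KERNEL);
                        if (!cur)
                                return NULL;
                        cur->order = i;
                        if (!head)
                                head = cur;
                        else
                                prev->next = cur;
                        prev = cur;
                }
                prev->next = head;      /* close the ring */

                return head;
        }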
diff --git a/drivers/net/wireless/ath/wcn36xx/dxe.h b/drivers/net/wireless/ath/wcn36xx/dxe.h
new file mode 100644
index 0000000..c88562f
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/dxe.h
@@ -0,0 +1,284 @@
+/*
+ * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _DXE_H_
+#define _DXE_H_
+
+#include "wcn36xx.h"
+
+/*
+TX_LOW = DMA0
+TX_HIGH = DMA4
+RX_LOW = DMA1
+RX_HIGH = DMA3
+H2H_TEST_RX_TX = DMA2
+*/
+
+/* DXE registers */
+#define WCN36XX_DXE_MEM_BASE 0x03000000
+#define WCN36XX_DXE_MEM_REG 0x202000
+
+#define WCN36XX_DXE_CCU_INT 0xA0011
+#define WCN36XX_DXE_REG_CCU_INT 0x200b10
+
+/* TODO: this must be calculated properly, not hardcoded */
+#define WCN36XX_DXE_CTRL_TX_L 0x328a44
+#define WCN36XX_DXE_CTRL_TX_H 0x32ce44
+#define WCN36XX_DXE_CTRL_RX_L 0x12ad2f
+#define WCN36XX_DXE_CTRL_RX_H 0x12d12f
+#define WCN36XX_DXE_CTRL_TX_H_BD 0x30ce45
+#define WCN36XX_DXE_CTRL_TX_H_SKB 0x32ce4d
+#define WCN36XX_DXE_CTRL_TX_L_BD 0x308a45
+#define WCN36XX_DXE_CTRL_TX_L_SKB 0x328a4d
+
+/* TODO: this must be calculated properly, not hardcoded */
+#define WCN36XX_DXE_WQ_TX_L 0x17
+#define WCN36XX_DXE_WQ_TX_H 0x17
+#define WCN36XX_DXE_WQ_RX_L 0xB
+#define WCN36XX_DXE_WQ_RX_H 0x4
+
+/* DXE descriptor control field */
+#define WCN36XX_DXE_CTRL_VALID_MASK (0x00000001)
+
+/* TODO: this must be calculated properly, not hardcoded */
+/* DXE default control register values */
+#define WCN36XX_DXE_CH_DEFAULT_CTL_RX_L 0x847EAD2F
+#define WCN36XX_DXE_CH_DEFAULT_CTL_RX_H 0x84FED12F
+#define WCN36XX_DXE_CH_DEFAULT_CTL_TX_H 0x853ECF4D
+#define WCN36XX_DXE_CH_DEFAULT_CTL_TX_L 0x843e8b4d
+
+/* Common DXE registers */
+#define WCN36XX_DXE_MEM_CSR (WCN36XX_DXE_MEM_REG + 0x00)
+#define WCN36XX_DXE_REG_CSR_RESET (WCN36XX_DXE_MEM_REG + 0x00)
+#define WCN36XX_DXE_ENCH_ADDR (WCN36XX_DXE_MEM_REG + 0x04)
+#define WCN36XX_DXE_REG_CH_EN (WCN36XX_DXE_MEM_REG + 0x08)
+#define WCN36XX_DXE_REG_CH_DONE (WCN36XX_DXE_MEM_REG + 0x0C)
+#define WCN36XX_DXE_REG_CH_ERR (WCN36XX_DXE_MEM_REG + 0x10)
+#define WCN36XX_DXE_INT_MASK_REG (WCN36XX_DXE_MEM_REG + 0x18)
+#define WCN36XX_DXE_INT_SRC_RAW_REG (WCN36XX_DXE_MEM_REG + 0x20)
+ /* #define WCN36XX_DXE_INT_CH6_MASK 0x00000040 */
+ /* #define WCN36XX_DXE_INT_CH5_MASK 0x00000020 */
+ #define WCN36XX_DXE_INT_CH4_MASK 0x00000010
+ #define WCN36XX_DXE_INT_CH3_MASK 0x00000008
+ /* #define WCN36XX_DXE_INT_CH2_MASK 0x00000004 */
+ #define WCN36XX_DXE_INT_CH1_MASK 0x00000002
+ #define WCN36XX_DXE_INT_CH0_MASK 0x00000001
+#define WCN36XX_DXE_0_INT_CLR (WCN36XX_DXE_MEM_REG + 0x30)
+#define WCN36XX_DXE_0_INT_ED_CLR (WCN36XX_DXE_MEM_REG + 0x34)
+#define WCN36XX_DXE_0_INT_DONE_CLR (WCN36XX_DXE_MEM_REG + 0x38)
+#define WCN36XX_DXE_0_INT_ERR_CLR (WCN36XX_DXE_MEM_REG + 0x3C)
+
+#define WCN36XX_DXE_0_CH0_STATUS (WCN36XX_DXE_MEM_REG + 0x404)
+#define WCN36XX_DXE_0_CH1_STATUS (WCN36XX_DXE_MEM_REG + 0x444)
+#define WCN36XX_DXE_0_CH2_STATUS (WCN36XX_DXE_MEM_REG + 0x484)
+#define WCN36XX_DXE_0_CH3_STATUS (WCN36XX_DXE_MEM_REG + 0x4C4)
+#define WCN36XX_DXE_0_CH4_STATUS (WCN36XX_DXE_MEM_REG + 0x504)
+
+#define WCN36XX_DXE_REG_RESET 0x5c89
+
+/* Temporary BMU Workqueue 4 */
+#define WCN36XX_DXE_BMU_WQ_RX_LOW 0xB
+#define WCN36XX_DXE_BMU_WQ_RX_HIGH 0x4
+/* DMA channel offset */
+#define WCN36XX_DXE_TX_LOW_OFFSET 0x400
+#define WCN36XX_DXE_TX_HIGH_OFFSET 0x500
+#define WCN36XX_DXE_RX_LOW_OFFSET 0x440
+#define WCN36XX_DXE_RX_HIGH_OFFSET 0x4C0
+
+/* Address of the next DXE descriptor */
+#define WCN36XX_DXE_CH_NEXT_DESC_ADDR 0x001C
+#define WCN36XX_DXE_CH_NEXT_DESC_ADDR_TX_L (WCN36XX_DXE_MEM_REG + \
+ WCN36XX_DXE_TX_LOW_OFFSET + \
+ WCN36XX_DXE_CH_NEXT_DESC_ADDR)
+#define WCN36XX_DXE_CH_NEXT_DESC_ADDR_TX_H (WCN36XX_DXE_MEM_REG + \
+ WCN36XX_DXE_TX_HIGH_OFFSET + \
+ WCN36XX_DXE_CH_NEXT_DESC_ADDR)
+#define WCN36XX_DXE_CH_NEXT_DESC_ADDR_RX_L (WCN36XX_DXE_MEM_REG + \
+ WCN36XX_DXE_RX_LOW_OFFSET + \
+ WCN36XX_DXE_CH_NEXT_DESC_ADDR)
+#define WCN36XX_DXE_CH_NEXT_DESC_ADDR_RX_H (WCN36XX_DXE_MEM_REG + \
+ WCN36XX_DXE_RX_HIGH_OFFSET + \
+ WCN36XX_DXE_CH_NEXT_DESC_ADDR)
+
+/* DXE Descriptor source address */
+#define WCN36XX_DXE_CH_SRC_ADDR 0x000C
+#define WCN36XX_DXE_CH_SRC_ADDR_RX_L (WCN36XX_DXE_MEM_REG + \
+ WCN36XX_DXE_RX_LOW_OFFSET + \
+ WCN36XX_DXE_CH_SRC_ADDR)
+#define WCN36XX_DXE_CH_SRC_ADDR_RX_H (WCN36XX_DXE_MEM_REG + \
+ WCN36XX_DXE_RX_HIGH_OFFSET + \
+ WCN36XX_DXE_CH_SRC_ADDR)
+
+/* DXE Descriptor address destination address */
+#define WCN36XX_DXE_CH_DEST_ADDR 0x0014
+#define WCN36XX_DXE_CH_DEST_ADDR_TX_L (WCN36XX_DXE_MEM_REG + \
+ WCN36XX_DXE_TX_LOW_OFFSET + \
+ WCN36XX_DXE_CH_DEST_ADDR)
+#define WCN36XX_DXE_CH_DEST_ADDR_TX_H (WCN36XX_DXE_MEM_REG + \
+ WCN36XX_DXE_TX_HIGH_OFFSET + \
+ WCN36XX_DXE_CH_DEST_ADDR)
+#define WCN36XX_DXE_CH_DEST_ADDR_RX_L (WCN36XX_DXE_MEM_REG + \
+ WCN36XX_DXE_RX_LOW_OFFSET + \
+ WCN36XX_DXE_CH_DEST_ADDR)
+#define WCN36XX_DXE_CH_DEST_ADDR_RX_H (WCN36XX_DXE_MEM_REG + \
+ WCN36XX_DXE_RX_HIGH_OFFSET + \
+ WCN36XX_DXE_CH_DEST_ADDR)
+
+/* Interrupt status */
+#define WCN36XX_DXE_CH_STATUS_REG_ADDR 0x0004
+#define WCN36XX_DXE_CH_STATUS_REG_ADDR_TX_L (WCN36XX_DXE_MEM_REG + \
+ WCN36XX_DXE_TX_LOW_OFFSET + \
+ WCN36XX_DXE_CH_STATUS_REG_ADDR)
+#define WCN36XX_DXE_CH_STATUS_REG_ADDR_TX_H (WCN36XX_DXE_MEM_REG + \
+ WCN36XX_DXE_TX_HIGH_OFFSET + \
+ WCN36XX_DXE_CH_STATUS_REG_ADDR)
+#define WCN36XX_DXE_CH_STATUS_REG_ADDR_RX_L (WCN36XX_DXE_MEM_REG + \
+ WCN36XX_DXE_RX_LOW_OFFSET + \
+ WCN36XX_DXE_CH_STATUS_REG_ADDR)
+#define WCN36XX_DXE_CH_STATUS_REG_ADDR_RX_H (WCN36XX_DXE_MEM_REG + \
+ WCN36XX_DXE_RX_HIGH_OFFSET + \
+ WCN36XX_DXE_CH_STATUS_REG_ADDR)
+
+
+/* DXE default control register */
+#define WCN36XX_DXE_REG_CTL_RX_L (WCN36XX_DXE_MEM_REG + \
+ WCN36XX_DXE_RX_LOW_OFFSET)
+#define WCN36XX_DXE_REG_CTL_RX_H (WCN36XX_DXE_MEM_REG + \
+ WCN36XX_DXE_RX_HIGH_OFFSET)
+#define WCN36XX_DXE_REG_CTL_TX_H (WCN36XX_DXE_MEM_REG + \
+ WCN36XX_DXE_TX_HIGH_OFFSET)
+#define WCN36XX_DXE_REG_CTL_TX_L (WCN36XX_DXE_MEM_REG + \
+ WCN36XX_DXE_TX_LOW_OFFSET)
+
+#define WCN36XX_SMSM_WLAN_TX_ENABLE 0x00000400
+#define WCN36XX_SMSM_WLAN_TX_RINGS_EMPTY 0x00000200
+
+
+/* Interrupt control channel mask */
+#define WCN36XX_INT_MASK_CHAN_TX_L 0x00000001
+#define WCN36XX_INT_MASK_CHAN_RX_L 0x00000002
+#define WCN36XX_INT_MASK_CHAN_RX_H 0x00000008
+#define WCN36XX_INT_MASK_CHAN_TX_H 0x00000010
+
+#define WCN36XX_BD_CHUNK_SIZE 128
+
+#define WCN36XX_PKT_SIZE 0xF20
+enum wcn36xx_dxe_ch_type {
+ WCN36XX_DXE_CH_TX_L,
+ WCN36XX_DXE_CH_TX_H,
+ WCN36XX_DXE_CH_RX_L,
+ WCN36XX_DXE_CH_RX_H
+};
+
+/* number of descriptors per channel */
+enum wcn36xx_dxe_ch_desc_num {
+ WCN36XX_DXE_CH_DESC_NUMB_TX_L = 128,
+ WCN36XX_DXE_CH_DESC_NUMB_TX_H = 10,
+ WCN36XX_DXE_CH_DESC_NUMB_RX_L = 512,
+ WCN36XX_DXE_CH_DESC_NUMB_RX_H = 40
+};
+
+/**
+ * struct wcn36xx_dxe_desc - describes descriptor of one DXE buffer
+ *
+ * @ctrl: is a union that consists of following bits:
+ * union {
+ * u32 valid :1; //0 = DMA stop, 1 = DMA continue with this
+ * //descriptor
+ * u32 transfer_type :2; //0 = Host to Host space
+ * u32 eop :1; //End of Packet
+ * u32 bd_handling :1; //if transferType = Host to BMU, then 0
+ * // means first 128 bytes contain BD, and 1
+ * // means create new empty BD
+ * u32 siq :1; // SIQ
+ * u32 diq :1; // DIQ
+ * u32 pdu_rel :1; //0 = don't release BD and PDUs when done,
+ * // 1 = release them
+ * u32 bthld_sel :4; //BMU Threshold Select
+ * u32 prio :3; //Specifies the priority level to use for
+ * // the transfer
+ * u32 stop_channel :1; //1 = DMA stops processing further, channel
+ * //requires re-enabling after this
+ * u32 intr :1; //Interrupt on Descriptor Done
+ * u32 rsvd :1; //reserved
+ * u32 size :14;//14 bits used - ignored for BMU transfers,
+ * //only used for host to host transfers?
+ * } ctrl;
+ */
+struct wcn36xx_dxe_desc {
+ u32 ctrl;
+ u32 fr_len;
+
+ u32 src_addr_l;
+ u32 dst_addr_l;
+ u32 phy_next_l;
+ u32 src_addr_h;
+ u32 dst_addr_h;
+ u32 phy_next_h;
+} __packed;
+
+/* DXE Control block */
+struct wcn36xx_dxe_ctl {
+ struct wcn36xx_dxe_ctl *next;
+ struct wcn36xx_dxe_desc *desc;
+ unsigned int desc_phy_addr;
+ int ctl_blk_order;
+ struct sk_buff *skb;
+ spinlock_t skb_lock;
+ void *bd_cpu_addr;
+ dma_addr_t bd_phy_addr;
+};
+
+struct wcn36xx_dxe_ch {
+ enum wcn36xx_dxe_ch_type ch_type;
+ void *cpu_addr;
+ dma_addr_t dma_addr;
+ enum wcn36xx_dxe_ch_desc_num desc_num;
+ /* DXE control block ring */
+ struct wcn36xx_dxe_ctl *head_blk_ctl;
+ struct wcn36xx_dxe_ctl *tail_blk_ctl;
+
+ /* DXE channel specific configs */
+ u32 dxe_wq;
+ u32 ctrl_bd;
+ u32 ctrl_skb;
+ u32 reg_ctrl;
+ u32 def_ctrl;
+};
+
+/* Memory Pool for BD headers */
+struct wcn36xx_dxe_mem_pool {
+ int chunk_size;
+ void *virt_addr;
+ dma_addr_t phy_addr;
+};
+
+struct wcn36xx_vif;
+int wcn36xx_dxe_allocate_mem_pools(struct wcn36xx *wcn);
+void wcn36xx_dxe_free_mem_pools(struct wcn36xx *wcn);
+void wcn36xx_dxe_rx_frame(struct wcn36xx *wcn);
+int wcn36xx_dxe_alloc_ctl_blks(struct wcn36xx *wcn);
+void wcn36xx_dxe_free_ctl_blks(struct wcn36xx *wcn);
+int wcn36xx_dxe_init(struct wcn36xx *wcn);
+void wcn36xx_dxe_deinit(struct wcn36xx *wcn);
+int wcn36xx_dxe_init_channels(struct wcn36xx *wcn);
+int wcn36xx_dxe_tx_frame(struct wcn36xx *wcn,
+ struct wcn36xx_vif *vif_priv,
+ struct sk_buff *skb,
+ bool is_low);
+void wcn36xx_dxe_tx_ack_ind(struct wcn36xx *wcn, u32 status);
+void *wcn36xx_dxe_get_next_bd(struct wcn36xx *wcn, bool is_low);
+#endif /* _DXE_H_ */
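The ctrl word documented above is consumed through WCN36XX_DXE_CTRL_VALID_MASK: while the valid bit is clear, the DMA engine is finished with the descriptor and the host may reclaim it. A minimal sketch of the reap loop dxe.c builds on this bit, using the types declared in this header (rearm_ctrl would be one of the WCN36XX_DXE_CTRL_* values with the valid bit set):

        /* Walk completed descriptors until one is still owned by DMA;
         * re-arming each reaped descriptor lets the walk terminate once
         * it wraps around to a busy slot.
         */
        static void reap_done_descs(struct wcn36xx_dxe_ch *ch, u32 rearm_ctrl)
        {
                struct wcn36xx_dxe_ctl *ctl = ch->head_blk_ctl;

                while (!(ctl->desc->ctrl & WCN36XX_DXE_CTRL_VALID_MASK)) {
                        /* hand the finished skb up and refill it here */
                        ctl->desc->ctrl = rearm_ctrl;   /* back to DMA */
                        ctl = ctl->next;
                }

                ch->head_blk_ctl = ctl;
        }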
diff --git a/drivers/net/wireless/ath/wcn36xx/hal.h b/drivers/net/wireless/ath/wcn36xx/hal.h
new file mode 100644
index 0000000..c02dbc6
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/hal.h
@@ -0,0 +1,4657 @@
+/*
+ * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _HAL_H_
+#define _HAL_H_
+
+/*---------------------------------------------------------------------------
+ API VERSIONING INFORMATION
+
+ The RIVA API is versioned as MAJOR.MINOR.VERSION.REVISION
+ The MAJOR is incremented for major product/architecture changes
+ (and then MINOR/VERSION/REVISION are zeroed)
+ The MINOR is incremented for minor product/architecture changes
+ (and then VERSION/REVISION are zeroed)
+ The VERSION is incremented if a significant API change occurs
+ (and then REVISION is zeroed)
+ The REVISION is incremented if an insignificant API change occurs
+ or if a new API is added
+ All values are in the range 0..255 (i.e. they are 8-bit values)
+ ---------------------------------------------------------------------------*/
+#define WCN36XX_HAL_VER_MAJOR 1
+#define WCN36XX_HAL_VER_MINOR 4
+#define WCN36XX_HAL_VER_VERSION 1
+#define WCN36XX_HAL_VER_REVISION 2
+
+/* This is to force the compiler to use the maximum size of an int (4 bytes) */
+#define WCN36XX_HAL_MAX_ENUM_SIZE 0x7FFFFFFF
+#define WCN36XX_HAL_MSG_TYPE_MAX_ENUM_SIZE 0x7FFF
+
+/* Max no. of transmit categories */
+#define STACFG_MAX_TC 8
+
+/* The maximum value of access category */
+#define WCN36XX_HAL_MAX_AC 4
+
+#define WCN36XX_HAL_IPV4_ADDR_LEN 4
+
+#define WALN_HAL_STA_INVALID_IDX 0xFF
+#define WCN36XX_HAL_BSS_INVALID_IDX 0xFF
+
+/* Default Beacon template size */
+#define BEACON_TEMPLATE_SIZE 0x180
+
+/* Param Change Bitmap sent to HAL */
+#define PARAM_BCN_INTERVAL_CHANGED (1 << 0)
+#define PARAM_SHORT_PREAMBLE_CHANGED (1 << 1)
+#define PARAM_SHORT_SLOT_TIME_CHANGED (1 << 2)
+#define PARAM_llACOEXIST_CHANGED (1 << 3)
+#define PARAM_llBCOEXIST_CHANGED (1 << 4)
+#define PARAM_llGCOEXIST_CHANGED (1 << 5)
+#define PARAM_HT20MHZCOEXIST_CHANGED (1<<6)
+#define PARAM_NON_GF_DEVICES_PRESENT_CHANGED (1<<7)
+#define PARAM_RIFS_MODE_CHANGED (1<<8)
+#define PARAM_LSIG_TXOP_FULL_SUPPORT_CHANGED (1<<9)
+#define PARAM_OBSS_MODE_CHANGED (1<<10)
+#define PARAM_BEACON_UPDATE_MASK \
+ (PARAM_BCN_INTERVAL_CHANGED | \
+ PARAM_SHORT_PREAMBLE_CHANGED | \
+ PARAM_SHORT_SLOT_TIME_CHANGED | \
+ PARAM_llACOEXIST_CHANGED | \
+ PARAM_llBCOEXIST_CHANGED | \
+ PARAM_llGCOEXIST_CHANGED | \
+ PARAM_HT20MHZCOEXIST_CHANGED | \
+ PARAM_NON_GF_DEVICES_PRESENT_CHANGED | \
+ PARAM_RIFS_MODE_CHANGED | \
+ PARAM_LSIG_TXOP_FULL_SUPPORT_CHANGED | \
+ PARAM_OBSS_MODE_CHANGED)
+
+/* dump command response Buffer size */
+#define DUMPCMD_RSP_BUFFER 100
+
+/* version string max length (including NULL) */
+#define WCN36XX_HAL_VERSION_LENGTH 64
+
+/* message types for messages exchanged between WDI and HAL */
+enum wcn36xx_hal_host_msg_type {
+ /* Init/De-Init */
+ WCN36XX_HAL_START_REQ = 0,
+ WCN36XX_HAL_START_RSP = 1,
+ WCN36XX_HAL_STOP_REQ = 2,
+ WCN36XX_HAL_STOP_RSP = 3,
+
+ /* Scan */
+ WCN36XX_HAL_INIT_SCAN_REQ = 4,
+ WCN36XX_HAL_INIT_SCAN_RSP = 5,
+ WCN36XX_HAL_START_SCAN_REQ = 6,
+ WCN36XX_HAL_START_SCAN_RSP = 7,
+ WCN36XX_HAL_END_SCAN_REQ = 8,
+ WCN36XX_HAL_END_SCAN_RSP = 9,
+ WCN36XX_HAL_FINISH_SCAN_REQ = 10,
+ WCN36XX_HAL_FINISH_SCAN_RSP = 11,
+
+ /* HW STA configuration/deconfiguration */
+ WCN36XX_HAL_CONFIG_STA_REQ = 12,
+ WCN36XX_HAL_CONFIG_STA_RSP = 13,
+ WCN36XX_HAL_DELETE_STA_REQ = 14,
+ WCN36XX_HAL_DELETE_STA_RSP = 15,
+ WCN36XX_HAL_CONFIG_BSS_REQ = 16,
+ WCN36XX_HAL_CONFIG_BSS_RSP = 17,
+ WCN36XX_HAL_DELETE_BSS_REQ = 18,
+ WCN36XX_HAL_DELETE_BSS_RSP = 19,
+
+ /* Infra STA association */
+ WCN36XX_HAL_JOIN_REQ = 20,
+ WCN36XX_HAL_JOIN_RSP = 21,
+ WCN36XX_HAL_POST_ASSOC_REQ = 22,
+ WCN36XX_HAL_POST_ASSOC_RSP = 23,
+
+ /* Security */
+ WCN36XX_HAL_SET_BSSKEY_REQ = 24,
+ WCN36XX_HAL_SET_BSSKEY_RSP = 25,
+ WCN36XX_HAL_SET_STAKEY_REQ = 26,
+ WCN36XX_HAL_SET_STAKEY_RSP = 27,
+ WCN36XX_HAL_RMV_BSSKEY_REQ = 28,
+ WCN36XX_HAL_RMV_BSSKEY_RSP = 29,
+ WCN36XX_HAL_RMV_STAKEY_REQ = 30,
+ WCN36XX_HAL_RMV_STAKEY_RSP = 31,
+
+ /* Qos Related */
+ WCN36XX_HAL_ADD_TS_REQ = 32,
+ WCN36XX_HAL_ADD_TS_RSP = 33,
+ WCN36XX_HAL_DEL_TS_REQ = 34,
+ WCN36XX_HAL_DEL_TS_RSP = 35,
+ WCN36XX_HAL_UPD_EDCA_PARAMS_REQ = 36,
+ WCN36XX_HAL_UPD_EDCA_PARAMS_RSP = 37,
+ WCN36XX_HAL_ADD_BA_REQ = 38,
+ WCN36XX_HAL_ADD_BA_RSP = 39,
+ WCN36XX_HAL_DEL_BA_REQ = 40,
+ WCN36XX_HAL_DEL_BA_RSP = 41,
+
+ WCN36XX_HAL_CH_SWITCH_REQ = 42,
+ WCN36XX_HAL_CH_SWITCH_RSP = 43,
+ WCN36XX_HAL_SET_LINK_ST_REQ = 44,
+ WCN36XX_HAL_SET_LINK_ST_RSP = 45,
+ WCN36XX_HAL_GET_STATS_REQ = 46,
+ WCN36XX_HAL_GET_STATS_RSP = 47,
+ WCN36XX_HAL_UPDATE_CFG_REQ = 48,
+ WCN36XX_HAL_UPDATE_CFG_RSP = 49,
+
+ WCN36XX_HAL_MISSED_BEACON_IND = 50,
+ WCN36XX_HAL_UNKNOWN_ADDR2_FRAME_RX_IND = 51,
+ WCN36XX_HAL_MIC_FAILURE_IND = 52,
+ WCN36XX_HAL_FATAL_ERROR_IND = 53,
+ WCN36XX_HAL_SET_KEYDONE_MSG = 54,
+
+ /* NV Interface */
+ WCN36XX_HAL_DOWNLOAD_NV_REQ = 55,
+ WCN36XX_HAL_DOWNLOAD_NV_RSP = 56,
+
+ WCN36XX_HAL_ADD_BA_SESSION_REQ = 57,
+ WCN36XX_HAL_ADD_BA_SESSION_RSP = 58,
+ WCN36XX_HAL_TRIGGER_BA_REQ = 59,
+ WCN36XX_HAL_TRIGGER_BA_RSP = 60,
+ WCN36XX_HAL_UPDATE_BEACON_REQ = 61,
+ WCN36XX_HAL_UPDATE_BEACON_RSP = 62,
+ WCN36XX_HAL_SEND_BEACON_REQ = 63,
+ WCN36XX_HAL_SEND_BEACON_RSP = 64,
+
+ WCN36XX_HAL_SET_BCASTKEY_REQ = 65,
+ WCN36XX_HAL_SET_BCASTKEY_RSP = 66,
+ WCN36XX_HAL_DELETE_STA_CONTEXT_IND = 67,
+ WCN36XX_HAL_UPDATE_PROBE_RSP_TEMPLATE_REQ = 68,
+ WCN36XX_HAL_UPDATE_PROBE_RSP_TEMPLATE_RSP = 69,
+
+ /* PTT interface support */
+ WCN36XX_HAL_PROCESS_PTT_REQ = 70,
+ WCN36XX_HAL_PROCESS_PTT_RSP = 71,
+
+ /* BTAMP related events */
+ WCN36XX_HAL_SIGNAL_BTAMP_EVENT_REQ = 72,
+ WCN36XX_HAL_SIGNAL_BTAMP_EVENT_RSP = 73,
+ WCN36XX_HAL_TL_HAL_FLUSH_AC_REQ = 74,
+ WCN36XX_HAL_TL_HAL_FLUSH_AC_RSP = 75,
+
+ WCN36XX_HAL_ENTER_IMPS_REQ = 76,
+ WCN36XX_HAL_EXIT_IMPS_REQ = 77,
+ WCN36XX_HAL_ENTER_BMPS_REQ = 78,
+ WCN36XX_HAL_EXIT_BMPS_REQ = 79,
+ WCN36XX_HAL_ENTER_UAPSD_REQ = 80,
+ WCN36XX_HAL_EXIT_UAPSD_REQ = 81,
+ WCN36XX_HAL_UPDATE_UAPSD_PARAM_REQ = 82,
+ WCN36XX_HAL_CONFIGURE_RXP_FILTER_REQ = 83,
+ WCN36XX_HAL_ADD_BCN_FILTER_REQ = 84,
+ WCN36XX_HAL_REM_BCN_FILTER_REQ = 85,
+ WCN36XX_HAL_ADD_WOWL_BCAST_PTRN = 86,
+ WCN36XX_HAL_DEL_WOWL_BCAST_PTRN = 87,
+ WCN36XX_HAL_ENTER_WOWL_REQ = 88,
+ WCN36XX_HAL_EXIT_WOWL_REQ = 89,
+ WCN36XX_HAL_HOST_OFFLOAD_REQ = 90,
+ WCN36XX_HAL_SET_RSSI_THRESH_REQ = 91,
+ WCN36XX_HAL_GET_RSSI_REQ = 92,
+ WCN36XX_HAL_SET_UAPSD_AC_PARAMS_REQ = 93,
+ WCN36XX_HAL_CONFIGURE_APPS_CPU_WAKEUP_STATE_REQ = 94,
+
+ WCN36XX_HAL_ENTER_IMPS_RSP = 95,
+ WCN36XX_HAL_EXIT_IMPS_RSP = 96,
+ WCN36XX_HAL_ENTER_BMPS_RSP = 97,
+ WCN36XX_HAL_EXIT_BMPS_RSP = 98,
+ WCN36XX_HAL_ENTER_UAPSD_RSP = 99,
+ WCN36XX_HAL_EXIT_UAPSD_RSP = 100,
+ WCN36XX_HAL_SET_UAPSD_AC_PARAMS_RSP = 101,
+ WCN36XX_HAL_UPDATE_UAPSD_PARAM_RSP = 102,
+ WCN36XX_HAL_CONFIGURE_RXP_FILTER_RSP = 103,
+ WCN36XX_HAL_ADD_BCN_FILTER_RSP = 104,
+ WCN36XX_HAL_REM_BCN_FILTER_RSP = 105,
+ WCN36XX_HAL_SET_RSSI_THRESH_RSP = 106,
+ WCN36XX_HAL_HOST_OFFLOAD_RSP = 107,
+ WCN36XX_HAL_ADD_WOWL_BCAST_PTRN_RSP = 108,
+ WCN36XX_HAL_DEL_WOWL_BCAST_PTRN_RSP = 109,
+ WCN36XX_HAL_ENTER_WOWL_RSP = 110,
+ WCN36XX_HAL_EXIT_WOWL_RSP = 111,
+ WCN36XX_HAL_RSSI_NOTIFICATION_IND = 112,
+ WCN36XX_HAL_GET_RSSI_RSP = 113,
+ WCN36XX_HAL_CONFIGURE_APPS_CPU_WAKEUP_STATE_RSP = 114,
+
+ /* 11k related events */
+ WCN36XX_HAL_SET_MAX_TX_POWER_REQ = 115,
+ WCN36XX_HAL_SET_MAX_TX_POWER_RSP = 116,
+
+ /* 11R related msgs */
+ WCN36XX_HAL_AGGR_ADD_TS_REQ = 117,
+ WCN36XX_HAL_AGGR_ADD_TS_RSP = 118,
+
+ /* P2P WLAN_FEATURE_P2P */
+ WCN36XX_HAL_SET_P2P_GONOA_REQ = 119,
+ WCN36XX_HAL_SET_P2P_GONOA_RSP = 120,
+
+ /* WLAN Dump commands */
+ WCN36XX_HAL_DUMP_COMMAND_REQ = 121,
+ WCN36XX_HAL_DUMP_COMMAND_RSP = 122,
+
+ /* OEM_DATA FEATURE SUPPORT */
+ WCN36XX_HAL_START_OEM_DATA_REQ = 123,
+ WCN36XX_HAL_START_OEM_DATA_RSP = 124,
+
+ /* ADD SELF STA REQ and RSP */
+ WCN36XX_HAL_ADD_STA_SELF_REQ = 125,
+ WCN36XX_HAL_ADD_STA_SELF_RSP = 126,
+
+ /* DEL SELF STA SUPPORT */
+ WCN36XX_HAL_DEL_STA_SELF_REQ = 127,
+ WCN36XX_HAL_DEL_STA_SELF_RSP = 128,
+
+ /* Coex Indication */
+ WCN36XX_HAL_COEX_IND = 129,
+
+ /* Tx Complete Indication */
+ WCN36XX_HAL_OTA_TX_COMPL_IND = 130,
+
+ /* Host Suspend/resume messages */
+ WCN36XX_HAL_HOST_SUSPEND_IND = 131,
+ WCN36XX_HAL_HOST_RESUME_REQ = 132,
+ WCN36XX_HAL_HOST_RESUME_RSP = 133,
+
+ WCN36XX_HAL_SET_TX_POWER_REQ = 134,
+ WCN36XX_HAL_SET_TX_POWER_RSP = 135,
+ WCN36XX_HAL_GET_TX_POWER_REQ = 136,
+ WCN36XX_HAL_GET_TX_POWER_RSP = 137,
+
+ WCN36XX_HAL_P2P_NOA_ATTR_IND = 138,
+
+ WCN36XX_HAL_ENABLE_RADAR_DETECT_REQ = 139,
+ WCN36XX_HAL_ENABLE_RADAR_DETECT_RSP = 140,
+ WCN36XX_HAL_GET_TPC_REPORT_REQ = 141,
+ WCN36XX_HAL_GET_TPC_REPORT_RSP = 142,
+ WCN36XX_HAL_RADAR_DETECT_IND = 143,
+ WCN36XX_HAL_RADAR_DETECT_INTR_IND = 144,
+ WCN36XX_HAL_KEEP_ALIVE_REQ = 145,
+ WCN36XX_HAL_KEEP_ALIVE_RSP = 146,
+
+ /* PNO messages */
+ WCN36XX_HAL_SET_PREF_NETWORK_REQ = 147,
+ WCN36XX_HAL_SET_PREF_NETWORK_RSP = 148,
+ WCN36XX_HAL_SET_RSSI_FILTER_REQ = 149,
+ WCN36XX_HAL_SET_RSSI_FILTER_RSP = 150,
+ WCN36XX_HAL_UPDATE_SCAN_PARAM_REQ = 151,
+ WCN36XX_HAL_UPDATE_SCAN_PARAM_RSP = 152,
+ WCN36XX_HAL_PREF_NETW_FOUND_IND = 153,
+
+ WCN36XX_HAL_SET_TX_PER_TRACKING_REQ = 154,
+ WCN36XX_HAL_SET_TX_PER_TRACKING_RSP = 155,
+ WCN36XX_HAL_TX_PER_HIT_IND = 156,
+
+ WCN36XX_HAL_8023_MULTICAST_LIST_REQ = 157,
+ WCN36XX_HAL_8023_MULTICAST_LIST_RSP = 158,
+
+ WCN36XX_HAL_SET_PACKET_FILTER_REQ = 159,
+ WCN36XX_HAL_SET_PACKET_FILTER_RSP = 160,
+ WCN36XX_HAL_PACKET_FILTER_MATCH_COUNT_REQ = 161,
+ WCN36XX_HAL_PACKET_FILTER_MATCH_COUNT_RSP = 162,
+ WCN36XX_HAL_CLEAR_PACKET_FILTER_REQ = 163,
+ WCN36XX_HAL_CLEAR_PACKET_FILTER_RSP = 164,
+
+ /*
+ * This is a temporary fix. Should be removed once the Host and Riva
+ * code are in sync.
+ */
+ WCN36XX_HAL_INIT_SCAN_CON_REQ = 165,
+
+ WCN36XX_HAL_SET_POWER_PARAMS_REQ = 166,
+ WCN36XX_HAL_SET_POWER_PARAMS_RSP = 167,
+
+ WCN36XX_HAL_TSM_STATS_REQ = 168,
+ WCN36XX_HAL_TSM_STATS_RSP = 169,
+
+ /* wake reason indication (WOW) */
+ WCN36XX_HAL_WAKE_REASON_IND = 170,
+
+ /* GTK offload support */
+ WCN36XX_HAL_GTK_OFFLOAD_REQ = 171,
+ WCN36XX_HAL_GTK_OFFLOAD_RSP = 172,
+ WCN36XX_HAL_GTK_OFFLOAD_GETINFO_REQ = 173,
+ WCN36XX_HAL_GTK_OFFLOAD_GETINFO_RSP = 174,
+
+ WCN36XX_HAL_FEATURE_CAPS_EXCHANGE_REQ = 175,
+ WCN36XX_HAL_FEATURE_CAPS_EXCHANGE_RSP = 176,
+ WCN36XX_HAL_EXCLUDE_UNENCRYPTED_IND = 177,
+
+ WCN36XX_HAL_SET_THERMAL_MITIGATION_REQ = 178,
+ WCN36XX_HAL_SET_THERMAL_MITIGATION_RSP = 179,
+
+ WCN36XX_HAL_UPDATE_VHT_OP_MODE_REQ = 182,
+ WCN36XX_HAL_UPDATE_VHT_OP_MODE_RSP = 183,
+
+ WCN36XX_HAL_P2P_NOA_START_IND = 184,
+
+ WCN36XX_HAL_GET_ROAM_RSSI_REQ = 185,
+ WCN36XX_HAL_GET_ROAM_RSSI_RSP = 186,
+
+ WCN36XX_HAL_CLASS_B_STATS_IND = 187,
+ WCN36XX_HAL_DEL_BA_IND = 188,
+ WCN36XX_HAL_DHCP_START_IND = 189,
+ WCN36XX_HAL_DHCP_STOP_IND = 190,
+
+ WCN36XX_HAL_MSG_MAX = WCN36XX_HAL_MSG_TYPE_MAX_ENUM_SIZE
+};
+
+/* Enumeration for Version */
+enum wcn36xx_hal_host_msg_version {
+ WCN36XX_HAL_MSG_VERSION0 = 0,
+ WCN36XX_HAL_MSG_VERSION1 = 1,
+ /* defined as 2-byte data */
+ WCN36XX_HAL_MSG_WCNSS_CTRL_VERSION = 0x7FFF,
+ WCN36XX_HAL_MSG_VERSION_MAX_FIELD = WCN36XX_HAL_MSG_WCNSS_CTRL_VERSION
+};
+
+enum driver_type {
+ DRIVER_TYPE_PRODUCTION = 0,
+ DRIVER_TYPE_MFG = 1,
+ DRIVER_TYPE_DVT = 2,
+ DRIVER_TYPE_MAX = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+enum wcn36xx_hal_stop_type {
+ HAL_STOP_TYPE_SYS_RESET,
+ HAL_STOP_TYPE_SYS_DEEP_SLEEP,
+ HAL_STOP_TYPE_RF_KILL,
+ HAL_STOP_TYPE_MAX = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+enum wcn36xx_hal_sys_mode {
+ HAL_SYS_MODE_NORMAL,
+ HAL_SYS_MODE_LEARN,
+ HAL_SYS_MODE_SCAN,
+ HAL_SYS_MODE_PROMISC,
+ HAL_SYS_MODE_SUSPEND_LINK,
+ HAL_SYS_MODE_ROAM_SCAN,
+ HAL_SYS_MODE_ROAM_SUSPEND_LINK,
+ HAL_SYS_MODE_MAX = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+enum phy_chan_bond_state {
+ /* 20MHz IF bandwidth centered on IF carrier */
+ PHY_SINGLE_CHANNEL_CENTERED = 0,
+
+ /* 40MHz IF bandwidth with lower 20MHz supporting the primary channel */
+ PHY_DOUBLE_CHANNEL_LOW_PRIMARY = 1,
+
+ /* 40MHz IF bandwidth centered on IF carrier */
+ PHY_DOUBLE_CHANNEL_CENTERED = 2,
+
+ /* 40MHz IF bandwidth with higher 20MHz supporting the primary ch */
+ PHY_DOUBLE_CHANNEL_HIGH_PRIMARY = 3,
+
+ /* 20/40MHZ offset LOW 40/80MHZ offset CENTERED */
+ PHY_QUADRUPLE_CHANNEL_20MHZ_LOW_40MHZ_CENTERED = 4,
+
+ /* 20/40MHZ offset CENTERED 40/80MHZ offset CENTERED */
+ PHY_QUADRUPLE_CHANNEL_20MHZ_CENTERED_40MHZ_CENTERED = 5,
+
+ /* 20/40MHZ offset HIGH 40/80MHZ offset CENTERED */
+ PHY_QUADRUPLE_CHANNEL_20MHZ_HIGH_40MHZ_CENTERED = 6,
+
+ /* 20/40MHZ offset LOW 40/80MHZ offset LOW */
+ PHY_QUADRUPLE_CHANNEL_20MHZ_LOW_40MHZ_LOW = 7,
+
+ /* 20/40MHZ offset HIGH 40/80MHZ offset LOW */
+ PHY_QUADRUPLE_CHANNEL_20MHZ_HIGH_40MHZ_LOW = 8,
+
+ /* 20/40MHZ offset LOW 40/80MHZ offset HIGH */
+ PHY_QUADRUPLE_CHANNEL_20MHZ_LOW_40MHZ_HIGH = 9,
+
+ /* 20/40MHZ offset-HIGH 40/80MHZ offset HIGH */
+ PHY_QUADRUPLE_CHANNEL_20MHZ_HIGH_40MHZ_HIGH = 10,
+
+ PHY_CHANNEL_BONDING_STATE_MAX = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+/* Spatial Multiplexing(SM) Power Save mode */
+enum wcn36xx_hal_ht_mimo_state {
+ /* Static SM Power Save mode */
+ WCN36XX_HAL_HT_MIMO_PS_STATIC = 0,
+
+ /* Dynamic SM Power Save mode */
+ WCN36XX_HAL_HT_MIMO_PS_DYNAMIC = 1,
+
+ /* reserved */
+ WCN36XX_HAL_HT_MIMO_PS_NA = 2,
+
+ /* SM Power Save disabled */
+ WCN36XX_HAL_HT_MIMO_PS_NO_LIMIT = 3,
+
+ WCN36XX_HAL_HT_MIMO_PS_MAX = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+/* each station added has a rate mode which specifies the sta attributes */
+enum sta_rate_mode {
+ STA_TAURUS = 0,
+ STA_TITAN,
+ STA_POLARIS,
+ STA_11b,
+ STA_11bg,
+ STA_11a,
+ STA_11n,
+ STA_11ac,
+ STA_INVALID_RATE_MODE = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+/* 1,2,5.5,11 */
+#define WCN36XX_HAL_NUM_DSSS_RATES 4
+
+/* 6,9,12,18,24,36,48,54 */
+#define WCN36XX_HAL_NUM_OFDM_RATES 8
+
+/* 72,96,108 */
+#define WCN36XX_HAL_NUM_POLARIS_RATES 3
+
+#define WCN36XX_HAL_MAC_MAX_SUPPORTED_MCS_SET 16
+
+enum wcn36xx_hal_bss_type {
+ WCN36XX_HAL_INFRASTRUCTURE_MODE,
+
+ /* Added for softAP support */
+ WCN36XX_HAL_INFRA_AP_MODE,
+
+ WCN36XX_HAL_IBSS_MODE,
+
+ /* Added for BT-AMP support */
+ WCN36XX_HAL_BTAMP_STA_MODE,
+
+ /* Added for BT-AMP support */
+ WCN36XX_HAL_BTAMP_AP_MODE,
+
+ WCN36XX_HAL_AUTO_MODE,
+
+ WCN36XX_HAL_DONOT_USE_BSS_TYPE = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+enum wcn36xx_hal_nw_type {
+ WCN36XX_HAL_11A_NW_TYPE,
+ WCN36XX_HAL_11B_NW_TYPE,
+ WCN36XX_HAL_11G_NW_TYPE,
+ WCN36XX_HAL_11N_NW_TYPE,
+ WCN36XX_HAL_DONOT_USE_NW_TYPE = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+#define WCN36XX_HAL_MAC_RATESET_EID_MAX 12
+
+enum wcn36xx_hal_ht_operating_mode {
+ /* No Protection */
+ WCN36XX_HAL_HT_OP_MODE_PURE,
+
+ /* Overlap Legacy device present, protection is optional */
+ WCN36XX_HAL_HT_OP_MODE_OVERLAP_LEGACY,
+
+ /* No legacy device, but 20 MHz HT present */
+ WCN36XX_HAL_HT_OP_MODE_NO_LEGACY_20MHZ_HT,
+
+ /* Protection is required */
+ WCN36XX_HAL_HT_OP_MODE_MIXED,
+
+ WCN36XX_HAL_HT_OP_MODE_MAX = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+/* Encryption type enum used with peer */
+enum ani_ed_type {
+ WCN36XX_HAL_ED_NONE,
+ WCN36XX_HAL_ED_WEP40,
+ WCN36XX_HAL_ED_WEP104,
+ WCN36XX_HAL_ED_TKIP,
+ WCN36XX_HAL_ED_CCMP,
+ WCN36XX_HAL_ED_WPI,
+ WCN36XX_HAL_ED_AES_128_CMAC,
+ WCN36XX_HAL_ED_NOT_IMPLEMENTED = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+#define WLAN_MAX_KEY_RSC_LEN 16
+#define WLAN_WAPI_KEY_RSC_LEN 16
+
+/* MAX key length when ULA is used */
+#define WCN36XX_HAL_MAC_MAX_KEY_LENGTH 32
+#define WCN36XX_HAL_MAC_MAX_NUM_OF_DEFAULT_KEYS 4
+
+/*
+ * Enum to specify whether key is used for TX only, RX only or both.
+ */
+enum ani_key_direction {
+ WCN36XX_HAL_TX_ONLY,
+ WCN36XX_HAL_RX_ONLY,
+ WCN36XX_HAL_TX_RX,
+ WCN36XX_HAL_TX_DEFAULT,
+ WCN36XX_HAL_DONOT_USE_KEY_DIRECTION = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+enum ani_wep_type {
+ WCN36XX_HAL_WEP_STATIC,
+ WCN36XX_HAL_WEP_DYNAMIC,
+ WCN36XX_HAL_WEP_MAX = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+enum wcn36xx_hal_link_state {
+
+ WCN36XX_HAL_LINK_IDLE_STATE = 0,
+ WCN36XX_HAL_LINK_PREASSOC_STATE = 1,
+ WCN36XX_HAL_LINK_POSTASSOC_STATE = 2,
+ WCN36XX_HAL_LINK_AP_STATE = 3,
+ WCN36XX_HAL_LINK_IBSS_STATE = 4,
+
+ /* BT-AMP Case */
+ WCN36XX_HAL_LINK_BTAMP_PREASSOC_STATE = 5,
+ WCN36XX_HAL_LINK_BTAMP_POSTASSOC_STATE = 6,
+ WCN36XX_HAL_LINK_BTAMP_AP_STATE = 7,
+ WCN36XX_HAL_LINK_BTAMP_STA_STATE = 8,
+
+ /* Reserved for HAL Internal Use */
+ WCN36XX_HAL_LINK_LEARN_STATE = 9,
+ WCN36XX_HAL_LINK_SCAN_STATE = 10,
+ WCN36XX_HAL_LINK_FINISH_SCAN_STATE = 11,
+ WCN36XX_HAL_LINK_INIT_CAL_STATE = 12,
+ WCN36XX_HAL_LINK_FINISH_CAL_STATE = 13,
+ WCN36XX_HAL_LINK_LISTEN_STATE = 14,
+
+ WCN36XX_HAL_LINK_MAX = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+enum wcn36xx_hal_stats_mask {
+ HAL_SUMMARY_STATS_INFO = 0x00000001,
+ HAL_GLOBAL_CLASS_A_STATS_INFO = 0x00000002,
+ HAL_GLOBAL_CLASS_B_STATS_INFO = 0x00000004,
+ HAL_GLOBAL_CLASS_C_STATS_INFO = 0x00000008,
+ HAL_GLOBAL_CLASS_D_STATS_INFO = 0x00000010,
+ HAL_PER_STA_STATS_INFO = 0x00000020
+};
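+
+/*
+ * Note (illustrative, not part of the original interface): the values
+ * above are single-bit flags, so a stats request covering several
+ * categories simply ORs them together into its stats mask, e.g.
+ *
+ *	u32 stats_mask = HAL_SUMMARY_STATS_INFO |
+ *			 HAL_GLOBAL_CLASS_A_STATS_INFO |
+ *			 HAL_PER_STA_STATS_INFO;
+ */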
+
+/* BT-AMP events type */
+enum bt_amp_event_type {
+ BTAMP_EVENT_CONNECTION_START,
+ BTAMP_EVENT_CONNECTION_STOP,
+ BTAMP_EVENT_CONNECTION_TERMINATED,
+
+ /* This and beyond are invalid values */
+ BTAMP_EVENT_TYPE_MAX = WCN36XX_HAL_MAX_ENUM_SIZE,
+};
+
+/* PE Statistics */
+enum pe_stats_mask {
+ PE_SUMMARY_STATS_INFO = 0x00000001,
+ PE_GLOBAL_CLASS_A_STATS_INFO = 0x00000002,
+ PE_GLOBAL_CLASS_B_STATS_INFO = 0x00000004,
+ PE_GLOBAL_CLASS_C_STATS_INFO = 0x00000008,
+ PE_GLOBAL_CLASS_D_STATS_INFO = 0x00000010,
+ PE_PER_STA_STATS_INFO = 0x00000020,
+
+ /* This and beyond are invalid values */
+ PE_STATS_TYPE_MAX = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+/*
+ * Configuration Parameter IDs
+ */
+#define WCN36XX_HAL_CFG_STA_ID 0
+#define WCN36XX_HAL_CFG_CURRENT_TX_ANTENNA 1
+#define WCN36XX_HAL_CFG_CURRENT_RX_ANTENNA 2
+#define WCN36XX_HAL_CFG_LOW_GAIN_OVERRIDE 3
+#define WCN36XX_HAL_CFG_POWER_STATE_PER_CHAIN 4
+#define WCN36XX_HAL_CFG_CAL_PERIOD 5
+#define WCN36XX_HAL_CFG_CAL_CONTROL 6
+#define WCN36XX_HAL_CFG_PROXIMITY 7
+#define WCN36XX_HAL_CFG_NETWORK_DENSITY 8
+#define WCN36XX_HAL_CFG_MAX_MEDIUM_TIME 9
+#define WCN36XX_HAL_CFG_MAX_MPDUS_IN_AMPDU 10
+#define WCN36XX_HAL_CFG_RTS_THRESHOLD 11
+#define WCN36XX_HAL_CFG_SHORT_RETRY_LIMIT 12
+#define WCN36XX_HAL_CFG_LONG_RETRY_LIMIT 13
+#define WCN36XX_HAL_CFG_FRAGMENTATION_THRESHOLD 14
+#define WCN36XX_HAL_CFG_DYNAMIC_THRESHOLD_ZERO 15
+#define WCN36XX_HAL_CFG_DYNAMIC_THRESHOLD_ONE 16
+#define WCN36XX_HAL_CFG_DYNAMIC_THRESHOLD_TWO 17
+#define WCN36XX_HAL_CFG_FIXED_RATE 18
+#define WCN36XX_HAL_CFG_RETRYRATE_POLICY 19
+#define WCN36XX_HAL_CFG_RETRYRATE_SECONDARY 20
+#define WCN36XX_HAL_CFG_RETRYRATE_TERTIARY 21
+#define WCN36XX_HAL_CFG_FORCE_POLICY_PROTECTION 22
+#define WCN36XX_HAL_CFG_FIXED_RATE_MULTICAST_24GHZ 23
+#define WCN36XX_HAL_CFG_FIXED_RATE_MULTICAST_5GHZ 24
+#define WCN36XX_HAL_CFG_DEFAULT_RATE_INDEX_24GHZ 25
+#define WCN36XX_HAL_CFG_DEFAULT_RATE_INDEX_5GHZ 26
+#define WCN36XX_HAL_CFG_MAX_BA_SESSIONS 27
+#define WCN36XX_HAL_CFG_PS_DATA_INACTIVITY_TIMEOUT 28
+#define WCN36XX_HAL_CFG_PS_ENABLE_BCN_FILTER 29
+#define WCN36XX_HAL_CFG_PS_ENABLE_RSSI_MONITOR 30
+#define WCN36XX_HAL_CFG_NUM_BEACON_PER_RSSI_AVERAGE 31
+#define WCN36XX_HAL_CFG_STATS_PERIOD 32
+#define WCN36XX_HAL_CFG_CFP_MAX_DURATION 33
+#define WCN36XX_HAL_CFG_FRAME_TRANS_ENABLED 34
+#define WCN36XX_HAL_CFG_DTIM_PERIOD 35
+#define WCN36XX_HAL_CFG_EDCA_WMM_ACBK 36
+#define WCN36XX_HAL_CFG_EDCA_WMM_ACBE 37
+#define WCN36XX_HAL_CFG_EDCA_WMM_ACVO 38
+#define WCN36XX_HAL_CFG_EDCA_WMM_ACVI 39
+#define WCN36XX_HAL_CFG_BA_THRESHOLD_HIGH 40
+#define WCN36XX_HAL_CFG_MAX_BA_BUFFERS 41
+#define WCN36XX_HAL_CFG_RPE_POLLING_THRESHOLD 42
+#define WCN36XX_HAL_CFG_RPE_AGING_THRESHOLD_FOR_AC0_REG 43
+#define WCN36XX_HAL_CFG_RPE_AGING_THRESHOLD_FOR_AC1_REG 44
+#define WCN36XX_HAL_CFG_RPE_AGING_THRESHOLD_FOR_AC2_REG 45
+#define WCN36XX_HAL_CFG_RPE_AGING_THRESHOLD_FOR_AC3_REG 46
+#define WCN36XX_HAL_CFG_NO_OF_ONCHIP_REORDER_SESSIONS 47
+#define WCN36XX_HAL_CFG_PS_LISTEN_INTERVAL 48
+#define WCN36XX_HAL_CFG_PS_HEART_BEAT_THRESHOLD 49
+#define WCN36XX_HAL_CFG_PS_NTH_BEACON_FILTER 50
+#define WCN36XX_HAL_CFG_PS_MAX_PS_POLL 51
+#define WCN36XX_HAL_CFG_PS_MIN_RSSI_THRESHOLD 52
+#define WCN36XX_HAL_CFG_PS_RSSI_FILTER_PERIOD 53
+#define WCN36XX_HAL_CFG_PS_BROADCAST_FRAME_FILTER_ENABLE 54
+#define WCN36XX_HAL_CFG_PS_IGNORE_DTIM 55
+#define WCN36XX_HAL_CFG_PS_ENABLE_BCN_EARLY_TERM 56
+#define WCN36XX_HAL_CFG_DYNAMIC_PS_POLL_VALUE 57
+#define WCN36XX_HAL_CFG_PS_NULLDATA_AP_RESP_TIMEOUT 58
+#define WCN36XX_HAL_CFG_TELE_BCN_WAKEUP_EN 59
+#define WCN36XX_HAL_CFG_TELE_BCN_TRANS_LI 60
+#define WCN36XX_HAL_CFG_TELE_BCN_TRANS_LI_IDLE_BCNS 61
+#define WCN36XX_HAL_CFG_TELE_BCN_MAX_LI 62
+#define WCN36XX_HAL_CFG_TELE_BCN_MAX_LI_IDLE_BCNS 63
+#define WCN36XX_HAL_CFG_TX_PWR_CTRL_ENABLE 64
+#define WCN36XX_HAL_CFG_VALID_RADAR_CHANNEL_LIST 65
+#define WCN36XX_HAL_CFG_TX_POWER_24_20 66
+#define WCN36XX_HAL_CFG_TX_POWER_24_40 67
+#define WCN36XX_HAL_CFG_TX_POWER_50_20 68
+#define WCN36XX_HAL_CFG_TX_POWER_50_40 69
+#define WCN36XX_HAL_CFG_MCAST_BCAST_FILTER_SETTING 70
+#define WCN36XX_HAL_CFG_BCN_EARLY_TERM_WAKEUP_INTERVAL 71
+#define WCN36XX_HAL_CFG_MAX_TX_POWER_2_4 72
+#define WCN36XX_HAL_CFG_MAX_TX_POWER_5 73
+#define WCN36XX_HAL_CFG_INFRA_STA_KEEP_ALIVE_PERIOD 74
+#define WCN36XX_HAL_CFG_ENABLE_CLOSE_LOOP 75
+#define WCN36XX_HAL_CFG_BTC_EXECUTION_MODE 76
+#define WCN36XX_HAL_CFG_BTC_DHCP_BT_SLOTS_TO_BLOCK 77
+#define WCN36XX_HAL_CFG_BTC_A2DP_DHCP_BT_SUB_INTERVALS 78
+#define WCN36XX_HAL_CFG_PS_TX_INACTIVITY_TIMEOUT 79
+#define WCN36XX_HAL_CFG_WCNSS_API_VERSION 80
+#define WCN36XX_HAL_CFG_AP_KEEPALIVE_TIMEOUT 81
+#define WCN36XX_HAL_CFG_GO_KEEPALIVE_TIMEOUT 82
+#define WCN36XX_HAL_CFG_ENABLE_MC_ADDR_LIST 83
+#define WCN36XX_HAL_CFG_BTC_STATIC_LEN_INQ_BT 84
+#define WCN36XX_HAL_CFG_BTC_STATIC_LEN_PAGE_BT 85
+#define WCN36XX_HAL_CFG_BTC_STATIC_LEN_CONN_BT 86
+#define WCN36XX_HAL_CFG_BTC_STATIC_LEN_LE_BT 87
+#define WCN36XX_HAL_CFG_BTC_STATIC_LEN_INQ_WLAN 88
+#define WCN36XX_HAL_CFG_BTC_STATIC_LEN_PAGE_WLAN 89
+#define WCN36XX_HAL_CFG_BTC_STATIC_LEN_CONN_WLAN 90
+#define WCN36XX_HAL_CFG_BTC_STATIC_LEN_LE_WLAN 91
+#define WCN36XX_HAL_CFG_BTC_DYN_MAX_LEN_BT 92
+#define WCN36XX_HAL_CFG_BTC_DYN_MAX_LEN_WLAN 93
+#define WCN36XX_HAL_CFG_BTC_MAX_SCO_BLOCK_PERC 94
+#define WCN36XX_HAL_CFG_BTC_DHCP_PROT_ON_A2DP 95
+#define WCN36XX_HAL_CFG_BTC_DHCP_PROT_ON_SCO 96
+#define WCN36XX_HAL_CFG_ENABLE_UNICAST_FILTER 97
+#define WCN36XX_HAL_CFG_MAX_ASSOC_LIMIT 98
+#define WCN36XX_HAL_CFG_ENABLE_LPWR_IMG_TRANSITION 99
+#define WCN36XX_HAL_CFG_ENABLE_MCC_ADAPTIVE_SCHEDULER 100
+#define WCN36XX_HAL_CFG_ENABLE_DETECT_PS_SUPPORT 101
+#define WCN36XX_HAL_CFG_AP_LINK_MONITOR_TIMEOUT 102
+#define WCN36XX_HAL_CFG_BTC_DWELL_TIME_MULTIPLIER 103
+#define WCN36XX_HAL_CFG_ENABLE_TDLS_OXYGEN_MODE 104
+#define WCN36XX_HAL_CFG_MAX_PARAMS 105
+
+/* Message definitions - All the messages below need to be packed */
+
+/* Definition for HAL API Version. */
+struct wcnss_wlan_version {
+ u8 revision;
+ u8 version;
+ u8 minor;
+ u8 major;
+} __packed;
+
+/* Definition for Encryption Keys */
+struct wcn36xx_hal_keys {
+ u8 id;
+
+ /* 0 for multicast */
+ u8 unicast;
+
+ enum ani_key_direction direction;
+
+ /* Usage is unknown */
+ u8 rsc[WLAN_MAX_KEY_RSC_LEN];
+
+ /* 1 = authenticator, 0 = supplicant */
+ u8 pae_role;
+
+ u16 length;
+ u8 key[WCN36XX_HAL_MAC_MAX_KEY_LENGTH];
+} __packed;
+
+/*
+ * set_sta_key_params Moving here since it is shared by
+ * configbss/setstakey msgs
+ */
+struct wcn36xx_hal_set_sta_key_params {
+ /* STA Index */
+ u16 sta_index;
+
+ /* Encryption Type used with peer */
+ enum ani_ed_type enc_type;
+
+ /* STATIC/DYNAMIC - valid only for WEP */
+ enum ani_wep_type wep_type;
+
+ /* Default WEP key, valid only for static WEP; must be between 0 and 3. */
+ u8 def_wep_idx;
+
+ /* valid only for non-static WEP encryptions */
+ struct wcn36xx_hal_keys key[WCN36XX_HAL_MAC_MAX_NUM_OF_DEFAULT_KEYS];
+
+ /*
+ * Control for Replay Count, 1= Single TID based replay count on Tx
+ * 0 = Per TID based replay count on TX
+ */
+ u8 single_tid_rc;
+
+} __packed;
+
+/* 4-byte control message header used by HAL */
+struct wcn36xx_hal_msg_header {
+ enum wcn36xx_hal_host_msg_type msg_type:16;
+ enum wcn36xx_hal_host_msg_version msg_version:16;
+ u32 len;
+} __packed;
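+
+/*
+ * Illustrative sketch (hypothetical helper, not part of this header):
+ * filling the control header for a message. Per the update-cfg note
+ * further below, 'len' covers the entire message including this header.
+ *
+ *	static inline void example_fill_hdr(struct wcn36xx_hal_msg_header *hdr,
+ *			enum wcn36xx_hal_host_msg_type type, u32 total_len)
+ *	{
+ *		hdr->msg_type = type;
+ *		hdr->msg_version = WCN36XX_HAL_MSG_VERSION0;
+ *		hdr->len = total_len;
+ *	}
+ */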
+
+/* Config format required by HAL for each CFG item */
+struct wcn36xx_hal_cfg {
+ /* Cfg Id. The Id required by HAL is exported by HAL
+ * in a shared header file between UMAC and HAL. */
+ u16 id;
+
+ /* Length of the Cfg. This parameter is used to go to the next cfg
+ * in the TLV format. */
+ u16 len;
+
+ /* Padding bytes for unaligned addresses */
+ u16 pad_bytes;
+
+ /* Reserved bytes to align cfgVal's address */
+ u16 reserve;
+
+ /* Following the uCfgLen field there should be 'uCfgLen' bytes
+ * containing the uCfgValue; u8 uCfgValue[uCfgLen] */
+} __packed;
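+
+/*
+ * Illustrative sketch of one plausible TLV layout, using the struct above
+ * as the per-entry header followed by 'len' bytes of value (hypothetical
+ * helper; buffer bounds checking omitted):
+ *
+ *	u8 *example_put_cfg(u8 *buf, u16 id, const void *val, u16 len)
+ *	{
+ *		struct wcn36xx_hal_cfg *cfg = (struct wcn36xx_hal_cfg *)buf;
+ *
+ *		cfg->id = id;
+ *		cfg->len = len;
+ *		cfg->pad_bytes = 0;
+ *		cfg->reserve = 0;
+ *		memcpy(buf + sizeof(*cfg), val, len);
+ *		return buf + sizeof(*cfg) + len;	// start of next cfg
+ *	}
+ */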
+
+struct wcn36xx_hal_mac_start_parameters {
+ /* Driver Type - Production or FTM etc. */
+ enum driver_type type;
+
+ /* Length of the config buffer */
+ u32 len;
+
+ /* Following this there is a TLV formatted buffer of length
+ * "len" bytes containing all config values.
+ * The TLV is expected to be formatted like this:
+ * 0 15 31 31+CFG_LEN-1 length-1
+ * | CFG_ID | CFG_LEN | CFG_BODY | CFG_ID |......|
+ */
+} __packed;
+
+struct wcn36xx_hal_mac_start_req_msg {
+ /* the TLV-format config buffer must start right after params */
+ struct wcn36xx_hal_msg_header header;
+ struct wcn36xx_hal_mac_start_parameters params;
+} __packed;
+
+struct wcn36xx_hal_mac_start_rsp_params {
+ /* success or failure */
+ u16 status;
+
+ /* Max number of STA supported by the device */
+ u8 stations;
+
+ /* Max number of BSS supported by the device */
+ u8 bssids;
+
+ /* API Version */
+ struct wcnss_wlan_version version;
+
+ /* CRM build information */
+ u8 crm_version[WCN36XX_HAL_VERSION_LENGTH];
+
+ /* hardware/chipset/misc version information */
+ u8 wlan_version[WCN36XX_HAL_VERSION_LENGTH];
+
+} __packed;
+
+struct wcn36xx_hal_mac_start_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+ struct wcn36xx_hal_mac_start_rsp_params start_rsp_params;
+} __packed;
+
+struct wcn36xx_hal_mac_stop_req_params {
+ /* The reason for which the device is being stopped */
+ enum wcn36xx_hal_stop_type reason;
+
+} __packed;
+
+struct wcn36xx_hal_mac_stop_req_msg {
+ struct wcn36xx_hal_msg_header header;
+ struct wcn36xx_hal_mac_stop_req_params stop_req_params;
+} __packed;
+
+struct wcn36xx_hal_mac_stop_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+} __packed;
+
+struct wcn36xx_hal_update_cfg_req_msg {
+ /*
+ * Note: The length specified in tHalUpdateCfgReqMsg messages should be
+ * header.msgLen = sizeof(tHalUpdateCfgReqMsg) + uConfigBufferLen
+ */
+ struct wcn36xx_hal_msg_header header;
+
+ /* Length of the config buffer. Allows UMAC to update multiple CFGs */
+ u32 len;
+
+ /*
+ * Following this there is a TLV formatted buffer of length
+ * "uConfigBufferLen" bytes containing all config values.
+ * The TLV is expected to be formatted like this:
+ * 0 15 31 31+CFG_LEN-1 length-1
+ * | CFG_ID | CFG_LEN | CFG_BODY | CFG_ID |......|
+ */
+
+} __packed;
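+
+/*
+ * Illustrative length calculation for an update-cfg request, following
+ * the note above (config_buffer_len is a hypothetical name):
+ *
+ *	msg.len = config_buffer_len;
+ *	msg.header.len = sizeof(msg) + config_buffer_len;
+ */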
+
+struct wcn36xx_hal_update_cfg_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+
+} __packed;
+
+/* Frame control field format (2 bytes) */
+struct wcn36xx_hal_mac_frame_ctl {
+
+#ifndef ANI_LITTLE_BIT_ENDIAN
+
+ u8 subType:4;
+ u8 type:2;
+ u8 protVer:2;
+
+ u8 order:1;
+ u8 wep:1;
+ u8 moreData:1;
+ u8 powerMgmt:1;
+ u8 retry:1;
+ u8 moreFrag:1;
+ u8 fromDS:1;
+ u8 toDS:1;
+
+#else
+
+ u8 protVer:2;
+ u8 type:2;
+ u8 subType:4;
+
+ u8 toDS:1;
+ u8 fromDS:1;
+ u8 moreFrag:1;
+ u8 retry:1;
+ u8 powerMgmt:1;
+ u8 moreData:1;
+ u8 wep:1;
+ u8 order:1;
+
+#endif
+
+};
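+
+/*
+ * The two bitfield layouts above only reorder the fields so that the
+ * packed bytes match the over-the-air frame control format regardless of
+ * the compiler's bitfield ordering. Worked example (standard 802.11
+ * values): a QoS data frame sent from a STA to its AP carries
+ * type = 2 (data), subType = 8 (QoS data) and toDS = 1.
+ */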
+
+/* Sequence control field */
+struct wcn36xx_hal_mac_seq_ctl {
+ u8 fragNum:4;
+ u8 seqNumLo:4;
+ u8 seqNumHi:8;
+};
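+
+/*
+ * The 802.11 sequence control field is a 4-bit fragment number plus a
+ * 12-bit sequence number; the sequence number is split here across
+ * seqNumLo and seqNumHi. Illustrative reconstruction (hypothetical
+ * helper):
+ *
+ *	static inline u16 example_seq_num(struct wcn36xx_hal_mac_seq_ctl *sc)
+ *	{
+ *		return (u16)sc->seqNumLo | ((u16)sc->seqNumHi << 4);
+ *	}
+ */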
+
+/* Management header format */
+struct wcn36xx_hal_mac_mgmt_hdr {
+ struct wcn36xx_hal_mac_frame_ctl fc;
+ u8 durationLo;
+ u8 durationHi;
+ u8 da[6];
+ u8 sa[6];
+ u8 bssId[6];
+ struct wcn36xx_hal_mac_seq_ctl seqControl;
+};
+
+/* FIXME: pronto v1 apparently has 4 */
+#define WCN36XX_HAL_NUM_BSSID 2
+
+/* Scan Entry to hold active BSS idx's */
+struct wcn36xx_hal_scan_entry {
+ u8 bss_index[WCN36XX_HAL_NUM_BSSID];
+ u8 active_bss_count;
+};
+
+struct wcn36xx_hal_init_scan_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* LEARN - AP Role
+ SCAN - STA Role */
+ enum wcn36xx_hal_sys_mode mode;
+
+ /* BSSID of the BSS */
+ u8 bssid[ETH_ALEN];
+
+ /* Whether BSS needs to be notified */
+ u8 notify;
+
+ /* Kind of frame to be used for notifying the BSS (Data Null, QoS
+ * Null, or CTS to Self). Must always be a valid frame type. */
+ u8 frame_type;
+
+ /* UMAC has the option of passing the MAC frame to be used for
+ * notifying the BSS. If non-zero, HAL will use the MAC frame
+ * buffer pointed to by macMgmtHdr. If zero, HAL will generate the
+ * appropriate MAC frame based on frameType. */
+ u8 frame_len;
+
+ /* Following the framelength there is a MAC frame buffer if
+ * frameLength is non-zero. */
+ struct wcn36xx_hal_mac_mgmt_hdr mac_mgmt_hdr;
+
+ /* Entry to hold number of active BSS idx's */
+ struct wcn36xx_hal_scan_entry scan_entry;
+};
+
+struct wcn36xx_hal_init_scan_con_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* LEARN - AP Role
+ SCAN - STA Role */
+ enum wcn36xx_hal_sys_mode mode;
+
+ /* BSSID of the BSS */
+ u8 bssid[ETH_ALEN];
+
+ /* Whether BSS needs to be notified */
+ u8 notify;
+
+ /* Kind of frame to be used for notifying the BSS (Data Null, QoS
+ * Null, or CTS to Self). Must always be a valid frame type. */
+ u8 frame_type;
+
+ /* UMAC has the option of passing the MAC frame to be used for
+ * notifying the BSS. If non-zero, HAL will use the MAC frame
+ * buffer pointed to by macMgmtHdr. If zero, HAL will generate the
+ * appropriate MAC frame based on frameType. */
+ u8 frame_length;
+
+ /* Following the framelength there is a MAC frame buffer if
+ * frameLength is non-zero. */
+ struct wcn36xx_hal_mac_mgmt_hdr mac_mgmt_hdr;
+
+ /* Entry to hold number of active BSS idx's */
+ struct wcn36xx_hal_scan_entry scan_entry;
+
+ /* Single NoA usage in Scanning */
+ u8 use_noa;
+
+ /* Indicates the scan duration (in ms) */
+ u16 scan_duration;
+
+};
+
+struct wcn36xx_hal_init_scan_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+
+} __packed;
+
+struct wcn36xx_hal_start_scan_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Indicates the channel to scan */
+ u8 scan_channel;
+} __packed;
+
+struct wcn36xx_hal_start_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+
+ u32 start_tsf[2];
+ u8 tx_mgmt_power;
+
+} __packed;
+
+struct wcn36xx_hal_end_scan_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Indicates the channel to stop scanning. Not really used, but
+ * retained for symmetry with the "start scan" message. It can also
+ * help in error checks if needed. */
+ u8 scan_channel;
+} __packed;
+
+struct wcn36xx_hal_end_scan_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+} __packed;
+
+struct wcn36xx_hal_finish_scan_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Identifies the operational state of the AP/STA
+ * LEARN - AP Role SCAN - STA Role */
+ enum wcn36xx_hal_sys_mode mode;
+
+ /* Operating channel to tune to. */
+ u8 oper_channel;
+
+ /* Channel Bonding state If 20/40 MHz is operational, this will
+ * indicate the 40 MHz extension channel in combination with the
+ * control channel */
+ enum phy_chan_bond_state cb_state;
+
+ /* BSSID of the BSS */
+ u8 bssid[ETH_ALEN];
+
+ /* Whether BSS needs to be notified */
+ u8 notify;
+
+ /* Kind of frame to be used for notifying the BSS (Data Null, QoS
+ * Null, or CTS to Self). Must always be a valid frame type. */
+ u8 frame_type;
+
+ /* UMAC has the option of passing the MAC frame to be used for
+ * notifying the BSS. If non-zero, HAL will use the MAC frame
+ * buffer pointed to by macMgmtHdr. If zero, HAL will generate the
+ * appropriate MAC frame based on frameType. */
+ u8 frame_length;
+
+ /* Following the framelength there is a MAC frame buffer if
+ * frameLength is non-zero. */
+ struct wcn36xx_hal_mac_mgmt_hdr mac_mgmt_hdr;
+
+ /* Entry to hold number of active BSS idx's */
+ struct wcn36xx_hal_scan_entry scan_entry;
+
+} __packed;
+
+struct wcn36xx_hal_finish_scan_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+
+} __packed;
+
+enum wcn36xx_hal_rate_index {
+ HW_RATE_INDEX_1MBPS = 0x82,
+ HW_RATE_INDEX_2MBPS = 0x84,
+ HW_RATE_INDEX_5_5MBPS = 0x8B,
+ HW_RATE_INDEX_6MBPS = 0x0C,
+ HW_RATE_INDEX_9MBPS = 0x12,
+ HW_RATE_INDEX_11MBPS = 0x96,
+ HW_RATE_INDEX_12MBPS = 0x18,
+ HW_RATE_INDEX_18MBPS = 0x24,
+ HW_RATE_INDEX_24MBPS = 0x30,
+ HW_RATE_INDEX_36MBPS = 0x48,
+ HW_RATE_INDEX_48MBPS = 0x60,
+ HW_RATE_INDEX_54MBPS = 0x6C
+};
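+
+/*
+ * These values appear to follow the 802.11 Supported Rates IE encoding:
+ * the rate in units of 500 kbps, with the 0x80 "basic rate" bit set on
+ * the DSSS entries. Worked example: 5.5 Mbps = 11 * 500 kbps = 0x0b,
+ * plus 0x80 gives HW_RATE_INDEX_5_5MBPS = 0x8b; 54 Mbps = 108 * 500 kbps
+ * = HW_RATE_INDEX_54MBPS = 0x6c.
+ */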
+
+struct wcn36xx_hal_supported_rates {
+ /*
+ * For Self STA Entry: this represents Self Mode.
+ * For Peer Stations, this represents the mode of the peer.
+ * On Station:
+ *
+ * --this mode is updated when PE adds the Self Entry.
+ *
+ * -- OR when PE sends 'ADD_BSS' message and station context in BSS
+ * is used to indicate the mode of the AP.
+ *
+ * On AP:
+ *
+ * -- this mode is updated when PE sends 'ADD_BSS' and Sta entry
+ * for that BSS is used to indicate the self mode of the AP.
+ *
+ * -- OR when a station is associated, PE sends 'ADD_STA' message
+ * with this mode updated.
+ */
+
+ enum sta_rate_mode op_rate_mode;
+
+ /* 11b, 11a and aniLegacyRates are IE rates which give the rate in
+ * units of 500 kbps */
+ u16 dsss_rates[WCN36XX_HAL_NUM_DSSS_RATES];
+ u16 ofdm_rates[WCN36XX_HAL_NUM_OFDM_RATES];
+ u16 legacy_rates[WCN36XX_HAL_NUM_POLARIS_RATES];
+ u16 reserved;
+
+ /* Taurus only supports 26 Titan rates (no ESF/concat rates will be
+ * supported). The first 26 bits are reserved for those Titan rates
+ * and the last 4 bits (bit 28-31) for Taurus; 2 bits (bit 26-27)
+ * are reserved. */
+ /* Titan and Taurus Rates */
+ u32 enhanced_rate_bitmap;
+
+ /*
+ * 0-76 bits used, remaining reserved
+ * bits 0-15 and 32 should be set.
+ */
+ u8 supported_mcs_set[WCN36XX_HAL_MAC_MAX_SUPPORTED_MCS_SET];
+
+ /*
+ * RX Highest Supported Data Rate defines the highest data
+ * rate that the STA is able to receive, in units of 1 Mbps.
+ * This value is derived from the "Supported MCS Set" field inside
+ * the HT capability element.
+ */
+ u16 rx_highest_data_rate;
+
+} __packed;
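+
+/*
+ * Illustrative sketch (hypothetical values for an 11bg peer): the legacy
+ * rate arrays are filled with IE-style rates in units of 500 kbps, the
+ * DSSS entries carrying the 0x80 basic-rate bit:
+ *
+ *	struct wcn36xx_hal_supported_rates rates = {
+ *		.op_rate_mode = STA_11bg,
+ *		.dsss_rates = { 0x82, 0x84, 0x8b, 0x96 },	// 1, 2, 5.5, 11 Mbps
+ *		.ofdm_rates = { 0x0c, 0x12, 0x18, 0x24,
+ *				0x30, 0x48, 0x60, 0x6c },	// 6 ... 54 Mbps
+ *	};
+ */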
+
+struct wcn36xx_hal_config_sta_params {
+ /* BSSID of STA */
+ u8 bssid[ETH_ALEN];
+
+ /* ASSOC ID, as assigned by UMAC */
+ u16 aid;
+
+ /* STA entry Type: 0 - Self, 1 - Other/Peer, 2 - BSSID, 3 - BCAST */
+ u8 type;
+
+ /* Short Preamble Supported. */
+ u8 short_preamble_supported;
+
+ /* MAC Address of STA */
+ u8 mac[ETH_ALEN];
+
+ /* Listen interval of the STA */
+ u16 listen_interval;
+
+ /* Support for 11e/WMM */
+ u8 wmm_enabled;
+
+ /* 11n HT capable STA */
+ u8 ht_capable;
+
+ /* TX Width Set: 0 - 20 MHz only, 1 - 20/40 MHz */
+ u8 tx_channel_width_set;
+
+ /* RIFS mode 0 - NA, 1 - Allowed */
+ u8 rifs_mode;
+
+ /* L-SIG TXOP Protection mechanism
+ 0 - No Support, 1 - Supported
+ SG - there is global field */
+ u8 lsig_txop_protection;
+
+ /* Max Ampdu Size supported by STA. TPE programming.
+ 0: 8k, 1: 16k, 2: 32k, 3: 64k */
+ u8 max_ampdu_size;
+
+ /* Max Ampdu density. Used by RA. 3 : 0~7 : 2^(11nAMPDUdensity -4) */
+ u8 max_ampdu_density;
+
+ /* Max AMSDU size 1 : 3839 bytes, 0 : 7935 bytes */
+ u8 max_amsdu_size;
+
+ /* Short GI support for 40 MHz packets */
+ u8 sgi_40mhz;
+
+ /* Short GI support for 20 MHz packets */
+ u8 sgi_20Mhz;
+
+ /* TODO move this parameter to the end for 3680 */
+ /* These rates are the intersection of peer and self capabilities. */
+ struct wcn36xx_hal_supported_rates supported_rates;
+
+ /* Robust Management Frame (RMF) enabled/disabled */
+ u8 rmf;
+
+ /* The unicast encryption type in the association */
+ u32 encrypt_type;
+
+ /* HAL should update the existing STA entry, if this flag is set. UMAC
+ will set this flag in case of RE-ASSOC, where we want to reuse the
+ old STA ID. 0 = Add, 1 = Update */
+ u8 action;
+
+ /* U-APSD Flags: 1b per AC. Encoded as follows:
+ b7 b6 b5 b4 b3 b2 b1 b0 =
+ X X X X BE BK VI VO */
+ u8 uapsd;
+
+ /* Max SP Length */
+ u8 max_sp_len;
+
+ /* 11n Green Field preamble support
+ 0 - Not supported, 1 - Supported */
+ u8 green_field_capable;
+
+ /* MIMO Power Save mode */
+ enum wcn36xx_hal_ht_mimo_state mimo_ps;
+
+ /* Delayed BA Support */
+ u8 delayed_ba_support;
+
+ /* Max AMPDU duration in 32us */
+ u8 max_ampdu_duration;
+
+ /* HT STA should set it to 1 if it is enabled in the BSS, and set
+ * it to 0 if the AP does not support it. This indication is sent
+ * to HAL and HAL uses this flag to pick up appropriate 40 MHz
+ * rates. */
+ u8 dsss_cck_mode_40mhz;
+
+ /* Valid STA Idx when action=Update. Set to 0xFF when invalid!
+ * Retained for backward compatibility with existing HAL code */
+ u8 sta_index;
+
+ /* BSSID of BSS to which the station is associated. Set to 0xFF when
+ * invalid. Retained for backward compatibility with existing HAL
+ * code */
+ u8 bssid_index;
+
+ u8 p2p;
+
+ /* TODO add this parameter for 3680. */
+ /* Reserved to align next field on a dword boundary */
+ /* u8 reserved; */
+} __packed;
+
+struct wcn36xx_hal_config_sta_req_msg {
+ struct wcn36xx_hal_msg_header header;
+ struct wcn36xx_hal_config_sta_params sta_params;
+} __packed;
+
+struct wcn36xx_hal_config_sta_params_v1 {
+ /* BSSID of STA */
+ u8 bssid[ETH_ALEN];
+
+ /* ASSOC ID, as assigned by UMAC */
+ u16 aid;
+
+ /* STA entry Type: 0 - Self, 1 - Other/Peer, 2 - BSSID, 3 - BCAST */
+ u8 type;
+
+ /* Short Preamble Supported. */
+ u8 short_preamble_supported;
+
+ /* MAC Address of STA */
+ u8 mac[ETH_ALEN];
+
+ /* Listen interval of the STA */
+ u16 listen_interval;
+
+ /* Support for 11e/WMM */
+ u8 wmm_enabled;
+
+ /* 11n HT capable STA */
+ u8 ht_capable;
+
+ /* TX Width Set: 0 - 20 MHz only, 1 - 20/40 MHz */
+ u8 tx_channel_width_set;
+
+ /* RIFS mode 0 - NA, 1 - Allowed */
+ u8 rifs_mode;
+
+ /* L-SIG TXOP Protection mechanism
+ 0 - No Support, 1 - Supported
+ SG - there is global field */
+ u8 lsig_txop_protection;
+
+ /* Max Ampdu Size supported by STA. TPE programming.
+ 0: 8k, 1: 16k, 2: 32k, 3: 64k */
+ u8 max_ampdu_size;
+
+ /* Max Ampdu density. Used by RA. 3 : 0~7 : 2^(11nAMPDUdensity -4) */
+ u8 max_ampdu_density;
+
+ /* Max AMSDU size 1 : 3839 bytes, 0 : 7935 bytes */
+ u8 max_amsdu_size;
+
+ /* Short GI support for 40 MHz packets */
+ u8 sgi_40mhz;
+
+ /* Short GI support for 20 MHz packets */
+ u8 sgi_20Mhz;
+
+ /* Robust Management Frame (RMF) enabled/disabled */
+ u8 rmf;
+
+ /* The unicast encryption type in the association */
+ u32 encrypt_type;
+
+ /* HAL should update the existing STA entry, if this flag is set. UMAC
+ will set this flag in case of RE-ASSOC, where we want to reuse the
+ old STA ID. 0 = Add, 1 = Update */
+ u8 action;
+
+ /* U-APSD Flags: 1b per AC. Encoded as follows:
+ b7 b6 b5 b4 b3 b2 b1 b0 =
+ X X X X BE BK VI VO */
+ u8 uapsd;
+
+ /* Max SP Length */
+ u8 max_sp_len;
+
+ /* 11n Green Field preamble support
+ 0 - Not supported, 1 - Supported */
+ u8 green_field_capable;
+
+ /* MIMO Power Save mode */
+ enum wcn36xx_hal_ht_mimo_state mimo_ps;
+
+ /* Delayed BA Support */
+ u8 delayed_ba_support;
+
+ /* Max AMPDU duration in 32us */
+ u8 max_ampdu_duration;
+
+ /* HT STA should set it to 1 if it is enabled in the BSS, and set
+ * it to 0 if the AP does not support it. This indication is sent
+ * to HAL and HAL uses this flag to pick up appropriate 40 MHz
+ * rates. */
+ u8 dsss_cck_mode_40mhz;
+
+ /* Valid STA Idx when action=Update. Set to 0xFF when invalid!
+ * Retained for backward compatibility with existing HAL code */
+ u8 sta_index;
+
+ /* BSSID of BSS to which the station is associated. Set to 0xFF when
+ * invalid. Retained for backward compatibility with existing HAL
+ * code */
+ u8 bssid_index;
+
+ u8 p2p;
+
+ /* Reserved to align next field on a dword boundary */
+ u8 reserved;
+
+ /* These rates are the intersection of peer and self capabilities. */
+ struct wcn36xx_hal_supported_rates supported_rates;
+} __packed;
+
+struct wcn36xx_hal_config_sta_req_msg_v1 {
+ struct wcn36xx_hal_msg_header header;
+ struct wcn36xx_hal_config_sta_params_v1 sta_params;
+} __packed;
+
+struct config_sta_rsp_params {
+ /* success or failure */
+ u32 status;
+
+ /* Station index; valid only when the 'status' field value is SUCCESS */
+ u8 sta_index;
+
+ /* BSSID Index of BSS to which the station is associated */
+ u8 bssid_index;
+
+ /* DPU Index for PTK */
+ u8 dpu_index;
+
+ /* DPU Index for GTK */
+ u8 bcast_dpu_index;
+
+ /* DPU Index for IGTK */
+ u8 bcast_mgmt_dpu_idx;
+
+ /* PTK DPU signature */
+ u8 uc_ucast_sig;
+
+ /* GTK DPU signature */
+ u8 uc_bcast_sig;
+
+ /* IGTK DPU signature */
+ u8 uc_mgmt_sig;
+
+ u8 p2p;
+
+} __packed;
+
+struct wcn36xx_hal_config_sta_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ struct config_sta_rsp_params params;
+} __packed;
+
+/* Delete STA Request message */
+struct wcn36xx_hal_delete_sta_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Index of STA to delete */
+ u8 sta_index;
+
+} __packed;
+
+/* Delete STA Response message */
+struct wcn36xx_hal_delete_sta_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+
+ /* Index of STA deleted */
+ u8 sta_id;
+} __packed;
+
+/* 12 bytes long because this structure can be used to represent rate and
+ * extended rate set IEs. The parser assumes this to be at least 12 */
+struct wcn36xx_hal_rate_set {
+ u8 num_rates;
+ u8 rate[WCN36XX_HAL_MAC_RATESET_EID_MAX];
+} __packed;
+
+/* access category record */
+struct wcn36xx_hal_aci_aifsn {
+#ifndef ANI_LITTLE_BIT_ENDIAN
+ u8 rsvd:1;
+ u8 aci:2;
+ u8 acm:1;
+ u8 aifsn:4;
+#else
+ u8 aifsn:4;
+ u8 acm:1;
+ u8 aci:2;
+ u8 rsvd:1;
+#endif
+} __packed;
+
+/* contention window size */
+struct wcn36xx_hal_mac_cw {
+#ifndef ANI_LITTLE_BIT_ENDIAN
+ u8 max:4;
+ u8 min:4;
+#else
+ u8 min:4;
+ u8 max:4;
+#endif
+} __packed;
+
+struct wcn36xx_hal_edca_param_record {
+ struct wcn36xx_hal_aci_aifsn aci;
+ struct wcn36xx_hal_mac_cw cw;
+ u16 txop_limit;
+} __packed;
+
+struct wcn36xx_hal_mac_ssid {
+ u8 length;
+ u8 ssid[32];
+} __packed;
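+
+/*
+ * Illustrative sketch (hypothetical helper): the SSID travels as a
+ * length-prefixed byte array, not a NUL-terminated string. Uses the
+ * kernel's min_t() and memcpy():
+ *
+ *	static inline void example_set_ssid(struct wcn36xx_hal_mac_ssid *dst,
+ *					    const u8 *ssid, u8 len)
+ *	{
+ *		dst->length = min_t(u8, len, sizeof(dst->ssid));
+ *		memcpy(dst->ssid, ssid, dst->length);
+ *	}
+ */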
+
+/* Concurrency role. These are generic IDs that identify the various roles
+ * in the software system. */
+enum wcn36xx_hal_con_mode {
+ WCN36XX_HAL_STA_MODE = 0,
+
+ /* to support softAP mode. This is misleading;
+ it means AP MODE only. */
+ WCN36XX_HAL_STA_SAP_MODE = 1,
+
+ WCN36XX_HAL_P2P_CLIENT_MODE,
+ WCN36XX_HAL_P2P_GO_MODE,
+ WCN36XX_HAL_MONITOR_MODE,
+};
+
+/* This is a bit pattern to be set for each mode
+ * bit 0 - sta mode
+ * bit 1 - ap mode
+ * bit 2 - p2p client mode
+ * bit 3 - p2p go mode */
+enum wcn36xx_hal_concurrency_mode {
+ HAL_STA = 1,
+ HAL_SAP = 2,
+
+ /* to support sta + softAp mode. This means STA+AP mode */
+ HAL_STA_SAP = 3,
+
+ HAL_P2P_CLIENT = 4,
+ HAL_P2P_GO = 8,
+ HAL_MAX_CONCURRENCY_PERSONA = 4
+};
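+
+/*
+ * Worked example of the bit pattern described above: a station running
+ * together with a P2P client would be HAL_STA | HAL_P2P_CLIENT = 0x5,
+ * and HAL_STA_SAP (0x3) is literally HAL_STA | HAL_SAP.
+ */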
+
+struct wcn36xx_hal_config_bss_params {
+ /* BSSID */
+ u8 bssid[ETH_ALEN];
+
+ /* Self Mac Address */
+ u8 self_mac_addr[ETH_ALEN];
+
+ /* BSS type */
+ enum wcn36xx_hal_bss_type bss_type;
+
+ /* Operational Mode: AP = 0, STA = 1 */
+ u8 oper_mode;
+
+ /* Network Type */
+ enum wcn36xx_hal_nw_type nw_type;
+
+ /* Used to classify PURE_11G/11G_MIXED to program MTU */
+ u8 short_slot_time_supported;
+
+ /* Co-exist with 11a STA */
+ u8 lla_coexist;
+
+ /* Co-exist with 11b STA */
+ u8 llb_coexist;
+
+ /* Co-exist with 11g STA */
+ u8 llg_coexist;
+
+ /* Coexistence with 11n STA */
+ u8 ht20_coexist;
+
+ /* Non GF coexist flag */
+ u8 lln_non_gf_coexist;
+
+ /* TXOP protection support */
+ u8 lsig_tx_op_protection_full_support;
+
+ /* RIFS mode */
+ u8 rifs_mode;
+
+ /* Beacon Interval in TU */
+ u16 beacon_interval;
+
+ /* DTIM period */
+ u8 dtim_period;
+
+ /* TX Width Set: 0 - 20 MHz only, 1 - 20/40 MHz */
+ u8 tx_channel_width_set;
+
+ /* Operating channel */
+ u8 oper_channel;
+
+ /* Extension channel for channel bonding */
+ u8 ext_channel;
+
+ /* Reserved to align next field on a dword boundary */
+ u8 reserved;
+
+ /* TODO move sta to the end for 3680 */
+ /* Context of the station being added in HW
+ * Add a STA entry for "itself" -
+ *
+ * On AP - Add the AP itself in an "STA context"
+ *
+ * On STA - Add the AP to which this STA is joining in an
+ * "STA context"
+ */
+ struct wcn36xx_hal_config_sta_params sta;
+ /* SSID of the BSS */
+ struct wcn36xx_hal_mac_ssid ssid;
+
+ /* HAL should update the existing BSS entry, if this flag is set.
+ * UMAC will set this flag in case of reassoc, where we want to
+ * reuse the old BSSID and still return success. 0 = Add, 1 =
+ * Update */
+ u8 action;
+
+ /* MAC Rate Set */
+ struct wcn36xx_hal_rate_set rateset;
+
+ /* Enable/Disable HT capabilities of the BSS */
+ u8 ht;
+
+ /* Enable/Disable OBSS protection */
+ u8 obss_prot_enabled;
+
+ /* RMF enabled/disabled */
+ u8 rmf;
+
+ /* HT Operating Mode - operating mode of the 802.11n STA */
+ enum wcn36xx_hal_ht_operating_mode ht_oper_mode;
+
+ /* Dual CTS Protection: 0 - Unused, 1 - Used */
+ u8 dual_cts_protection;
+
+ /* Probe Response Max retries */
+ u8 max_probe_resp_retry_limit;
+
+ /* To Enable Hidden ssid */
+ u8 hidden_ssid;
+
+ /* To Enable/Disable FW Proxy Probe Resp */
+ u8 proxy_probe_resp;
+
+ /* Boolean to indicate if EDCA params are valid. UMAC might not
+ * have valid EDCA params or might not desire to apply EDCA params
+ * during config BSS. 0 implies Not Valid ; Non-Zero implies
+ * valid */
+ u8 edca_params_valid;
+
+ /* EDCA Parameters for Best Effort Access Category */
+ struct wcn36xx_hal_edca_param_record acbe;
+
+ /* EDCA Parameters for Background Access Category */
+ struct wcn36xx_hal_edca_param_record acbk;
+
+ /* EDCA Parameters for Video Access Category */
+ struct wcn36xx_hal_edca_param_record acvi;
+
+ /* EDCA Parameters for Voice Access Category */
+ struct wcn36xx_hal_edca_param_record acvo;
+
+ /* Ext Bss Config Msg if set */
+ u8 ext_set_sta_key_param_valid;
+
+ /* SetStaKeyParams for ext bss msg */
+ struct wcn36xx_hal_set_sta_key_params ext_set_sta_key_param;
+
+ /* Persona for the BSS can be STA, AP, GO or CLIENT; value same as
+ * enum wcn36xx_hal_con_mode */
+ u8 wcn36xx_hal_persona;
+
+ u8 spectrum_mgt_enable;
+
+ /* HAL fills in the tx power used for mgmt frames in txMgmtPower */
+ s8 tx_mgmt_power;
+
+ /* maxTxPower has max power to be used after applying the power
+ * constraint if any */
+ s8 max_tx_power;
+} __packed;
+
+struct wcn36xx_hal_config_bss_req_msg {
+ struct wcn36xx_hal_msg_header header;
+ struct wcn36xx_hal_config_bss_params bss_params;
+} __packed;
+
+struct wcn36xx_hal_config_bss_params_v1 {
+ /* BSSID */
+ u8 bssid[ETH_ALEN];
+
+ /* Self Mac Address */
+ u8 self_mac_addr[ETH_ALEN];
+
+ /* BSS type */
+ enum wcn36xx_hal_bss_type bss_type;
+
+ /* Operational Mode: AP = 0, STA = 1 */
+ u8 oper_mode;
+
+ /* Network Type */
+ enum wcn36xx_hal_nw_type nw_type;
+
+ /* Used to classify PURE_11G/11G_MIXED to program MTU */
+ u8 short_slot_time_supported;
+
+ /* Co-exist with 11a STA */
+ u8 lla_coexist;
+
+ /* Co-exist with 11b STA */
+ u8 llb_coexist;
+
+ /* Co-exist with 11g STA */
+ u8 llg_coexist;
+
+ /* Coexistence with 11n STA */
+ u8 ht20_coexist;
+
+ /* Non GF coexist flag */
+ u8 lln_non_gf_coexist;
+
+ /* TXOP protection support */
+ u8 lsig_tx_op_protection_full_support;
+
+ /* RIFS mode */
+ u8 rifs_mode;
+
+ /* Beacon Interval in TU */
+ u16 beacon_interval;
+
+ /* DTIM period */
+ u8 dtim_period;
+
+ /* TX Width Set: 0 - 20 MHz only, 1 - 20/40 MHz */
+ u8 tx_channel_width_set;
+
+ /* Operating channel */
+ u8 oper_channel;
+
+ /* Extension channel for channel bonding */
+ u8 ext_channel;
+
+ /* Reserved to align next field on a dword boundary */
+ u8 reserved;
+
+ /* SSID of the BSS */
+ struct wcn36xx_hal_mac_ssid ssid;
+
+ /* HAL should update the existing BSS entry, if this flag is set.
+ * UMAC will set this flag in case of reassoc, where we want to
+ * reuse the old BSSID and still return success. 0 = Add, 1 =
+ * Update */
+ u8 action;
+
+ /* MAC Rate Set */
+ struct wcn36xx_hal_rate_set rateset;
+
+ /* Enable/Disable HT capabilities of the BSS */
+ u8 ht;
+
+ /* Enable/Disable OBSS protection */
+ u8 obss_prot_enabled;
+
+ /* RMF enabled/disabled */
+ u8 rmf;
+
+ /* HT Operating Mode - operating mode of the 802.11n STA */
+ enum wcn36xx_hal_ht_operating_mode ht_oper_mode;
+
+ /* Dual CTS Protection: 0 - Unused, 1 - Used */
+ u8 dual_cts_protection;
+
+ /* Probe Response Max retries */
+ u8 max_probe_resp_retry_limit;
+
+ /* To Enable Hidden ssid */
+ u8 hidden_ssid;
+
+ /* To Enable/Disable FW Proxy Probe Resp */
+ u8 proxy_probe_resp;
+
+ /* Boolean to indicate if EDCA params are valid. UMAC might not
+ * have valid EDCA params or might not desire to apply EDCA params
+ * during config BSS. 0 implies Not Valid ; Non-Zero implies
+ * valid */
+ u8 edca_params_valid;
+
+ /* EDCA Parameters for Best Effort Access Category */
+ struct wcn36xx_hal_edca_param_record acbe;
+
+ /* EDCA Parameters for Background Access Category */
+ struct wcn36xx_hal_edca_param_record acbk;
+
+ /* EDCA Parameters for Video Access Category */
+ struct wcn36xx_hal_edca_param_record acvi;
+
+ /* EDCA Parameters for Voice Access Category */
+ struct wcn36xx_hal_edca_param_record acvo;
+
+ /* Ext Bss Config Msg if set */
+ u8 ext_set_sta_key_param_valid;
+
+ /* SetStaKeyParams for ext bss msg */
+ struct wcn36xx_hal_set_sta_key_params ext_set_sta_key_param;
+
+ /* Persona for the BSS can be STA, AP, GO or CLIENT; value same as
+ * enum wcn36xx_hal_con_mode */
+ u8 wcn36xx_hal_persona;
+
+ u8 spectrum_mgt_enable;
+
+ /* HAL fills in the tx power used for mgmt frames in txMgmtPower */
+ s8 tx_mgmt_power;
+
+ /* maxTxPower has max power to be used after applying the power
+ * constraint if any */
+ s8 max_tx_power;
+
+ /* Context of the station being added in HW
+ * Add a STA entry for "itself" -
+ *
+ * On AP - Add the AP itself in an "STA context"
+ *
+ * On STA - Add the AP to which this STA is joining in an
+ * "STA context"
+ */
+ struct wcn36xx_hal_config_sta_params_v1 sta;
+} __packed;
+
+struct wcn36xx_hal_config_bss_req_msg_v1 {
+ struct wcn36xx_hal_msg_header header;
+ struct wcn36xx_hal_config_bss_params_v1 bss_params;
+} __packed;
+
+struct wcn36xx_hal_config_bss_rsp_params {
+ /* Success or Failure */
+ u32 status;
+
+ /* BSS index allocated by HAL */
+ u8 bss_index;
+
+ /* DPU descriptor index for PTK */
+ u8 dpu_desc_index;
+
+ /* PTK DPU signature */
+ u8 ucast_dpu_signature;
+
+ /* DPU descriptor index for GTK */
+ u8 bcast_dpu_desc_indx;
+
+ /* GTK DPU signature */
+ u8 bcast_dpu_signature;
+
+ /* DPU descriptor for IGTK */
+ u8 mgmt_dpu_desc_index;
+
+ /* IGTK DPU signature */
+ u8 mgmt_dpu_signature;
+
+ /* Station Index for BSS entry */
+ u8 bss_sta_index;
+
+ /* Self station index for this BSS */
+ u8 bss_self_sta_index;
+
+ /* Bcast station for buffering bcast frames in AP role */
+ u8 bss_bcast_sta_idx;
+
+ /* MAC Address of STA(PEER/SELF) in staContext of configBSSReq */
+ u8 mac[ETH_ALEN];
+
+ /* HAL fills in the tx power used for mgmt frames in this field. */
+ s8 tx_mgmt_power;
+
+} __packed;
+
+struct wcn36xx_hal_config_bss_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+ struct wcn36xx_hal_config_bss_rsp_params bss_rsp_params;
+} __packed;
+
+struct wcn36xx_hal_delete_bss_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* BSS index to be deleted */
+ u8 bss_index;
+
+} __packed;
+
+struct wcn36xx_hal_delete_bss_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Success or Failure */
+ u32 status;
+
+ /* BSS index that has been deleted */
+ u8 bss_index;
+
+} __packed;
+
+struct wcn36xx_hal_join_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Indicates the BSSID to which STA is going to associate */
+ u8 bssid[ETH_ALEN];
+
+ /* Indicates the channel to switch to. */
+ u8 channel;
+
+ /* Self STA MAC */
+ u8 self_sta_mac_addr[ETH_ALEN];
+
+ /* Local power constraint */
+ u8 local_power_constraint;
+
+ /* Secondary channel offset */
+ enum phy_chan_bond_state secondary_channel_offset;
+
+ /* link State */
+ enum wcn36xx_hal_link_state link_state;
+
+ /* Max TX power */
+ s8 max_tx_power;
+} __packed;
+
+struct wcn36xx_hal_join_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+
+ /* HAL fills in the tx power used for mgmt frames in this field */
+ u8 tx_mgmt_power;
+} __packed;
+
+struct post_assoc_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ struct wcn36xx_hal_config_sta_params sta_params;
+ struct wcn36xx_hal_config_bss_params bss_params;
+};
+
+struct post_assoc_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+ struct config_sta_rsp_params sta_rsp_params;
+ struct wcn36xx_hal_config_bss_rsp_params bss_rsp_params;
+};
+
+/* This is used to create a set of WEP keys for a given BSS. */
+struct wcn36xx_hal_set_bss_key_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* BSS Index of the BSS */
+ u8 bss_idx;
+
+ /* Encryption Type used with peer */
+ enum ani_ed_type enc_type;
+
+ /* Number of keys */
+ u8 num_keys;
+
+ /* Array of keys. */
+ struct wcn36xx_hal_keys keys[WCN36XX_HAL_MAC_MAX_NUM_OF_DEFAULT_KEYS];
+
+ /* Control for Replay Count, 1= Single TID based replay count on Tx
+ * 0 = Per TID based replay count on TX */
+ u8 single_tid_rc;
+} __packed;
+
+/* tagged version of set bss key */
+struct wcn36xx_hal_set_bss_key_req_msg_tagged {
+ struct wcn36xx_hal_set_bss_key_req_msg Msg;
+ u32 tag;
+} __packed;
+
+struct wcn36xx_hal_set_bss_key_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+} __packed;
+
+/*
+ * This is used to configure the key information on a given station.
+ * When the sec_type is WEP40 or WEP104, the def_wep_idx is used to locate
+ * a preconfigured key from the BSS the station is associated with;
+ * otherwise a new key descriptor is created based on the key field.
+ */
+struct wcn36xx_hal_set_sta_key_req_msg {
+ struct wcn36xx_hal_msg_header header;
+ struct wcn36xx_hal_set_sta_key_params set_sta_key_params;
+} __packed;
+
+struct wcn36xx_hal_set_sta_key_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+} __packed;
+
+struct wcn36xx_hal_remove_bss_key_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* BSS Index of the BSS */
+ u8 bss_idx;
+
+ /* Encryption Type used with peer */
+ enum ani_ed_type enc_type;
+
+ /* Key Id */
+ u8 key_id;
+
+ /* STATIC/DYNAMIC. Used in nullifying key descriptors for
+ * static/dynamic keys */
+ enum ani_wep_type wep_type;
+} __packed;
+
+struct wcn36xx_hal_remove_bss_key_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+} __packed;
+
+/*
+ * This is used by PE to remove the key information on a given station.
+ */
+struct wcn36xx_hal_remove_sta_key_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* STA Index */
+ u16 sta_idx;
+
+ /* Encryption Type used with peer */
+ enum ani_ed_type enc_type;
+
+ /* Key Id */
+ u8 key_id;
+
+ /* Whether to invalidate the Broadcast key or Unicast key. In case
+ * of WEP, the same key is used for both broadcast and unicast. */
+ u8 unicast;
+
+} __packed;
+
+struct wcn36xx_hal_remove_sta_key_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+
+} __packed;
+
+#ifdef FEATURE_OEM_DATA_SUPPORT
+
+#ifndef OEM_DATA_REQ_SIZE
+#define OEM_DATA_REQ_SIZE 134
+#endif
+
+#ifndef OEM_DATA_RSP_SIZE
+#define OEM_DATA_RSP_SIZE 1968
+#endif
+
+struct start_oem_data_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u32 status;
+ tSirMacAddr self_mac_addr;
+ u8 oem_data_req[OEM_DATA_REQ_SIZE];
+
+};
+
+struct start_oem_data_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 oem_data_rsp[OEM_DATA_RSP_SIZE];
+};
+
+#endif
+
+struct wcn36xx_hal_switch_channel_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Channel number */
+ u8 channel_number;
+
+ /* Local power constraint */
+ u8 local_power_constraint;
+
+ /* Secondary channel offset */
+ enum phy_chan_bond_state secondary_channel_offset;
+
+ /* HAL fills in the tx power used for mgmt frames in this field. */
+ u8 tx_mgmt_power;
+
+ /* Max TX power */
+ u8 max_tx_power;
+
+ /* Self STA MAC */
+ u8 self_sta_mac_addr[ETH_ALEN];
+
+ /* VO WIFI comment: BSSID needed to identify session. As the
+ * request has power constraints, this should be applied only to
+ * that session. Since MTU timing and EDCA are sessionized, this
+ * struct needs to be sessionized and bssid needs to be out of the
+ * VOWifi feature flag. V IMP: keep the bssId field at the end of
+ * this msg. It is used to maintain backward compatibility by way of
+ * ignoring it if using new host/old FW or old host/new FW, since it
+ * is at the end of this struct
+ */
+ u8 bssid[ETH_ALEN];
+} __packed;
+
+struct wcn36xx_hal_switch_channel_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Status */
+ u32 status;
+
+ /* Channel number - same as in request */
+ u8 channel_number;
+
+ /* HAL fills in the tx power used for mgmt frames in this field */
+ u8 tx_mgmt_power;
+
+ /* BSSID needed to identify session - same as in request */
+ u8 bssid[ETH_ALEN];
+
+} __packed;
+
+struct update_edca_params_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* BSS Index */
+ u16 bss_index;
+
+ /* Best Effort */
+ struct wcn36xx_hal_edca_param_record acbe;
+
+ /* Background */
+ struct wcn36xx_hal_edca_param_record acbk;
+
+ /* Video */
+ struct wcn36xx_hal_edca_param_record acvi;
+
+ /* Voice */
+ struct wcn36xx_hal_edca_param_record acvo;
+};
+
+struct update_edca_params_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+};
+
+struct dpu_stats_params {
+ /* Index of STA to which the statistics belong */
+ u16 sta_index;
+
+ /* Encryption mode */
+ u8 enc_mode;
+
+ /* status */
+ u32 status;
+
+ /* Statistics */
+ u32 send_blocks;
+ u32 recv_blocks;
+ u32 replays;
+ u8 mic_error_cnt;
+ u32 prot_excl_cnt;
+ u16 format_err_cnt;
+ u16 un_decryptable_cnt;
+ u32 decrypt_err_cnt;
+ u32 decrypt_ok_cnt;
+};
+
+struct wcn36xx_hal_stats_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Valid STA Idx for per STA stats request */
+ u32 sta_id;
+
+ /* Categories of stats requested as specified in eHalStatsMask */
+ u32 stats_mask;
+};
+
+struct ani_summary_stats_info {
+ /* Total number of packets (per AC) that were successfully
+ * transmitted with retries */
+ u32 retry_cnt[4];
+
+ /* The number of MSDU packets and MMPDU frames per AC that the
+ * 802.11 station successfully transmitted after more than one
+ * retransmission attempt */
+ u32 multiple_retry_cnt[4];
+
+ /* Total number of packets (per AC) that were successfully
+ * transmitted (with and without retries, including multi-cast,
+ * broadcast) */
+ u32 tx_frm_cnt[4];
+
+ /* Total number of packets that were successfully received (after
+ * appropriate filter rules including multi-cast, broadcast) */
+ u32 rx_frm_cnt;
+
+ /* Total number of duplicate frames received successfully */
+ u32 frm_dup_cnt;
+
+ /* Total number of packets (per AC) that failed to transmit */
+ u32 fail_cnt[4];
+
+ /* Total number of RTS/CTS sequence failures for transmission of a
+ * packet */
+ u32 rts_fail_cnt;
+
+ /* Total number of packets that failed to transmit because of no
+ * ACK from the remote entity */
+ u32 ack_fail_cnt;
+
+ /* Total number of RTS/CTS sequence successes for transmission of a
+ * packet */
+ u32 rts_succ_cnt;
+
+ /* The sum of the receive error count and dropped-receive-buffer
+ * error count. HAL will provide this as a sum of (FCS error) +
+ * (Fail get BD/PDU in HW) */
+ u32 rx_discard_cnt;
+
+ /*
+ * The receive error count. HAL will provide the RxP FCS error
+ * global counter. */
+ u32 rx_error_cnt;
+
+ /* The sum of the transmit-directed byte count, transmit-multicast
+ * byte count and transmit-broadcast byte count. HAL will sum TPE
+ * UC/MC/BCAST global counters to provide this. */
+ u32 tx_byte_cnt;
+};
+
+/* defines tx_rate_flags */
+enum tx_rate_info {
+ /* Legacy rates */
+ HAL_TX_RATE_LEGACY = 0x1,
+
+ /* HT20 rates */
+ HAL_TX_RATE_HT20 = 0x2,
+
+ /* HT40 rates */
+ HAL_TX_RATE_HT40 = 0x4,
+
+ /* Rate with Short guard interval */
+ HAL_TX_RATE_SGI = 0x8,
+
+ /* Rate with Long guard interval */
+ HAL_TX_RATE_LGI = 0x10
+};
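+
+/*
+ * The flags above are bits and may be combined: a short-guard-interval
+ * HT40 rate, for example, reports HAL_TX_RATE_HT40 | HAL_TX_RATE_SGI
+ * (0x4 | 0x8 = 0xc) in tx_rate_flags.
+ */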
+
+struct ani_global_class_a_stats_info {
+ /* The number of MPDU frames received by the 802.11 station for
+ * MSDU packets or MMPDU frames */
+ u32 rx_frag_cnt;
+
+ /* The number of MPDU frames received by the 802.11 station for
+ * MSDU packets or MMPDU frames when a promiscuous packet filter
+ * was enabled */
+ u32 promiscuous_rx_frag_cnt;
+
+ /* The receiver input sensitivity referenced to a FER of 8% at an
+ * MPDU length of 1024 bytes at the antenna connector. Each element
+ * of the array shall correspond to a supported rate and the order
+ * shall be the same as the supportedRates parameter. */
+ u32 rx_input_sensitivity;
+
+ /* The maximum transmit power in dBm up to one decimal place. For
+ * example, if it is 10.5 dBm, the value would be 105 */
+ u32 max_pwr;
+
+ /* Number of times the receiver failed to synchronize with the
+ * incoming signal after detecting the sync in the preamble of the
+ * transmitted PLCP protocol data unit. */
+ u32 sync_fail_cnt;
+
+ /* Legacy transmit rate, in units of 500 kbit/sec, for the most
+ * recently transmitted frame */
+ u32 tx_rate;
+
+ /* mcs index for HT20 and HT40 rates */
+ u32 mcs_index;
+
+ /* to differentiate between HT20 and HT40 rates; short and long
+ * guard interval */
+ u32 tx_rate_flags;
+};
+
+struct ani_global_security_stats {
+ /* The number of unencrypted received MPDU frames that the MAC
+ * layer discarded when the IEEE 802.11 dot11ExcludeUnencrypted
+ * management information base (MIB) object is enabled */
+ u32 rx_wep_unencrypted_frm_cnt;
+
+ /* The number of received MSDU packets that the 802.11 station
+ * discarded because of MIC failures */
+ u32 rx_mic_fail_cnt;
+
+ /* The number of encrypted MPDU frames that the 802.11 station
+ * failed to decrypt because of a TKIP ICV error */
+ u32 tkip_icv_err;
+
+ /* The number of received MPDU frames that the 802.11 station discarded
+ * because of an invalid AES-CCMP format */
+ u32 aes_ccmp_format_err;
+
+ /* The number of received MPDU frames that the 802.11 station
+ * discarded because of the AES-CCMP replay protection procedure */
+ u32 aes_ccmp_replay_cnt;
+
+ /* The number of received MPDU frames that the 802.11 station
+ * discarded because of errors detected by the AES-CCMP decryption
+ * algorithm */
+ u32 aes_ccmp_decrpt_err;
+
+ /* The number of encrypted MPDU frames received for which a WEP
+ * decryption key was not available on the 802.11 station */
+ u32 wep_undecryptable_cnt;
+
+ /* The number of encrypted MPDU frames that the 802.11 station
+ * failed to decrypt because of a WEP ICV error */
+ u32 wep_icv_err;
+
+ /* The number of received encrypted packets that the 802.11 station
+ * successfully decrypted */
+ u32 rx_decrypt_succ_cnt;
+
+ /* The number of encrypted packets that the 802.11 station failed
+ * to decrypt */
+ u32 rx_decrypt_fail_cnt;
+};
+
+struct ani_global_class_b_stats_info {
+ struct ani_global_security_stats uc_stats;
+ struct ani_global_security_stats mc_bc_stats;
+};
+
+struct ani_global_class_c_stats_info {
+	/* This counter shall be incremented for a received A-MSDU frame
+	 * with the station's MAC address in the address 1 field or an
+	 * A-MSDU frame with a group address in the address 1 field */
+ u32 rx_amsdu_cnt;
+
+ /* This counter shall be incremented when the MAC receives an AMPDU
+ * from the PHY */
+ u32 rx_ampdu_cnt;
+
+ /* This counter shall be incremented when a Frame is transmitted
+ * only on the primary channel */
+ u32 tx_20_frm_cnt;
+
+ /* This counter shall be incremented when a Frame is received only
+ * on the primary channel */
+ u32 rx_20_frm_cnt;
+
+ /* This counter shall be incremented by the number of MPDUs
+ * received in the A-MPDU when an A-MPDU is received */
+ u32 rx_mpdu_in_ampdu_cnt;
+
+	/* This counter shall be incremented when an MPDU delimiter has a
+	 * CRC error, when it is the first CRC error in the received A-MPDU
+	 * or when the previous delimiter was decoded correctly */
+ u32 ampdu_delimiter_crc_err;
+};
+
+struct ani_per_sta_stats_info {
+ /* The number of MPDU frames that the 802.11 station transmitted
+ * and acknowledged through a received 802.11 ACK frame */
+ u32 tx_frag_cnt[4];
+
+ /* This counter shall be incremented when an A-MPDU is transmitted */
+ u32 tx_ampdu_cnt;
+
+ /* This counter shall increment by the number of MPDUs in the AMPDU
+ * when an A-MPDU is transmitted */
+ u32 tx_mpdu_in_ampdu_cnt;
+};
+
+struct wcn36xx_hal_stats_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Success or Failure */
+ u32 status;
+
+ /* STA Idx */
+ u32 sta_index;
+
+ /* Categories of STATS being returned as per eHalStatsMask */
+ u32 stats_mask;
+
+	/* message type is the same as the request type */
+ u16 msg_type;
+
+ /* length of the entire request, includes the pStatsBuf length too */
+ u16 msg_len;
+};
+
+struct wcn36xx_hal_set_link_state_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 bssid[ETH_ALEN];
+ enum wcn36xx_hal_link_state state;
+ u8 self_mac_addr[ETH_ALEN];
+
+} __packed;
+
+struct set_link_state_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+};
+
+/* TSPEC Params */
+struct wcn36xx_hal_ts_info_tfc {
+#ifndef ANI_LITTLE_BIT_ENDIAN
+ u16 ackPolicy:2;
+ u16 userPrio:3;
+ u16 psb:1;
+ u16 aggregation:1;
+ u16 accessPolicy:2;
+ u16 direction:2;
+ u16 tsid:4;
+ u16 trafficType:1;
+#else
+ u16 trafficType:1;
+ u16 tsid:4;
+ u16 direction:2;
+ u16 accessPolicy:2;
+ u16 aggregation:1;
+ u16 psb:1;
+ u16 userPrio:3;
+ u16 ackPolicy:2;
+#endif
+};
+
+/* Flag to schedule the traffic type */
+struct wcn36xx_hal_ts_info_sch {
+#ifndef ANI_LITTLE_BIT_ENDIAN
+ u8 rsvd:7;
+ u8 schedule:1;
+#else
+ u8 schedule:1;
+ u8 rsvd:7;
+#endif
+};
+
+/* Traffic and scheduling info */
+struct wcn36xx_hal_ts_info {
+ struct wcn36xx_hal_ts_info_tfc traffic;
+ struct wcn36xx_hal_ts_info_sch schedule;
+};
+
+/* Information elements */
+struct wcn36xx_hal_tspec_ie {
+ u8 type;
+ u8 length;
+ struct wcn36xx_hal_ts_info ts_info;
+ u16 nom_msdu_size;
+ u16 max_msdu_size;
+ u32 min_svc_interval;
+ u32 max_svc_interval;
+ u32 inact_interval;
+ u32 suspend_interval;
+ u32 svc_start_time;
+ u32 min_data_rate;
+ u32 mean_data_rate;
+ u32 peak_data_rate;
+ u32 max_burst_sz;
+ u32 delay_bound;
+ u32 min_phy_rate;
+ u16 surplus_bw;
+ u16 medium_time;
+};
+
+struct add_ts_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Station Index */
+ u16 sta_index;
+
+ /* TSPEC handler uniquely identifying a TSPEC for a STA in a BSS */
+ u16 tspec_index;
+
+ /* To program TPE with required parameters */
+ struct wcn36xx_hal_tspec_ie tspec;
+
+	/* U-APSD flags: 1 bit per AC, encoded as follows (see the helper
+	 * sketch after this struct):
+	 *   b7 b6 b5 b4 b3 b2 b1 b0 =
+	 *    X  X  X  X BE BK VI VO */
+ u8 uapsd;
+
+ /* These parameters are for all the access categories */
+
+ /* Service Interval */
+ u32 service_interval[WCN36XX_HAL_MAX_AC];
+
+ /* Suspend Interval */
+ u32 suspend_interval[WCN36XX_HAL_MAX_AC];
+
+ /* Delay Interval */
+ u32 delay_interval[WCN36XX_HAL_MAX_AC];
+};
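+
+/* Illustrative sketch (hypothetical helper, not part of the HAL messages):
+ * building the uapsd bitmap used in add_ts_req_msg and aggr_add_ts_req,
+ * assuming the documented layout b3..b0 = BE BK VI VO.
+ */
+static inline u8 wcn36xx_hal_uapsd_flags(bool be, bool bk, bool vi, bool vo)
+{
+	return (be ? 0x08 : 0) | (bk ? 0x04 : 0) |
+	       (vi ? 0x02 : 0) | (vo ? 0x01 : 0);
+}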
+
+struct add_rs_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+};
+
+struct del_ts_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Station Index */
+ u16 sta_index;
+
+ /* TSPEC identifier uniquely identifying a TSPEC for a STA in a BSS */
+ u16 tspec_index;
+
+ /* To lookup station id using the mac address */
+ u8 bssid[ETH_ALEN];
+};
+
+struct del_ts_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+};
+
+/* End of TSpec Parameters */
+
+/* Start of BLOCK ACK related Parameters */
+
+struct wcn36xx_hal_add_ba_session_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Station Index */
+ u16 sta_index;
+
+ /* Peer MAC Address */
+ u8 mac_addr[ETH_ALEN];
+
+	/* ADDBA Action Frame dialog token;
+	 * HAL will not interpret this object */
+ u8 dialog_token;
+
+	/* TID for which the BA is being set up.
+	 * This identifies the TC or TS of interest */
+ u8 tid;
+
+ /* 0 - Delayed BA (Not supported)
+ 1 - Immediate BA */
+ u8 policy;
+
+	/* Indicates the number of buffers for this TID (baTID).
+	 * NOTE - This is the requested buffer size. When this
+	 * is processed by HAL and subsequently by HDD, it is
+	 * possible that HDD may change this buffer size. Any
+	 * change in the buffer size should be noted by PE and
+	 * advertised appropriately in the ADDBA response */
+ u16 buffer_size;
+
+ /* BA timeout in TU's 0 means no timeout will occur */
+ u16 timeout;
+
+ /* b0..b3 - Fragment Number - Always set to 0
+ b4..b15 - Starting Sequence Number of first MSDU
+ for which this BA is setup */
+ u16 ssn;
+
+ /* ADDBA direction
+ 1 - Originator
+ 0 - Recipient */
+ u8 direction;
+} __packed;
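+
+/* Illustrative only (hypothetical helper): the ssn field above packs the
+ * fragment number into b0..b3 (always 0) and the starting sequence number
+ * into b4..b15, so encoding under that layout would be:
+ */
+static inline u16 wcn36xx_hal_pack_ba_ssn(u16 seq_num)
+{
+	return (seq_num & 0x0fff) << 4;	/* fragment number stays 0 */
+}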
+
+struct wcn36xx_hal_add_ba_session_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+
+ /* Dialog token */
+ u8 dialog_token;
+
+ /* TID for which the BA session has been setup */
+ u8 ba_tid;
+
+ /* BA Buffer Size allocated for the current BA session */
+ u8 ba_buffer_size;
+
+ u8 ba_session_id;
+
+ /* Reordering Window buffer */
+ u8 win_size;
+
+ /* Station Index to id the sta */
+ u8 sta_index;
+
+ /* Starting Sequence Number */
+ u16 ssn;
+} __packed;
+
+struct wcn36xx_hal_add_ba_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Session Id */
+ u8 session_id;
+
+ /* Reorder Window Size */
+ u8 win_size;
+/* Old FW 1.2.2.4 does not support this*/
+#ifdef FEATURE_ON_CHIP_REORDERING
+ u8 reordering_done_on_chip;
+#endif
+} __packed;
+
+struct wcn36xx_hal_add_ba_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+
+ /* Dialog token */
+ u8 dialog_token;
+} __packed;
+
+struct add_ba_info {
+ u16 ba_enable:1;
+ u16 starting_seq_num:12;
+ u16 reserved:3;
+};
+
+struct wcn36xx_hal_trigger_ba_rsp_candidate {
+ u8 sta_addr[ETH_ALEN];
+ struct add_ba_info ba_info[STACFG_MAX_TC];
+} __packed;
+
+struct wcn36xx_hal_trigget_ba_req_candidate {
+ u8 sta_index;
+ u8 tid_bitmap;
+} __packed;
+
+struct wcn36xx_hal_trigger_ba_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Session Id */
+ u8 session_id;
+
+ /* baCandidateCnt is followed by trigger BA
+	 * Candidate List (tTriggerBaCandidate)
+ */
+ u16 candidate_cnt;
+
+} __packed;
+
+struct wcn36xx_hal_trigger_ba_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* TO SUPPORT BT-AMP */
+ u8 bssid[ETH_ALEN];
+
+ /* success or failure */
+ u32 status;
+
+ /* baCandidateCnt is followed by trigger BA
+	 * Rsp Candidate List (tTriggerRspBaCandidate)
+ */
+ u16 candidate_cnt;
+} __packed;
+
+struct wcn36xx_hal_del_ba_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Station Index */
+ u16 sta_index;
+
+ /* TID for which the BA session is being deleted */
+ u8 tid;
+
+ /* DELBA direction
+ 1 - Originator
+ 0 - Recipient */
+ u8 direction;
+} __packed;
+
+struct wcn36xx_hal_del_ba_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+} __packed;
+
+struct tsm_stats_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Traffic Id */
+ u8 tid;
+
+ u8 bssid[ETH_ALEN];
+};
+
+struct tsm_stats_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+	/* success or failure */
+ u32 status;
+
+ /* Uplink Packet Queue delay */
+ u16 uplink_pkt_queue_delay;
+
+ /* Uplink Packet Queue delay histogram */
+ u16 uplink_pkt_queue_delay_hist[4];
+
+ /* Uplink Packet Transmit delay */
+ u32 uplink_pkt_tx_delay;
+
+ /* Uplink Packet loss */
+ u16 uplink_pkt_loss;
+
+ /* Uplink Packet count */
+ u16 uplink_pkt_count;
+
+ /* Roaming count */
+ u8 roaming_count;
+
+ /* Roaming Delay */
+ u16 roaming_delay;
+};
+
+struct set_key_done_msg {
+ struct wcn36xx_hal_msg_header header;
+
+	/* BSS index of the keys */
+	u8 bssidx;
+ u8 enc_type;
+};
+
+struct wcn36xx_hal_nv_img_download_req_msg {
+ /* Note: The length specified in wcn36xx_hal_nv_img_download_req_msg
+ * messages should be
+ * header.len = sizeof(wcn36xx_hal_nv_img_download_req_msg) +
+ * nv_img_buffer_size */
+ struct wcn36xx_hal_msg_header header;
+
+	/* Fragment sequence number of the NV Image. Note that the NV Image
+	 * might not fit into one message due to the size limitation of the
+	 * SMD channel FIFO. UMAC can hence choose to chop the NV blob into
+	 * multiple fragments starting with sequence number 0, 1, 2 etc.
+	 * The last fragment MUST be indicated by setting the
+	 * isLastFragment field to 1. Note that all the NV blobs will be
+	 * concatenated together by HAL without any padding bytes in
+	 * between. */
+ u16 frag_number;
+
+ /* Is this the last fragment? When set to 1 it indicates that no
+ * more fragments will be sent by UMAC and HAL can concatenate all
+ * the NV blobs rcvd & proceed with the parsing. HAL would generate
+ * a WCN36XX_HAL_DOWNLOAD_NV_RSP to the WCN36XX_HAL_DOWNLOAD_NV_REQ
+ * after it receives each fragment */
+ u16 last_fragment;
+
+ /* NV Image size (number of bytes) */
+ u32 nv_img_buffer_size;
+
+ /* Following the 'nv_img_buffer_size', there should be
+ * nv_img_buffer_size bytes of NV Image i.e.
+ * u8[nv_img_buffer_size] */
+} __packed;
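+
+/* Illustrative sketch of the length rule documented above (hypothetical
+ * helper, not part of the HAL interface; assumes <linux/string.h> is
+ * available for memcpy): the NV bytes follow the struct, and header.len
+ * covers both.
+ */
+static inline void
+wcn36xx_hal_nv_frag_fill(struct wcn36xx_hal_nv_img_download_req_msg *msg,
+			 const void *frag, u32 frag_len)
+{
+	msg->header.len = sizeof(*msg) + frag_len;
+	msg->nv_img_buffer_size = frag_len;
+	memcpy(msg + 1, frag, frag_len);	/* u8[nv_img_buffer_size] */
+}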
+
+struct wcn36xx_hal_nv_img_download_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Success or Failure. HAL would generate a
+ * WCN36XX_HAL_DOWNLOAD_NV_RSP after each fragment */
+ u32 status;
+} __packed;
+
+struct wcn36xx_hal_nv_store_ind {
+ /* Note: The length specified in tHalNvStoreInd messages should be
+ * header.msgLen = sizeof(tHalNvStoreInd) + nvBlobSize */
+ struct wcn36xx_hal_msg_header header;
+
+ /* NV Item */
+ u32 table_id;
+
+ /* Size of NV Blob */
+ u32 nv_blob_size;
+
+ /* Following the 'nvBlobSize', there should be nvBlobSize bytes of
+ * NV blob i.e. u8[nvBlobSize] */
+};
+
+/* End of Block Ack Related Parameters */
+
+#define WCN36XX_HAL_CIPHER_SEQ_CTR_SIZE 6
+
+/* Definition for the MIC failure indication. MAC reports this each time a
+ * MIC failure occurs on an Rx TKIP packet.
+ */
+struct mic_failure_ind_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 bssid[ETH_ALEN];
+
+ /* address used to compute MIC */
+ u8 src_addr[ETH_ALEN];
+
+ /* transmitter address */
+ u8 ta_addr[ETH_ALEN];
+
+ u8 dst_addr[ETH_ALEN];
+
+ u8 multicast;
+
+ /* first byte of IV */
+ u8 iv1;
+
+ /* second byte of IV */
+ u8 key_id;
+
+ /* sequence number */
+ u8 tsc[WCN36XX_HAL_CIPHER_SEQ_CTR_SIZE];
+
+ /* receive address */
+ u8 rx_addr[ETH_ALEN];
+};
+
+struct update_vht_op_mode_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u16 op_mode;
+ u16 sta_id;
+};
+
+struct update_vht_op_mode_params_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u32 status;
+};
+
+struct update_beacon_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 bss_index;
+
+ /* shortPreamble mode. HAL should update all the STA rates when it
+ * receives this message */
+ u8 short_preamble;
+
+ /* short Slot time. */
+ u8 short_slot_time;
+
+ /* Beacon Interval */
+ u16 beacon_interval;
+
+ /* Protection related */
+ u8 lla_coexist;
+ u8 llb_coexist;
+ u8 llg_coexist;
+ u8 ht20_coexist;
+ u8 lln_non_gf_coexist;
+ u8 lsig_tx_op_protection_full_support;
+ u8 rifs_mode;
+
+ u16 param_change_bitmap;
+};
+
+struct update_beacon_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+ u32 status;
+};
+
+struct wcn36xx_hal_send_beacon_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* length of the template. */
+ u32 beacon_length;
+
+ /* Beacon data. */
+ u8 beacon[BEACON_TEMPLATE_SIZE];
+
+ u8 bssid[ETH_ALEN];
+
+ /* TIM IE offset from the beginning of the template. */
+ u32 tim_ie_offset;
+
+	/* P2P IE offset from the beginning of the template */
+ u16 p2p_ie_offset;
+} __packed;
+
+struct send_beacon_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+ u32 status;
+} __packed;
+
+struct enable_radar_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 bssid[ETH_ALEN];
+ u8 channel;
+};
+
+struct enable_radar_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Link Parameters */
+ u8 bssid[ETH_ALEN];
+
+ /* success or failure */
+ u32 status;
+};
+
+struct radar_detect_intr_ind_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 radar_det_channel;
+};
+
+struct radar_detect_ind_msg {
+ struct wcn36xx_hal_msg_header header;
+
+	/* channel number on which the radar was detected */
+ u8 channel_number;
+
+	/* RADAR pulse width in microseconds */
+ u16 radar_pulse_width;
+
+ /* Number of RADAR pulses */
+ u16 num_radar_pulse;
+};
+
+struct wcn36xx_hal_get_tpc_report_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 sta[ETH_ALEN];
+ u8 dialog_token;
+ u8 txpower;
+};
+
+struct wcn36xx_hal_get_tpc_report_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+};
+
+struct wcn36xx_hal_send_probe_resp_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 probe_resp_template[BEACON_TEMPLATE_SIZE];
+ u32 probe_resp_template_len;
+ u32 proxy_probe_req_valid_ie_bmap[8];
+ u8 bssid[ETH_ALEN];
+};
+
+struct send_probe_resp_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+};
+
+struct send_unknown_frame_rx_ind_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+};
+
+struct wcn36xx_hal_delete_sta_context_ind_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u16 aid;
+ u16 sta_id;
+
+ /* TO SUPPORT BT-AMP */
+ u8 bssid[ETH_ALEN];
+
+ /* HAL copies bssid from the sta table. */
+ u8 addr2[ETH_ALEN];
+
+	/* To unify the keepalive / unknown A2 / tim-based disassociation */
+ u16 reason_code;
+} __packed;
+
+struct indicate_del_sta {
+ struct wcn36xx_hal_msg_header header;
+ u8 aid;
+ u8 sta_index;
+ u8 bss_index;
+ u8 reason_code;
+ u32 status;
+};
+
+struct bt_amp_event_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ enum bt_amp_event_type btAmpEventType;
+};
+
+struct bt_amp_event_rsp {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+};
+
+struct tl_hal_flush_ac_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+	/* Station index; originates from HAL */
+ u8 sta_id;
+
+ /* TID for which the transmit queue is being flushed */
+ u8 tid;
+};
+
+struct tl_hal_flush_ac_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+	/* Station index; originates from HAL */
+ u8 sta_id;
+
+ /* TID for which the transmit queue is being flushed */
+ u8 tid;
+
+ /* success or failure */
+ u32 status;
+};
+
+struct wcn36xx_hal_enter_imps_req_msg {
+ struct wcn36xx_hal_msg_header header;
+};
+
+struct wcn36xx_hal_exit_imps_req {
+ struct wcn36xx_hal_msg_header header;
+};
+
+struct wcn36xx_hal_enter_bmps_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 bss_index;
+
+ /* TBTT value derived from the last beacon */
+#ifndef BUILD_QWPTTSTATIC
+ u64 tbtt;
+#endif
+ u8 dtim_count;
+
+ /* DTIM period given to HAL during association may not be valid, if
+ * association is based on ProbeRsp instead of beacon. */
+ u8 dtim_period;
+
+ /* For CCX and 11R Roaming */
+ u32 rssi_filter_period;
+
+ u32 num_beacon_per_rssi_average;
+ u8 rssi_filter_enable;
+} __packed;
+
+struct wcn36xx_hal_exit_bmps_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 send_data_null;
+ u8 bss_index;
+} __packed;
+
+struct wcn36xx_hal_missed_beacon_ind_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 bss_index;
+} __packed;
+
+/* Beacon Filtering data structures */
+
+/* The beacon filter request message (struct
+ * wcn36xx_hal_add_bcn_filter_req_msg below) is followed in the message
+ * buffer by ie_num instances of the following structure
+ */
+struct beacon_filter_ie {
+ u8 element_id;
+ u8 check_ie_presence;
+ u8 offset;
+ u8 value;
+ u8 bitmask;
+ u8 ref;
+};
+
+struct wcn36xx_hal_add_bcn_filter_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u16 capability_info;
+ u16 capability_mask;
+ u16 beacon_interval;
+ u16 ie_num;
+ u8 bss_index;
+ u8 reserved;
+};
+
+struct wcn36xx_hal_rem_bcn_filter_req {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 ie_Count;
+ u8 rem_ie_id[1];
+};
+
+#define WCN36XX_HAL_IPV4_ARP_REPLY_OFFLOAD 0
+#define WCN36XX_HAL_IPV6_NEIGHBOR_DISCOVERY_OFFLOAD 1
+#define WCN36XX_HAL_IPV6_NS_OFFLOAD 2
+#define WCN36XX_HAL_IPV6_ADDR_LEN 16
+#define WCN36XX_HAL_OFFLOAD_DISABLE 0
+#define WCN36XX_HAL_OFFLOAD_ENABLE 1
+#define WCN36XX_HAL_OFFLOAD_BCAST_FILTER_ENABLE 0x2
+#define WCN36XX_HAL_OFFLOAD_ARP_AND_BCAST_FILTER_ENABLE \
+	(WCN36XX_HAL_OFFLOAD_ENABLE | \
+	 WCN36XX_HAL_OFFLOAD_BCAST_FILTER_ENABLE)
+
+struct wcn36xx_hal_ns_offload_params {
+ u8 src_ipv6_addr[WCN36XX_HAL_IPV6_ADDR_LEN];
+ u8 self_ipv6_addr[WCN36XX_HAL_IPV6_ADDR_LEN];
+
+	/* Only 2 Neighbor Advertisement target IPv6 addresses are supported */
+ u8 target_ipv6_addr1[WCN36XX_HAL_IPV6_ADDR_LEN];
+ u8 target_ipv6_addr2[WCN36XX_HAL_IPV6_ADDR_LEN];
+
+ u8 self_addr[ETH_ALEN];
+ u8 src_ipv6_addr_valid:1;
+ u8 target_ipv6_addr1_valid:1;
+ u8 target_ipv6_addr2_valid:1;
+ u8 reserved1:5;
+
+ /* make it DWORD aligned */
+ u8 reserved2;
+
+ /* slot index for this offload */
+ u32 slot_index;
+ u8 bss_index;
+};
+
+struct wcn36xx_hal_host_offload_req {
+ u8 offload_Type;
+
+ /* enable or disable */
+ u8 enable;
+
+ union {
+ u8 host_ipv4_addr[4];
+ u8 host_ipv6_addr[WCN36XX_HAL_IPV6_ADDR_LEN];
+ } u;
+};
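+
+/* Illustrative sketch (hypothetical helper, not part of the HAL API;
+ * assumes <linux/string.h> for memcpy): enabling IPv4 ARP reply offload
+ * with the definitions above.
+ */
+static inline void
+wcn36xx_hal_arp_offload_fill(struct wcn36xx_hal_host_offload_req *req,
+			     const u8 ipv4[4])
+{
+	req->offload_Type = WCN36XX_HAL_IPV4_ARP_REPLY_OFFLOAD;
+	req->enable = WCN36XX_HAL_OFFLOAD_ENABLE;
+	memcpy(req->u.host_ipv4_addr, ipv4, 4);
+}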
+
+struct wcn36xx_hal_host_offload_req_msg {
+ struct wcn36xx_hal_msg_header header;
+ struct wcn36xx_hal_host_offload_req host_offload_params;
+ struct wcn36xx_hal_ns_offload_params ns_offload_params;
+};
+
+/* Packet Types. */
+#define WCN36XX_HAL_KEEP_ALIVE_NULL_PKT 1
+#define WCN36XX_HAL_KEEP_ALIVE_UNSOLICIT_ARP_RSP 2
+
+/* Enable or disable keep alive */
+#define WCN36XX_HAL_KEEP_ALIVE_DISABLE 0
+#define WCN36XX_HAL_KEEP_ALIVE_ENABLE 1
+#define WCN36XX_KEEP_ALIVE_TIME_PERIOD 30 /* unit: s */
+
+/* Keep Alive request. */
+struct wcn36xx_hal_keep_alive_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 packet_type;
+ u32 time_period;
+ u8 host_ipv4_addr[WCN36XX_HAL_IPV4_ADDR_LEN];
+ u8 dest_ipv4_addr[WCN36XX_HAL_IPV4_ADDR_LEN];
+ u8 dest_addr[ETH_ALEN];
+ u8 bss_index;
+} __packed;
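+
+/* Illustrative only (hypothetical helper): a NULL-frame keepalive sent
+ * every WCN36XX_KEEP_ALIVE_TIME_PERIOD seconds, using the definitions
+ * above. The address fields may be left zeroed for the NULL packet type.
+ */
+static inline void
+wcn36xx_hal_keep_alive_null_fill(struct wcn36xx_hal_keep_alive_req_msg *msg,
+				 u8 bss_index)
+{
+	msg->packet_type = WCN36XX_HAL_KEEP_ALIVE_NULL_PKT;
+	msg->time_period = WCN36XX_KEEP_ALIVE_TIME_PERIOD;
+	msg->bss_index = bss_index;
+}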
+
+struct wcn36xx_hal_rssi_threshold_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ s8 threshold1:8;
+ s8 threshold2:8;
+ s8 threshold3:8;
+ u8 thres1_pos_notify:1;
+ u8 thres1_neg_notify:1;
+ u8 thres2_pos_notify:1;
+ u8 thres2_neg_notify:1;
+ u8 thres3_pos_notify:1;
+ u8 thres3_neg_notify:1;
+ u8 reserved10:2;
+};
+
+struct wcn36xx_hal_enter_uapsd_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 bk_delivery:1;
+ u8 be_delivery:1;
+ u8 vi_delivery:1;
+ u8 vo_delivery:1;
+ u8 bk_trigger:1;
+ u8 be_trigger:1;
+ u8 vi_trigger:1;
+ u8 vo_trigger:1;
+ u8 bss_index;
+};
+
+struct wcn36xx_hal_exit_uapsd_req_msg {
+ struct wcn36xx_hal_msg_header header;
+ u8 bss_index;
+};
+
+#define WCN36XX_HAL_WOWL_BCAST_PATTERN_MAX_SIZE 128
+#define WCN36XX_HAL_WOWL_BCAST_MAX_NUM_PATTERNS 16
+
+struct wcn36xx_hal_wowl_add_bcast_ptrn_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Pattern ID */
+ u8 id;
+
+ /* Pattern byte offset from beginning of the 802.11 packet to start
+ * of the wake-up pattern */
+ u8 byte_Offset;
+
+ /* Non-Zero Pattern size */
+ u8 size;
+
+ /* Pattern */
+ u8 pattern[WCN36XX_HAL_WOWL_BCAST_PATTERN_MAX_SIZE];
+
+ /* Non-zero pattern mask size */
+ u8 mask_size;
+
+ /* Pattern mask */
+ u8 mask[WCN36XX_HAL_WOWL_BCAST_PATTERN_MAX_SIZE];
+
+ /* Extra pattern */
+ u8 extra[WCN36XX_HAL_WOWL_BCAST_PATTERN_MAX_SIZE];
+
+ /* Extra pattern mask */
+ u8 mask_extra[WCN36XX_HAL_WOWL_BCAST_PATTERN_MAX_SIZE];
+
+ u8 bss_index;
+};
+
+struct wcn36xx_hal_wow_del_bcast_ptrn_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Pattern ID of the wakeup pattern to be deleted */
+ u8 id;
+ u8 bss_index;
+};
+
+struct wcn36xx_hal_wowl_enter_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Enables/disables magic packet filtering */
+ u8 magic_packet_enable;
+
+ /* Magic pattern */
+ u8 magic_pattern[ETH_ALEN];
+
+	/* Enables/disables packet pattern filtering in firmware. Enabling
+	 * this flag enables broadcast pattern matching in firmware. If
+	 * unicast pattern matching is also desired, the
+	 * ucUcastPatternFilteringEnable flag must be set to true as well
+	 */
+ u8 pattern_filtering_enable;
+
+ /* Enables/disables unicast packet pattern filtering. This flag
+ * specifies whether we want to do pattern match on unicast packets
+ * as well and not just broadcast packets. This flag has no effect
+ * if the ucPatternFilteringEnable (main controlling flag) is set
+ * to false
+ */
+ u8 ucast_pattern_filtering_enable;
+
+ /* This configuration is valid only when magicPktEnable=1. It
+ * requests hardware to wake up when it receives the Channel Switch
+ * Action Frame.
+ */
+ u8 wow_channel_switch_receive;
+
+ /* This configuration is valid only when magicPktEnable=1. It
+ * requests hardware to wake up when it receives the
+ * Deauthentication Frame.
+ */
+ u8 wow_deauth_receive;
+
+ /* This configuration is valid only when magicPktEnable=1. It
+ * requests hardware to wake up when it receives the Disassociation
+ * Frame.
+ */
+ u8 wow_disassoc_receive;
+
+ /* This configuration is valid only when magicPktEnable=1. It
+ * requests hardware to wake up when it has missed consecutive
+ * beacons. This is a hardware register configuration (NOT a
+ * firmware configuration).
+ */
+ u8 wow_max_missed_beacons;
+
+	/* This configuration is valid only when magicPktEnable=1. This is
+	 * a timeout value in units of microseconds. It requests hardware
+	 * to unconditionally wake up after it has stayed in WoWLAN mode
+	 * for some time. Set to 0 to disable this feature.
+	 */
+ u8 wow_max_sleep;
+
+ /* This configuration directs the WoW packet filtering to look for
+ * EAP-ID requests embedded in EAPOL frames and use this as a wake
+ * source.
+ */
+ u8 wow_eap_id_request_enable;
+
+ /* This configuration directs the WoW packet filtering to look for
+ * EAPOL-4WAY requests and use this as a wake source.
+ */
+ u8 wow_eapol_4way_enable;
+
+	/* This configuration allows a host wakeup on a network scan
+ * offload match.
+ */
+ u8 wow_net_scan_offload_match;
+
+ /* This configuration allows a host wakeup on any GTK rekeying
+ * error.
+ */
+ u8 wow_gtk_rekey_error;
+
+ /* This configuration allows a host wakeup on BSS connection loss.
+ */
+ u8 wow_bss_connection_loss;
+
+ u8 bss_index;
+};
+
+struct wcn36xx_hal_wowl_exit_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 bss_index;
+};
+
+struct wcn36xx_hal_get_rssi_req_msg {
+ struct wcn36xx_hal_msg_header header;
+};
+
+struct wcn36xx_hal_get_roam_rssi_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Valid STA Idx for per STA stats request */
+ u32 sta_id;
+};
+
+struct wcn36xx_hal_set_uapsd_ac_params_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* STA index */
+ u8 sta_idx;
+
+ /* Access Category */
+ u8 ac;
+
+ /* User Priority */
+ u8 up;
+
+ /* Service Interval */
+ u32 service_interval;
+
+ /* Suspend Interval */
+ u32 suspend_interval;
+
+ /* Delay Interval */
+ u32 delay_interval;
+};
+
+struct wcn36xx_hal_configure_rxp_filter_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 set_mcst_bcst_filter_setting;
+ u8 set_mcst_bcst_filter;
+};
+
+struct wcn36xx_hal_enter_imps_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+};
+
+struct wcn36xx_hal_exit_imps_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+};
+
+struct wcn36xx_hal_enter_bmps_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+
+ u8 bss_index;
+} __packed;
+
+struct wcn36xx_hal_exit_bmps_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+
+ u8 bss_index;
+} __packed;
+
+struct wcn36xx_hal_enter_uapsd_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+
+ u8 bss_index;
+};
+
+struct wcn36xx_hal_exit_uapsd_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+
+ u8 bss_index;
+};
+
+struct wcn36xx_hal_rssi_notification_ind_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u32 rssi_thres1_pos_cross:1;
+ u32 rssi_thres1_neg_cross:1;
+ u32 rssi_thres2_pos_cross:1;
+ u32 rssi_thres2_neg_cross:1;
+ u32 rssi_thres3_pos_cross:1;
+ u32 rssi_thres3_neg_cross:1;
+ u32 avg_rssi:8;
+ u32 reserved:18;
+};
+
+struct wcn36xx_hal_get_rssio_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+ s8 rssi;
+};
+
+struct wcn36xx_hal_get_roam_rssi_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+
+ u8 sta_id;
+ s8 rssi;
+};
+
+struct wcn36xx_hal_wowl_enter_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+ u8 bss_index;
+};
+
+struct wcn36xx_hal_wowl_exit_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+ u8 bss_index;
+};
+
+struct wcn36xx_hal_add_bcn_filter_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+};
+
+struct wcn36xx_hal_rem_bcn_filter_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+};
+
+struct wcn36xx_hal_add_wowl_bcast_ptrn_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+ u8 bss_index;
+};
+
+struct wcn36xx_hal_del_wowl_bcast_ptrn_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+ u8 bss_index;
+};
+
+struct wcn36xx_hal_host_offload_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+};
+
+struct wcn36xx_hal_keep_alive_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+};
+
+struct wcn36xx_hal_set_rssi_thresh_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+};
+
+struct wcn36xx_hal_set_uapsd_ac_params_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+};
+
+struct wcn36xx_hal_configure_rxp_filter_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+};
+
+struct set_max_tx_pwr_req {
+ struct wcn36xx_hal_msg_header header;
+
+ /* BSSID is needed to identify which session issued this request.
+ * As the request has power constraints, this should be applied
+ * only to that session */
+ u8 bssid[ETH_ALEN];
+
+ u8 self_addr[ETH_ALEN];
+
+ /* In request, power == MaxTx power to be used. */
+ u8 power;
+};
+
+struct set_max_tx_pwr_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* power == tx power used for management frames */
+ u8 power;
+
+ /* success or failure */
+ u32 status;
+};
+
+struct set_tx_pwr_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* TX Power in milli watts */
+ u32 tx_power;
+
+ u8 bss_index;
+};
+
+struct set_tx_pwr_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+};
+
+struct get_tx_pwr_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 sta_id;
+};
+
+struct get_tx_pwr_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+
+ /* TX Power in milli watts */
+ u32 tx_power;
+};
+
+struct set_p2p_gonoa_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 opp_ps;
+ u32 ct_window;
+ u8 count;
+ u32 duration;
+ u32 interval;
+ u32 single_noa_duration;
+ u8 ps_selection;
+};
+
+struct set_p2p_gonoa_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+};
+
+struct wcn36xx_hal_add_sta_self_req {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 self_addr[ETH_ALEN];
+ u32 status;
+} __packed;
+
+struct wcn36xx_hal_add_sta_self_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+
+ /* Self STA Index */
+ u8 self_sta_index;
+
+ /* DPU Index (IGTK, PTK, GTK all same) */
+ u8 dpu_index;
+
+ /* DPU Signature */
+ u8 dpu_signature;
+} __packed;
+
+struct wcn36xx_hal_del_sta_self_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 self_addr[ETH_ALEN];
+} __packed;
+
+struct wcn36xx_hal_del_sta_self_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+	/* success or failure */
+ u32 status;
+
+ u8 self_addr[ETH_ALEN];
+} __packed;
+
+struct aggr_add_ts_req {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Station Index */
+ u16 sta_idx;
+
+	/* TSPEC handler uniquely identifying a TSPEC for a STA in a BSS.
+	 * This will carry the bitmap with the bit positions representing
+	 * different ACs */
+ u16 tspec_index;
+
+ /* Tspec info per AC To program TPE with required parameters */
+ struct wcn36xx_hal_tspec_ie tspec[WCN36XX_HAL_MAX_AC];
+
+	/* U-APSD flags: 1 bit per AC, encoded as follows (see
+	 * wcn36xx_hal_uapsd_flags() above):
+	 *   b7 b6 b5 b4 b3 b2 b1 b0 =
+	 *    X  X  X  X BE BK VI VO */
+ u8 uapsd;
+
+ /* These parameters are for all the access categories */
+
+ /* Service Interval */
+ u32 service_interval[WCN36XX_HAL_MAX_AC];
+
+ /* Suspend Interval */
+ u32 suspend_interval[WCN36XX_HAL_MAX_AC];
+
+ /* Delay Interval */
+ u32 delay_interval[WCN36XX_HAL_MAX_AC];
+};
+
+struct aggr_add_ts_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status0;
+
+ /* FIXME PRIMA for future use for 11R */
+ u32 status1;
+};
+
+struct wcn36xx_hal_configure_apps_cpu_wakeup_state_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 is_apps_cpu_awake;
+};
+
+struct wcn36xx_hal_configure_apps_cpu_wakeup_state_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+};
+
+struct wcn36xx_hal_dump_cmd_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u32 arg1;
+ u32 arg2;
+ u32 arg3;
+ u32 arg4;
+ u32 arg5;
+} __packed;
+
+struct wcn36xx_hal_dump_cmd_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+
+	/* Length of the response message */
+ u32 rsp_length;
+
+	/* FIXME: Currently assuming the response will be less than
+	 * 100 bytes */
+ u8 rsp_buffer[DUMPCMD_RSP_BUFFER];
+} __packed;
+
+#define WLAN_COEX_IND_DATA_SIZE (4)
+#define WLAN_COEX_IND_TYPE_DISABLE_HB_MONITOR (0)
+#define WLAN_COEX_IND_TYPE_ENABLE_HB_MONITOR (1)
+
+struct coex_ind_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Coex Indication Type */
+ u32 type;
+
+ /* Coex Indication Data */
+ u32 data[WLAN_COEX_IND_DATA_SIZE];
+};
+
+struct wcn36xx_hal_tx_compl_ind_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Tx Complete Indication Success or Failure */
+ u32 status;
+};
+
+struct wcn36xx_hal_wlan_host_suspend_ind_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u32 configured_mcst_bcst_filter_setting;
+ u32 active_session_count;
+};
+
+struct wcn36xx_hal_wlan_exclude_unencrpted_ind_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 dot11_exclude_unencrypted;
+ u8 bssid[ETH_ALEN];
+};
+
+struct noa_attr_ind_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 index;
+ u8 opp_ps_flag;
+ u16 ctwin;
+
+ u16 noa1_interval_count;
+ u16 bss_index;
+ u32 noa1_duration;
+ u32 noa1_interval;
+ u32 noa1_starttime;
+
+ u16 noa2_interval_count;
+ u16 reserved2;
+ u32 noa2_duration;
+ u32 noa2_interval;
+ u32 noa2_start_time;
+
+ u32 status;
+};
+
+struct noa_start_ind_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u32 status;
+ u32 bss_index;
+};
+
+struct wcn36xx_hal_wlan_host_resume_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 configured_mcst_bcst_filter_setting;
+};
+
+struct wcn36xx_hal_host_resume_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+};
+
+struct wcn36xx_hal_del_ba_ind_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u16 sta_idx;
+
+ /* Peer MAC Address, whose BA session has timed out */
+ u8 peer_addr[ETH_ALEN];
+
+ /* TID for which a BA session timeout is being triggered */
+ u8 ba_tid;
+
+ /* DELBA direction
+ * 1 - Originator
+ * 0 - Recipient
+ */
+ u8 direction;
+
+ u32 reason_code;
+
+ /* TO SUPPORT BT-AMP */
+ u8 bssid[ETH_ALEN];
+};
+
+/* PNO Messages */
+
+/* Max number of channels that a network can be found on */
+#define WCN36XX_HAL_PNO_MAX_NETW_CHANNELS 26
+
+/* Max number of channels that a network can be found on */
+#define WCN36XX_HAL_PNO_MAX_NETW_CHANNELS_EX 60
+
+/* Maximum numbers of networks supported by PNO */
+#define WCN36XX_HAL_PNO_MAX_SUPP_NETWORKS 16
+
+/* The number of scan time intervals that can be programmed into PNO */
+#define WCN36XX_HAL_PNO_MAX_SCAN_TIMERS 10
+
+/* Maximum size of the probe template */
+#define WCN36XX_HAL_PNO_MAX_PROBE_SIZE 450
+
+/* Type of PNO enabling:
+ *
+ * Immediate - scanning will start immediately and PNO procedure will be
+ * repeated based on timer
+ *
+ * Suspend - scanning will start at suspend
+ *
+ * Resume - scanning will start on system resume
+ */
+enum pno_mode {
+ PNO_MODE_IMMEDIATE,
+ PNO_MODE_ON_SUSPEND,
+ PNO_MODE_ON_RESUME,
+ PNO_MODE_MAX = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+/* Authentication type */
+enum auth_type {
+ AUTH_TYPE_ANY = 0,
+ AUTH_TYPE_OPEN_SYSTEM = 1,
+
+ /* Upper layer authentication types */
+ AUTH_TYPE_WPA = 2,
+ AUTH_TYPE_WPA_PSK = 3,
+
+ AUTH_TYPE_RSN = 4,
+ AUTH_TYPE_RSN_PSK = 5,
+ AUTH_TYPE_FT_RSN = 6,
+ AUTH_TYPE_FT_RSN_PSK = 7,
+ AUTH_TYPE_WAPI_WAI_CERTIFICATE = 8,
+ AUTH_TYPE_WAPI_WAI_PSK = 9,
+
+ AUTH_TYPE_MAX = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+/* Encryption type */
+enum ed_type {
+ ED_ANY = 0,
+ ED_NONE = 1,
+ ED_WEP = 2,
+ ED_TKIP = 3,
+ ED_CCMP = 4,
+ ED_WPI = 5,
+
+ ED_TYPE_MAX = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+/* SSID broadcast type */
+enum ssid_bcast_type {
+ BCAST_UNKNOWN = 0,
+ BCAST_NORMAL = 1,
+ BCAST_HIDDEN = 2,
+
+ BCAST_TYPE_MAX = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+/* The network description that PNO will look for */
+struct network_type {
+ /* SSID of the BSS */
+ struct wcn36xx_hal_mac_ssid ssid;
+
+ /* Authentication type for the network */
+ enum auth_type authentication;
+
+ /* Encryption type for the network */
+ enum ed_type encryption;
+
+	/* Indicates the channels on which the network can be found;
+	 * 0 - if all channels */
+ u8 channel_count;
+ u8 channels[WCN36XX_HAL_PNO_MAX_NETW_CHANNELS];
+
+ /* Indicates the RSSI threshold for the network to be considered */
+ u8 rssi_threshold;
+};
+
+struct scan_timer {
+	/* How long it should wait, in seconds */
+ u32 value;
+
+	/* How many times it should repeat that wait value; 0 - keep using
+	 * this timer until PNO is disabled */
+ u32 repeat;
+
+ /* e.g: 2 3 4 0 - it will wait 2s between consecutive scans for 3
+ * times - after that it will wait 4s between consecutive scans
+ * until disabled */
+};
+
+/* The network parameters to be sent to the PNO algorithm */
+struct scan_timers_type {
+ /* set to 0 if you wish for PNO to use its default telescopic timer */
+ u8 count;
+
+	/* A set value represents the amount of time that PNO will wait
+	 * between two consecutive scan procedures. If the desire is for a
+	 * uniform timer that always fires at the exact same interval, one
+	 * single value is to be set. If there is a desire for a more
+	 * complex, telescopic-like timer, multiple values can be set -
+	 * once PNO reaches the end of the array it will continue scanning
+	 * at intervals presented by the last value */
+ struct scan_timer values[WCN36XX_HAL_PNO_MAX_SCAN_TIMERS];
+};
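+
+/* Illustrative only (hypothetical helper): the telescopic timer example
+ * from the comments above - wait 2 s between scans three times, then 4 s
+ * between scans until PNO is disabled (repeat == 0).
+ */
+static inline void wcn36xx_pno_example_timers(struct scan_timers_type *t)
+{
+	t->count = 2;
+	t->values[0].value = 2;		/* wait 2 s ... */
+	t->values[0].repeat = 3;	/* ... three times */
+	t->values[1].value = 4;		/* then 4 s ... */
+	t->values[1].repeat = 0;	/* ... until PNO is disabled */
+}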
+
+/* Preferred network list request */
+struct set_pref_netw_list_req {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Enable PNO */
+ u32 enable;
+
+ /* Immediate, On Suspend, On Resume */
+ enum pno_mode mode;
+
+ /* Number of networks sent for PNO */
+ u32 networks_count;
+
+ /* The networks that PNO needs to look for */
+ struct network_type networks[WCN36XX_HAL_PNO_MAX_SUPP_NETWORKS];
+
+ /* The scan timers required for PNO */
+ struct scan_timers_type scan_timers;
+
+ /* Probe template for 2.4GHz band */
+ u16 band_24g_probe_size;
+ u8 band_24g_probe_template[WCN36XX_HAL_PNO_MAX_PROBE_SIZE];
+
+ /* Probe template for 5GHz band */
+ u16 band_5g_probe_size;
+ u8 band_5g_probe_template[WCN36XX_HAL_PNO_MAX_PROBE_SIZE];
+};
+
+/* The network description that PNO will look for */
+struct network_type_new {
+ /* SSID of the BSS */
+ struct wcn36xx_hal_mac_ssid ssid;
+
+ /* Authentication type for the network */
+ enum auth_type authentication;
+
+ /* Encryption type for the network */
+ enum ed_type encryption;
+
+ /* SSID broadcast type, normal, hidden or unknown */
+ enum ssid_bcast_type bcast_network_type;
+
+	/* Indicates the channels on which the network can be found;
+	 * 0 - if all channels */
+ u8 channel_count;
+ u8 channels[WCN36XX_HAL_PNO_MAX_NETW_CHANNELS];
+
+ /* Indicates the RSSI threshold for the network to be considered */
+ u8 rssi_threshold;
+};
+
+/* Preferred network list request new */
+struct set_pref_netw_list_req_new {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Enable PNO */
+ u32 enable;
+
+ /* Immediate, On Suspend, On Resume */
+ enum pno_mode mode;
+
+ /* Number of networks sent for PNO */
+ u32 networks_count;
+
+ /* The networks that PNO needs to look for */
+ struct network_type_new networks[WCN36XX_HAL_PNO_MAX_SUPP_NETWORKS];
+
+ /* The scan timers required for PNO */
+ struct scan_timers_type scan_timers;
+
+ /* Probe template for 2.4GHz band */
+ u16 band_24g_probe_size;
+ u8 band_24g_probe_template[WCN36XX_HAL_PNO_MAX_PROBE_SIZE];
+
+ /* Probe template for 5GHz band */
+ u16 band_5g_probe_size;
+ u8 band_5g_probe_template[WCN36XX_HAL_PNO_MAX_PROBE_SIZE];
+};
+
+/* Preferred network list response */
+struct set_pref_netw_list_resp {
+ struct wcn36xx_hal_msg_header header;
+
+ /* status of the request - just to indicate that PNO has
+ * acknowledged the request and will start scanning */
+ u32 status;
+};
+
+/* Preferred network found indication */
+struct pref_netw_found_ind {
+
+ struct wcn36xx_hal_msg_header header;
+
+ /* Network that was found with the highest RSSI */
+ struct wcn36xx_hal_mac_ssid ssid;
+
+ /* Indicates the RSSI */
+ u8 rssi;
+};
+
+/* RSSI Filter request */
+struct set_rssi_filter_req {
+ struct wcn36xx_hal_msg_header header;
+
+ /* RSSI Threshold */
+ u8 rssi_threshold;
+};
+
+/* Set RSSI filter resp */
+struct set_rssi_filter_resp {
+ struct wcn36xx_hal_msg_header header;
+
+ /* status of the request */
+ u32 status;
+};
+
+/* Update scan params - sent from host to PNO to be used during PNO
+ * scanning */
+struct wcn36xx_hal_update_scan_params_req {
+
+ struct wcn36xx_hal_msg_header header;
+
+ /* Host setting for 11d */
+ u8 dot11d_enabled;
+
+ /* Lets PNO know that host has determined the regulatory domain */
+ u8 dot11d_resolved;
+
+ /* Channels on which PNO is allowed to scan */
+ u8 channel_count;
+ u8 channels[WCN36XX_HAL_PNO_MAX_NETW_CHANNELS];
+
+ /* Minimum channel time */
+ u16 active_min_ch_time;
+
+ /* Maximum channel time */
+ u16 active_max_ch_time;
+
+ /* Minimum channel time */
+ u16 passive_min_ch_time;
+
+ /* Maximum channel time */
+ u16 passive_max_ch_time;
+
+ /* Cb State */
+ enum phy_chan_bond_state state;
+} __packed;
+
+/* Update scan params - sent from host to PNO to be used during PNO
+ * scanning */
+struct update_scan_params_req_ex {
+
+ struct wcn36xx_hal_msg_header header;
+
+ /* Host setting for 11d */
+ u8 dot11d_enabled;
+
+ /* Lets PNO know that host has determined the regulatory domain */
+ u8 dot11d_resolved;
+
+ /* Channels on which PNO is allowed to scan */
+ u8 channel_count;
+ u8 channels[WCN36XX_HAL_PNO_MAX_NETW_CHANNELS_EX];
+
+ /* Minimum channel time */
+ u16 active_min_ch_time;
+
+ /* Maximum channel time */
+ u16 active_max_ch_time;
+
+ /* Minimum channel time */
+ u16 passive_min_ch_time;
+
+ /* Maximum channel time */
+ u16 passive_max_ch_time;
+
+ /* Cb State */
+ enum phy_chan_bond_state state;
+};
+
+/* Update scan params response - status returned by PNO for the update
+ * scan params request */
+struct wcn36xx_hal_update_scan_params_resp {
+
+ struct wcn36xx_hal_msg_header header;
+
+ /* status of the request */
+ u32 status;
+} __packed;
+
+struct wcn36xx_hal_set_tx_per_tracking_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* 0: disable, 1:enable */
+ u8 tx_per_tracking_enable;
+
+ /* Check period, unit is sec. */
+ u8 tx_per_tracking_period;
+
+	/* (Failed TX packets)/(Total TX packets) ratio, in units of 10%. */
+ u8 tx_per_tracking_ratio;
+
+	/* Check watermark: once the number of TX packets exceeds this
+	 * value, the check is performed. Default is 5 */
+ u32 tx_per_tracking_watermark;
+};
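+
+/* Illustrative only (hypothetical helper): tx_per_tracking_ratio above is
+ * in units of 10%, so converting to percent is a multiply.
+ */
+static inline u32 wcn36xx_hal_tx_per_ratio_percent(u8 tx_per_tracking_ratio)
+{
+	return tx_per_tracking_ratio * 10;	/* e.g. 5 -> 50% */
+}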
+
+struct wcn36xx_hal_set_tx_per_tracking_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+};
+
+struct tx_per_hit_ind_msg {
+ struct wcn36xx_hal_msg_header header;
+};
+
+/* Packet Filtering Definitions Begin */
+#define WCN36XX_HAL_PROTOCOL_DATA_LEN 8
+#define WCN36XX_HAL_MAX_NUM_MULTICAST_ADDRESS 240
+#define WCN36XX_HAL_MAX_NUM_FILTERS 20
+#define WCN36XX_HAL_MAX_CMP_PER_FILTER 10
+
+enum wcn36xx_hal_receive_packet_filter_type {
+ HAL_RCV_FILTER_TYPE_INVALID,
+ HAL_RCV_FILTER_TYPE_FILTER_PKT,
+ HAL_RCV_FILTER_TYPE_BUFFER_PKT,
+ HAL_RCV_FILTER_TYPE_MAX_ENUM_SIZE
+};
+
+enum wcn36xx_hal_rcv_pkt_flt_protocol_type {
+ HAL_FILTER_PROTO_TYPE_INVALID,
+ HAL_FILTER_PROTO_TYPE_MAC,
+ HAL_FILTER_PROTO_TYPE_ARP,
+ HAL_FILTER_PROTO_TYPE_IPV4,
+ HAL_FILTER_PROTO_TYPE_IPV6,
+ HAL_FILTER_PROTO_TYPE_UDP,
+ HAL_FILTER_PROTO_TYPE_MAX
+};
+
+enum wcn36xx_hal_rcv_pkt_flt_cmp_flag_type {
+ HAL_FILTER_CMP_TYPE_INVALID,
+ HAL_FILTER_CMP_TYPE_EQUAL,
+ HAL_FILTER_CMP_TYPE_MASK_EQUAL,
+ HAL_FILTER_CMP_TYPE_NOT_EQUAL,
+ HAL_FILTER_CMP_TYPE_MAX
+};
+
+struct wcn36xx_hal_rcv_pkt_filter_params {
+ u8 protocol_layer;
+ u8 cmp_flag;
+
+ /* Length of the data to compare */
+ u16 data_length;
+
+ /* from start of the respective frame header */
+ u8 data_offset;
+
+ /* Reserved field */
+ u8 reserved;
+
+ /* Data to compare */
+ u8 compare_data[WCN36XX_HAL_PROTOCOL_DATA_LEN];
+
+ /* Mask to be applied on the received packet data before compare */
+ u8 data_mask[WCN36XX_HAL_PROTOCOL_DATA_LEN];
+};
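+
+/* Illustrative sketch (hypothetical helper with assumed values, not
+ * confirmed HAL behaviour): a filter parameter matching the two bytes at
+ * offset 2 of the UDP header (the destination port) against 0x0043, using
+ * the enums above.
+ */
+static inline void
+wcn36xx_hal_udp_port_filter_fill(struct wcn36xx_hal_rcv_pkt_filter_params *p)
+{
+	p->protocol_layer = HAL_FILTER_PROTO_TYPE_UDP;
+	p->cmp_flag = HAL_FILTER_CMP_TYPE_EQUAL;
+	p->data_length = 2;		/* compare two bytes */
+	p->data_offset = 2;		/* UDP destination port */
+	p->compare_data[0] = 0x00;
+	p->compare_data[1] = 0x43;	/* port 67 (DHCP server) */
+	p->data_mask[0] = 0xff;
+	p->data_mask[1] = 0xff;
+}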
+
+struct wcn36xx_hal_sessionized_rcv_pkt_filter_cfg_type {
+ u8 id;
+ u8 type;
+ u8 params_count;
+ u32 coleasce_time;
+ u8 bss_index;
+ struct wcn36xx_hal_rcv_pkt_filter_params params[1];
+};
+
+struct wcn36xx_hal_set_rcv_pkt_filter_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 id;
+ u8 type;
+ u8 params_count;
+ u32 coalesce_time;
+ struct wcn36xx_hal_rcv_pkt_filter_params params[1];
+};
+
+struct wcn36xx_hal_rcv_flt_mc_addr_list_type {
+ /* from start of the respective frame header */
+ u8 data_offset;
+
+ u32 mc_addr_count;
+	u8 mc_addr[WCN36XX_HAL_MAX_NUM_MULTICAST_ADDRESS][ETH_ALEN];
+ u8 bss_index;
+};
+
+struct wcn36xx_hal_set_pkt_filter_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+
+ u8 bss_index;
+};
+
+struct wcn36xx_hal_rcv_flt_pkt_match_cnt_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 bss_index;
+};
+
+struct wcn36xx_hal_rcv_flt_pkt_match_cnt {
+ u8 id;
+ u32 match_cnt;
+};
+
+struct wcn36xx_hal_rcv_flt_pkt_match_cnt_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Success or Failure */
+ u32 status;
+
+ u32 match_count;
+ struct wcn36xx_hal_rcv_flt_pkt_match_cnt
+ matches[WCN36XX_HAL_MAX_NUM_FILTERS];
+ u8 bss_index;
+};
+
+struct wcn36xx_hal_rcv_flt_pkt_clear_param {
+ /* only valid for response message */
+ u32 status;
+ u8 id;
+ u8 bss_index;
+};
+
+struct wcn36xx_hal_rcv_flt_pkt_clear_req_msg {
+ struct wcn36xx_hal_msg_header header;
+ struct wcn36xx_hal_rcv_flt_pkt_clear_param param;
+};
+
+struct wcn36xx_hal_rcv_flt_pkt_clear_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+ struct wcn36xx_hal_rcv_flt_pkt_clear_param param;
+};
+
+struct wcn36xx_hal_rcv_flt_pkt_set_mc_list_req_msg {
+ struct wcn36xx_hal_msg_header header;
+ struct wcn36xx_hal_rcv_flt_mc_addr_list_type mc_addr_list;
+};
+
+struct wcn36xx_hal_rcv_flt_pkt_set_mc_list_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+ u32 status;
+ u8 bss_index;
+};
+
+/* Packet Filtering Definitions End */
+
+struct wcn36xx_hal_set_power_params_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Ignore DTIM */
+ u32 ignore_dtim;
+
+ /* DTIM Period */
+ u32 dtim_period;
+
+ /* Listen Interval */
+ u32 listen_interval;
+
+ /* Broadcast Multicast Filter */
+ u32 bcast_mcast_filter;
+
+ /* Beacon Early Termination */
+ u32 enable_bet;
+
+ /* Beacon Early Termination Interval */
+ u32 bet_interval;
+} __packed;
+
+struct wcn36xx_hal_set_power_params_resp {
+
+ struct wcn36xx_hal_msg_header header;
+
+ /* status of the request */
+ u32 status;
+} __packed;
+
+/* Capability bitmap exchange definitions and macros start */
+
+enum place_holder_in_cap_bitmap {
+ MCC = 0,
+ P2P = 1,
+ DOT11AC = 2,
+ SLM_SESSIONIZATION = 3,
+ DOT11AC_OPMODE = 4,
+ SAP32STA = 5,
+ TDLS = 6,
+ P2P_GO_NOA_DECOUPLE_INIT_SCAN = 7,
+ WLANACTIVE_OFFLOAD = 8,
+ BEACON_OFFLOAD = 9,
+ SCAN_OFFLOAD = 10,
+ ROAM_OFFLOAD = 11,
+ BCN_MISS_OFFLOAD = 12,
+ STA_POWERSAVE = 13,
+ STA_ADVANCED_PWRSAVE = 14,
+ AP_UAPSD = 15,
+ AP_DFS = 16,
+ BLOCKACK = 17,
+ PHY_ERR = 18,
+ BCN_FILTER = 19,
+ RTT = 20,
+ RATECTRL = 21,
+ WOW = 22,
+ MAX_FEATURE_SUPPORTED = 128,
+};
+
+struct wcn36xx_hal_feat_caps_msg {
+
+ struct wcn36xx_hal_msg_header header;
+
+ u32 feat_caps[4];
+} __packed;
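+
+/* Illustrative sketch (hypothetical helper): testing one capability bit
+ * in the 4 x 32-bit feat_caps words above, assuming LSB-first bit
+ * numbering within each word.
+ */
+static inline int wcn36xx_hal_cap_is_set(struct wcn36xx_hal_feat_caps_msg *msg,
+					 enum place_holder_in_cap_bitmap cap)
+{
+	return !!(msg->feat_caps[cap / 32] & (1U << (cap % 32)));
+}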
+
+/* status codes to help debug rekey failures */
+enum gtk_rekey_status {
+ WCN36XX_HAL_GTK_REKEY_STATUS_SUCCESS = 0,
+
+ /* rekey detected, but not handled */
+ WCN36XX_HAL_GTK_REKEY_STATUS_NOT_HANDLED = 1,
+
+ /* MIC check error on M1 */
+ WCN36XX_HAL_GTK_REKEY_STATUS_MIC_ERROR = 2,
+
+ /* decryption error on M1 */
+ WCN36XX_HAL_GTK_REKEY_STATUS_DECRYPT_ERROR = 3,
+
+ /* M1 replay detected */
+ WCN36XX_HAL_GTK_REKEY_STATUS_REPLAY_ERROR = 4,
+
+ /* missing GTK key descriptor in M1 */
+ WCN36XX_HAL_GTK_REKEY_STATUS_MISSING_KDE = 5,
+
+ /* missing iGTK key descriptor in M1 */
+ WCN36XX_HAL_GTK_REKEY_STATUS_MISSING_IGTK_KDE = 6,
+
+ /* key installation error */
+ WCN36XX_HAL_GTK_REKEY_STATUS_INSTALL_ERROR = 7,
+
+ /* iGTK key installation error */
+ WCN36XX_HAL_GTK_REKEY_STATUS_IGTK_INSTALL_ERROR = 8,
+
+ /* GTK rekey M2 response TX error */
+ WCN36XX_HAL_GTK_REKEY_STATUS_RESP_TX_ERROR = 9,
+
+ /* non-specific general error */
+ WCN36XX_HAL_GTK_REKEY_STATUS_GEN_ERROR = 255
+};
+
+/* wake reason types */
+enum wake_reason_type {
+ WCN36XX_HAL_WAKE_REASON_NONE = 0,
+
+ /* magic packet match */
+ WCN36XX_HAL_WAKE_REASON_MAGIC_PACKET = 1,
+
+ /* host defined pattern match */
+ WCN36XX_HAL_WAKE_REASON_PATTERN_MATCH = 2,
+
+ /* EAP-ID frame detected */
+ WCN36XX_HAL_WAKE_REASON_EAPID_PACKET = 3,
+
+ /* start of EAPOL 4-way handshake detected */
+ WCN36XX_HAL_WAKE_REASON_EAPOL4WAY_PACKET = 4,
+
+ /* network scan offload match */
+ WCN36XX_HAL_WAKE_REASON_NETSCAN_OFFL_MATCH = 5,
+
+ /* GTK rekey status wakeup (see status) */
+ WCN36XX_HAL_WAKE_REASON_GTK_REKEY_STATUS = 6,
+
+ /* BSS connection lost */
+ WCN36XX_HAL_WAKE_REASON_BSS_CONN_LOST = 7,
+};
+
+/*
+ * Wake packet which is saved at tWakeReasonParams.DataStart.
+ * This data is sent for any wake reason that involves a packet-based
+ * wakeup:
+ *
+ * WCN36XX_HAL_WAKE_REASON_TYPE_MAGIC_PACKET
+ * WCN36XX_HAL_WAKE_REASON_TYPE_PATTERN_MATCH
+ * WCN36XX_HAL_WAKE_REASON_TYPE_EAPID_PACKET
+ * WCN36XX_HAL_WAKE_REASON_TYPE_EAPOL4WAY_PACKET
+ * WCN36XX_HAL_WAKE_REASON_TYPE_GTK_REKEY_STATUS
+ *
+ * The information is provided to the host for auditing and debug purposes.
+ */
+
+/* Wake reason indication */
+struct wcn36xx_hal_wake_reason_ind {
+ struct wcn36xx_hal_msg_header header;
+
+ /* see tWakeReasonType */
+ u32 reason;
+
+ /* argument specific to the reason type */
+ u32 reason_arg;
+
+ /* length of optional data stored in this message, in case HAL
+ * truncates the data (i.e. data packets) this length will be less
+ * than the actual length */
+ u32 stored_data_len;
+
+ /* actual length of data */
+ u32 actual_data_len;
+
+ /* variable length start of data (length == storedDataLen) see
+ * specific wake type */
+ u8 data_start[1];
+
+ u32 bss_index:8;
+ u32 reserved:24;
+};
+
+#define WCN36XX_HAL_GTK_KEK_BYTES 16
+#define WCN36XX_HAL_GTK_KCK_BYTES 16
+
+#define WCN36XX_HAL_GTK_OFFLOAD_FLAGS_DISABLE (1 << 0)
+
+#define GTK_SET_BSS_KEY_TAG 0x1234AA55
+
+struct wcn36xx_hal_gtk_offload_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* optional flags */
+ u32 flags;
+
+ /* Key confirmation key */
+ u8 kck[WCN36XX_HAL_GTK_KCK_BYTES];
+
+ /* key encryption key */
+ u8 kek[WCN36XX_HAL_GTK_KEK_BYTES];
+
+ /* replay counter */
+ u64 key_replay_counter;
+
+ u8 bss_index;
+};
+
+struct wcn36xx_hal_gtk_offload_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+
+ u8 bss_index;
+};
+
+struct wcn36xx_hal_gtk_offload_get_info_req_msg {
+ struct wcn36xx_hal_msg_header header;
+ u8 bss_index;
+};
+
+struct wcn36xx_hal_gtk_offload_get_info_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+
+ /* last rekey status when the rekey was offloaded */
+ u32 last_rekey_status;
+
+ /* current replay counter value */
+ u64 key_replay_counter;
+
+ /* total rekey attempts */
+ u32 total_rekey_count;
+
+ /* successful GTK rekeys */
+ u32 gtk_rekey_count;
+
+ /* successful iGTK rekeys */
+ u32 igtk_rekey_count;
+
+ u8 bss_index;
+};
+
+struct dhcp_info {
+	/* Indicates the device mode for which the DHCP activity applies */
+ u8 device_mode;
+
+ u8 addr[ETH_ALEN];
+};
+
+struct dhcp_ind_status {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+};
+
+/*
+ * Thermal Mitigation mode of operation.
+ *
+ * WCN36XX_HAL_THERMAL_MITIGATION_MODE_0 - Based on AMPDU disabling aggregation
+ *
+ * WCN36XX_HAL_THERMAL_MITIGATION_MODE_1 - Based on AMPDU disabling aggregation
+ * and reducing transmit power
+ *
+ * WCN36XX_HAL_THERMAL_MITIGATION_MODE_2 - Not supported */
+enum wcn36xx_hal_thermal_mitigation_mode_type {
+ HAL_THERMAL_MITIGATION_MODE_INVALID = -1,
+ HAL_THERMAL_MITIGATION_MODE_0,
+ HAL_THERMAL_MITIGATION_MODE_1,
+ HAL_THERMAL_MITIGATION_MODE_2,
+ HAL_THERMAL_MITIGATION_MODE_MAX = WCN36XX_HAL_MAX_ENUM_SIZE,
+};
+
+
+/*
+ * Thermal Mitigation level.
+ * Note the levels are incremental, i.e. WCN36XX_HAL_THERMAL_MITIGATION_LEVEL_2 =
+ * WCN36XX_HAL_THERMAL_MITIGATION_LEVEL_0 +
+ * WCN36XX_HAL_THERMAL_MITIGATION_LEVEL_1
+ *
+ * WCN36XX_HAL_THERMAL_MITIGATION_LEVEL_0 - lowest level of thermal mitigation.
+ * This level indicates normal mode of operation
+ *
+ * WCN36XX_HAL_THERMAL_MITIGATION_LEVEL_1 - 1st level of thermal mitigation
+ *
+ * WCN36XX_HAL_THERMAL_MITIGATION_LEVEL_2 - 2nd level of thermal mitigation
+ *
+ * WCN36XX_HAL_THERMAL_MITIGATION_LEVEL_3 - 3rd level of thermal mitigation
+ *
+ * WCN36XX_HAL_THERMAL_MITIGATION_LEVEL_4 - 4th level of thermal mitigation
+ */
+enum wcn36xx_hal_thermal_mitigation_level_type {
+ HAL_THERMAL_MITIGATION_LEVEL_INVALID = -1,
+ HAL_THERMAL_MITIGATION_LEVEL_0,
+ HAL_THERMAL_MITIGATION_LEVEL_1,
+ HAL_THERMAL_MITIGATION_LEVEL_2,
+ HAL_THERMAL_MITIGATION_LEVEL_3,
+ HAL_THERMAL_MITIGATION_LEVEL_4,
+ HAL_THERMAL_MITIGATION_LEVEL_MAX = WCN36XX_HAL_MAX_ENUM_SIZE,
+};
+
+
+/* WCN36XX_HAL_SET_THERMAL_MITIGATION_REQ */
+struct set_thermal_mitigation_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Thermal Mitigation Operation Mode */
+ enum wcn36xx_hal_thermal_mitigation_mode_type mode;
+
+ /* Thermal Mitigation Level */
+ enum wcn36xx_hal_thermal_mitigation_level_type level;
+};
+
+struct set_thermal_mitigation_resp {
+
+ struct wcn36xx_hal_msg_header header;
+
+ /* status of the request */
+ u32 status;
+};
+
+/* Per STA Class B Statistics. Class B statistics are STA TX/RX stats
+ * provided to FW from Host via periodic messages */
+struct stats_class_b_ind {
+ struct wcn36xx_hal_msg_header header;
+
+	/* Duration over which these stats were collected */
+ u32 duration;
+
+ /* Per STA Stats */
+
+ /* TX stats */
+ u32 tx_bytes_pushed;
+ u32 tx_packets_pushed;
+
+ /* RX stats */
+ u32 rx_bytes_rcvd;
+ u32 rx_packets_rcvd;
+ u32 rx_time_total;
+};
+
+#endif /* _HAL_H_ */
diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
new file mode 100644
index 0000000..7839b31
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/main.c
@@ -0,0 +1,1036 @@
+/*
+ * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include "wcn36xx.h"
+
+unsigned int wcn36xx_dbg_mask;
+module_param_named(debug_mask, wcn36xx_dbg_mask, uint, 0644);
+MODULE_PARM_DESC(debug_mask, "Debugging mask");
+
+#define CHAN2G(_freq, _idx) { \
+ .band = IEEE80211_BAND_2GHZ, \
+ .center_freq = (_freq), \
+ .hw_value = (_idx), \
+ .max_power = 25, \
+}
+
+#define CHAN5G(_freq, _idx) { \
+ .band = IEEE80211_BAND_5GHZ, \
+ .center_freq = (_freq), \
+ .hw_value = (_idx), \
+ .max_power = 25, \
+}
+
+/* The wcn firmware expects channel values to match
+ * their mnemonic values. So use these for .hw_value. */
+static struct ieee80211_channel wcn_2ghz_channels[] = {
+ CHAN2G(2412, 1), /* Channel 1 */
+ CHAN2G(2417, 2), /* Channel 2 */
+ CHAN2G(2422, 3), /* Channel 3 */
+ CHAN2G(2427, 4), /* Channel 4 */
+ CHAN2G(2432, 5), /* Channel 5 */
+ CHAN2G(2437, 6), /* Channel 6 */
+ CHAN2G(2442, 7), /* Channel 7 */
+ CHAN2G(2447, 8), /* Channel 8 */
+ CHAN2G(2452, 9), /* Channel 9 */
+ CHAN2G(2457, 10), /* Channel 10 */
+ CHAN2G(2462, 11), /* Channel 11 */
+ CHAN2G(2467, 12), /* Channel 12 */
+ CHAN2G(2472, 13), /* Channel 13 */
+ CHAN2G(2484, 14) /* Channel 14 */
+
+};
+
+static struct ieee80211_channel wcn_5ghz_channels[] = {
+ CHAN5G(5180, 36),
+ CHAN5G(5200, 40),
+ CHAN5G(5220, 44),
+ CHAN5G(5240, 48),
+ CHAN5G(5260, 52),
+ CHAN5G(5280, 56),
+ CHAN5G(5300, 60),
+ CHAN5G(5320, 64),
+ CHAN5G(5500, 100),
+ CHAN5G(5520, 104),
+ CHAN5G(5540, 108),
+ CHAN5G(5560, 112),
+ CHAN5G(5580, 116),
+ CHAN5G(5600, 120),
+ CHAN5G(5620, 124),
+ CHAN5G(5640, 128),
+ CHAN5G(5660, 132),
+ CHAN5G(5700, 140),
+ CHAN5G(5745, 149),
+ CHAN5G(5765, 153),
+ CHAN5G(5785, 157),
+ CHAN5G(5805, 161),
+ CHAN5G(5825, 165)
+};
+
+#define RATE(_bitrate, _hw_rate, _flags) { \
+ .bitrate = (_bitrate), \
+ .flags = (_flags), \
+ .hw_value = (_hw_rate), \
+ .hw_value_short = (_hw_rate) \
+}
+
+static struct ieee80211_rate wcn_2ghz_rates[] = {
+ RATE(10, HW_RATE_INDEX_1MBPS, 0),
+ RATE(20, HW_RATE_INDEX_2MBPS, IEEE80211_RATE_SHORT_PREAMBLE),
+ RATE(55, HW_RATE_INDEX_5_5MBPS, IEEE80211_RATE_SHORT_PREAMBLE),
+ RATE(110, HW_RATE_INDEX_11MBPS, IEEE80211_RATE_SHORT_PREAMBLE),
+ RATE(60, HW_RATE_INDEX_6MBPS, 0),
+ RATE(90, HW_RATE_INDEX_9MBPS, 0),
+ RATE(120, HW_RATE_INDEX_12MBPS, 0),
+ RATE(180, HW_RATE_INDEX_18MBPS, 0),
+ RATE(240, HW_RATE_INDEX_24MBPS, 0),
+ RATE(360, HW_RATE_INDEX_36MBPS, 0),
+ RATE(480, HW_RATE_INDEX_48MBPS, 0),
+ RATE(540, HW_RATE_INDEX_54MBPS, 0)
+};
+
+static struct ieee80211_rate wcn_5ghz_rates[] = {
+ RATE(60, HW_RATE_INDEX_6MBPS, 0),
+ RATE(90, HW_RATE_INDEX_9MBPS, 0),
+ RATE(120, HW_RATE_INDEX_12MBPS, 0),
+ RATE(180, HW_RATE_INDEX_18MBPS, 0),
+ RATE(240, HW_RATE_INDEX_24MBPS, 0),
+ RATE(360, HW_RATE_INDEX_36MBPS, 0),
+ RATE(480, HW_RATE_INDEX_48MBPS, 0),
+ RATE(540, HW_RATE_INDEX_54MBPS, 0)
+};
+
+static struct ieee80211_supported_band wcn_band_2ghz = {
+ .channels = wcn_2ghz_channels,
+ .n_channels = ARRAY_SIZE(wcn_2ghz_channels),
+ .bitrates = wcn_2ghz_rates,
+ .n_bitrates = ARRAY_SIZE(wcn_2ghz_rates),
+ .ht_cap = {
+ .cap = IEEE80211_HT_CAP_GRN_FLD |
+ IEEE80211_HT_CAP_SGI_20 |
+ IEEE80211_HT_CAP_DSSSCCK40 |
+ IEEE80211_HT_CAP_LSIG_TXOP_PROT,
+ .ht_supported = true,
+ .ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K,
+ .ampdu_density = IEEE80211_HT_MPDU_DENSITY_16,
+ .mcs = {
+ .rx_mask = { 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
+ .rx_highest = cpu_to_le16(72),
+ .tx_params = IEEE80211_HT_MCS_TX_DEFINED,
+ }
+ }
+};
+
+static struct ieee80211_supported_band wcn_band_5ghz = {
+ .channels = wcn_5ghz_channels,
+ .n_channels = ARRAY_SIZE(wcn_5ghz_channels),
+ .bitrates = wcn_5ghz_rates,
+ .n_bitrates = ARRAY_SIZE(wcn_5ghz_rates),
+ .ht_cap = {
+ .cap = IEEE80211_HT_CAP_GRN_FLD |
+ IEEE80211_HT_CAP_SGI_20 |
+ IEEE80211_HT_CAP_DSSSCCK40 |
+ IEEE80211_HT_CAP_LSIG_TXOP_PROT |
+ IEEE80211_HT_CAP_SGI_40 |
+ IEEE80211_HT_CAP_SUP_WIDTH_20_40,
+ .ht_supported = true,
+ .ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K,
+ .ampdu_density = IEEE80211_HT_MPDU_DENSITY_16,
+ .mcs = {
+ .rx_mask = { 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
+ .rx_highest = cpu_to_le16(72),
+ .tx_params = IEEE80211_HT_MCS_TX_DEFINED,
+ }
+ }
+};
+
+#ifdef CONFIG_PM
+
+static const struct wiphy_wowlan_support wowlan_support = {
+ .flags = WIPHY_WOWLAN_ANY
+};
+
+#endif
+
+static inline u8 get_sta_index(struct ieee80211_vif *vif,
+ struct wcn36xx_sta *sta_priv)
+{
+ return NL80211_IFTYPE_STATION == vif->type ?
+ sta_priv->bss_sta_index :
+ sta_priv->sta_index;
+}
+
+static int wcn36xx_start(struct ieee80211_hw *hw)
+{
+ struct wcn36xx *wcn = hw->priv;
+ int ret;
+
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "mac start\n");
+
+ /* SMD initialization */
+ ret = wcn36xx_smd_open(wcn);
+ if (ret) {
+ wcn36xx_err("Failed to open smd channel: %d\n", ret);
+ goto out_err;
+ }
+
+ /* Allocate memory pools for Mgmt BD headers and Data BD headers */
+ ret = wcn36xx_dxe_allocate_mem_pools(wcn);
+ if (ret) {
+ wcn36xx_err("Failed to alloc DXE mempool: %d\n", ret);
+ goto out_smd_close;
+ }
+
+ ret = wcn36xx_dxe_alloc_ctl_blks(wcn);
+ if (ret) {
+ wcn36xx_err("Failed to alloc DXE ctl blocks: %d\n", ret);
+ goto out_free_dxe_pool;
+ }
+
+ wcn->hal_buf = kmalloc(WCN36XX_HAL_BUF_SIZE, GFP_KERNEL);
+ if (!wcn->hal_buf) {
+ wcn36xx_err("Failed to allocate smd buf\n");
+ ret = -ENOMEM;
+ goto out_free_dxe_ctl;
+ }
+
+ ret = wcn36xx_smd_load_nv(wcn);
+ if (ret) {
+ wcn36xx_err("Failed to push NV to chip\n");
+ goto out_free_smd_buf;
+ }
+
+ ret = wcn36xx_smd_start(wcn);
+ if (ret) {
+ wcn36xx_err("Failed to start chip\n");
+ goto out_free_smd_buf;
+ }
+
+ /* DMA channel initialization */
+ ret = wcn36xx_dxe_init(wcn);
+ if (ret) {
+ wcn36xx_err("DXE init failed\n");
+ goto out_smd_stop;
+ }
+
+ wcn36xx_debugfs_init(wcn);
+
+ if (!wcn36xx_is_fw_version(wcn, 1, 2, 2, 24)) {
+ ret = wcn36xx_smd_feature_caps_exchange(wcn);
+ if (ret)
+ wcn36xx_warn("Exchange feature caps failed\n");
+ }
+ INIT_LIST_HEAD(&wcn->vif_list);
+ return 0;
+
+out_smd_stop:
+ wcn36xx_smd_stop(wcn);
+out_free_smd_buf:
+ kfree(wcn->hal_buf);
+out_free_dxe_pool:
+ wcn36xx_dxe_free_mem_pools(wcn);
+out_free_dxe_ctl:
+ wcn36xx_dxe_free_ctl_blks(wcn);
+out_smd_close:
+ wcn36xx_smd_close(wcn);
+out_err:
+ return ret;
+}
+
+static void wcn36xx_stop(struct ieee80211_hw *hw)
+{
+ struct wcn36xx *wcn = hw->priv;
+
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "mac stop\n");
+
+ wcn36xx_debugfs_exit(wcn);
+ wcn36xx_smd_stop(wcn);
+ wcn36xx_dxe_deinit(wcn);
+ wcn36xx_smd_close(wcn);
+
+ wcn36xx_dxe_free_mem_pools(wcn);
+ wcn36xx_dxe_free_ctl_blks(wcn);
+
+ kfree(wcn->hal_buf);
+}
+
+static int wcn36xx_config(struct ieee80211_hw *hw, u32 changed)
+{
+ struct wcn36xx *wcn = hw->priv;
+ struct ieee80211_vif *vif = NULL;
+ struct wcn36xx_vif *tmp;
+
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "mac config changed 0x%08x\n", changed);
+
+ if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+ int ch = WCN36XX_HW_CHANNEL(wcn);
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "wcn36xx_config channel switch=%d\n",
+ ch);
+ list_for_each_entry(tmp, &wcn->vif_list, list) {
+ vif = container_of((void *)tmp,
+ struct ieee80211_vif,
+ drv_priv);
+ wcn36xx_smd_switch_channel(wcn, vif, ch);
+ }
+ }
+
+ return 0;
+}
+
+#define WCN36XX_SUPPORTED_FILTERS (0)
+
+static void wcn36xx_configure_filter(struct ieee80211_hw *hw,
+ unsigned int changed,
+ unsigned int *total, u64 multicast)
+{
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "mac configure filter\n");
+
+ *total &= WCN36XX_SUPPORTED_FILTERS;
+}
+
+static void wcn36xx_tx(struct ieee80211_hw *hw,
+ struct ieee80211_tx_control *control,
+ struct sk_buff *skb)
+{
+ struct wcn36xx *wcn = hw->priv;
+ struct wcn36xx_sta *sta_priv = NULL;
+
+ if (control->sta)
+ sta_priv = (struct wcn36xx_sta *)control->sta->drv_priv;
+
+ if (wcn36xx_start_tx(wcn, sta_priv, skb))
+ ieee80211_free_txskb(wcn->hw, skb);
+}
+
+static int wcn36xx_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key_conf)
+{
+ struct wcn36xx *wcn = hw->priv;
+ struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+ struct wcn36xx_sta *sta_priv = vif_priv->sta;
+ int ret = 0;
+ u8 key[WLAN_MAX_KEY_LEN];
+
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "mac80211 set key\n");
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "Key: cmd=0x%x algo:0x%x, id:%d, len:%d flags 0x%x\n",
+ cmd, key_conf->cipher, key_conf->keyidx,
+ key_conf->keylen, key_conf->flags);
+ wcn36xx_dbg_dump(WCN36XX_DBG_MAC, "KEY: ",
+ key_conf->key,
+ key_conf->keylen);
+
+ switch (key_conf->cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ vif_priv->encrypt_type = WCN36XX_HAL_ED_WEP40;
+ break;
+ case WLAN_CIPHER_SUITE_WEP104:
+ vif_priv->encrypt_type = WCN36XX_HAL_ED_WEP104;
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ vif_priv->encrypt_type = WCN36XX_HAL_ED_CCMP;
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ vif_priv->encrypt_type = WCN36XX_HAL_ED_TKIP;
+ break;
+ default:
+ wcn36xx_err("Unsupported key type 0x%x\n",
+ key_conf->cipher);
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ switch (cmd) {
+ case SET_KEY:
+ if (WCN36XX_HAL_ED_TKIP == vif_priv->encrypt_type) {
+ /*
+ * Supplicant is sending the key in the wrong order:
+ * Temporal Key (16 bytes) - TX MIC (8 bytes) - RX MIC (8 bytes)
+ * but HW expects it in the order described in the
+ * IEEE 802.11 spec (see chapter 11.7), i.e.:
+ * Temporal Key (16 bytes) - RX MIC (8 bytes) - TX MIC (8 bytes)
+ */
+ memcpy(key, key_conf->key, 16);
+ memcpy(key + 16, key_conf->key + 24, 8);
+ memcpy(key + 24, key_conf->key + 16, 8);
+ } else {
+ memcpy(key, key_conf->key, key_conf->keylen);
+ }
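+ /*
+ * Illustrative byte map of the TKIP reordering above
+ * (offsets into key_conf->key -> key):
+ *   key[0..15]  <- key_conf->key[0..15]  (temporal key)
+ *   key[16..23] <- key_conf->key[24..31] (RX MIC)
+ *   key[24..31] <- key_conf->key[16..23] (TX MIC)
+ */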
+
+ if (IEEE80211_KEY_FLAG_PAIRWISE & key_conf->flags) {
+ sta_priv->is_data_encrypted = true;
+ /* Reconfigure bss with encrypt_type */
+ if (NL80211_IFTYPE_STATION == vif->type)
+ wcn36xx_smd_config_bss(wcn,
+ vif,
+ sta,
+ sta->addr,
+ true);
+
+ wcn36xx_smd_set_stakey(wcn,
+ vif_priv->encrypt_type,
+ key_conf->keyidx,
+ key_conf->keylen,
+ key,
+ get_sta_index(vif, sta_priv));
+ } else {
+ wcn36xx_smd_set_bsskey(wcn,
+ vif_priv->encrypt_type,
+ key_conf->keyidx,
+ key_conf->keylen,
+ key);
+ if ((WLAN_CIPHER_SUITE_WEP40 == key_conf->cipher) ||
+ (WLAN_CIPHER_SUITE_WEP104 == key_conf->cipher)) {
+ sta_priv->is_data_encrypted = true;
+ wcn36xx_smd_set_stakey(wcn,
+ vif_priv->encrypt_type,
+ key_conf->keyidx,
+ key_conf->keylen,
+ key,
+ get_sta_index(vif, sta_priv));
+ }
+ }
+ break;
+ case DISABLE_KEY:
+ if (!(IEEE80211_KEY_FLAG_PAIRWISE & key_conf->flags)) {
+ wcn36xx_smd_remove_bsskey(wcn,
+ vif_priv->encrypt_type,
+ key_conf->keyidx);
+ } else {
+ sta_priv->is_data_encrypted = false;
+ /* do not remove key if disassociated */
+ if (sta_priv->aid)
+ wcn36xx_smd_remove_stakey(wcn,
+ vif_priv->encrypt_type,
+ key_conf->keyidx,
+ get_sta_index(vif, sta_priv));
+ }
+ break;
+ default:
+ wcn36xx_err("Unsupported key cmd 0x%x\n", cmd);
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+out:
+ return ret;
+}
+
+static void wcn36xx_sw_scan_start(struct ieee80211_hw *hw)
+{
+ struct wcn36xx *wcn = hw->priv;
+
+ wcn36xx_smd_init_scan(wcn, HAL_SYS_MODE_SCAN);
+ wcn36xx_smd_start_scan(wcn);
+}
+
+static void wcn36xx_sw_scan_complete(struct ieee80211_hw *hw)
+{
+ struct wcn36xx *wcn = hw->priv;
+
+ wcn36xx_smd_end_scan(wcn);
+ wcn36xx_smd_finish_scan(wcn, HAL_SYS_MODE_SCAN);
+}
+
+static void wcn36xx_update_allowed_rates(struct ieee80211_sta *sta,
+ enum ieee80211_band band)
+{
+ int i, size;
+ u16 *rates_table;
+ struct wcn36xx_sta *sta_priv = (struct wcn36xx_sta *)sta->drv_priv;
+ u32 rates = sta->supp_rates[band];
+
+ memset(&sta_priv->supported_rates, 0,
+ sizeof(sta_priv->supported_rates));
+ sta_priv->supported_rates.op_rate_mode = STA_11n;
+
+ size = ARRAY_SIZE(sta_priv->supported_rates.dsss_rates);
+ rates_table = sta_priv->supported_rates.dsss_rates;
+ if (band == IEEE80211_BAND_2GHZ) {
+ for (i = 0; i < size; i++) {
+ if (rates & 0x01) {
+ rates_table[i] = wcn_2ghz_rates[i].hw_value;
+ rates = rates >> 1;
+ }
+ }
+ }
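+ /*
+ * Note: the walks above and below assume the supported rate bits
+ * are set contiguously from bit 0; each matched bit shifts the
+ * bitmap right so the next table entry lines up with the next bit.
+ * E.g. supp_rates = 0xf on 2.4 GHz fills dsss_rates[0..3] with the
+ * 1/2/5.5/11 Mbps HW indices.
+ */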
+
+ size = ARRAY_SIZE(sta_priv->supported_rates.ofdm_rates);
+ rates_table = sta_priv->supported_rates.ofdm_rates;
+ for (i = 0; i < size; i++) {
+ if (rates & 0x01) {
+ rates_table[i] = wcn_5ghz_rates[i].hw_value;
+ rates = rates >> 1;
+ }
+ }
+
+ if (sta->ht_cap.ht_supported) {
+ BUILD_BUG_ON(sizeof(sta->ht_cap.mcs.rx_mask) >
+ sizeof(sta_priv->supported_rates.supported_mcs_set));
+ memcpy(sta_priv->supported_rates.supported_mcs_set,
+ sta->ht_cap.mcs.rx_mask,
+ sizeof(sta->ht_cap.mcs.rx_mask));
+ }
+}
+
+void wcn36xx_set_default_rates(struct wcn36xx_hal_supported_rates *rates)
+{
+ u16 ofdm_rates[WCN36XX_HAL_NUM_OFDM_RATES] = {
+ HW_RATE_INDEX_6MBPS,
+ HW_RATE_INDEX_9MBPS,
+ HW_RATE_INDEX_12MBPS,
+ HW_RATE_INDEX_18MBPS,
+ HW_RATE_INDEX_24MBPS,
+ HW_RATE_INDEX_36MBPS,
+ HW_RATE_INDEX_48MBPS,
+ HW_RATE_INDEX_54MBPS
+ };
+ u16 dsss_rates[WCN36XX_HAL_NUM_DSSS_RATES] = {
+ HW_RATE_INDEX_1MBPS,
+ HW_RATE_INDEX_2MBPS,
+ HW_RATE_INDEX_5_5MBPS,
+ HW_RATE_INDEX_11MBPS
+ };
+
+ rates->op_rate_mode = STA_11n;
+ memcpy(rates->dsss_rates, dsss_rates,
+ sizeof(*dsss_rates) * WCN36XX_HAL_NUM_DSSS_RATES);
+ memcpy(rates->ofdm_rates, ofdm_rates,
+ sizeof(*ofdm_rates) * WCN36XX_HAL_NUM_OFDM_RATES);
+ rates->supported_mcs_set[0] = 0xFF;
+}
+
+static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf,
+ u32 changed)
+{
+ struct wcn36xx *wcn = hw->priv;
+ struct sk_buff *skb = NULL;
+ u16 tim_off, tim_len;
+ enum wcn36xx_hal_link_state link_state;
+ struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "mac bss info changed vif %p changed 0x%08x\n",
+ vif, changed);
+
+ if (changed & BSS_CHANGED_BEACON_INFO) {
+ wcn36xx_dbg(WCN36XX_DBG_MAC,
+ "mac bss changed dtim period %d\n",
+ bss_conf->dtim_period);
+
+ vif_priv->dtim_period = bss_conf->dtim_period;
+ }
+
+ if (changed & BSS_CHANGED_PS) {
+ wcn36xx_dbg(WCN36XX_DBG_MAC,
+ "mac bss PS set %d\n",
+ bss_conf->ps);
+ if (bss_conf->ps) {
+ wcn36xx_pmc_enter_bmps_state(wcn, vif);
+ } else {
+ wcn36xx_pmc_exit_bmps_state(wcn, vif);
+ }
+ }
+
+ if (changed & BSS_CHANGED_BSSID) {
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "mac bss changed_bssid %pM\n",
+ bss_conf->bssid);
+
+ if (!is_zero_ether_addr(bss_conf->bssid)) {
+ vif_priv->is_joining = true;
+ vif_priv->bss_index = 0xff;
+ wcn36xx_smd_join(wcn, bss_conf->bssid,
+ vif->addr, WCN36XX_HW_CHANNEL(wcn));
+ wcn36xx_smd_config_bss(wcn, vif, NULL,
+ bss_conf->bssid, false);
+ } else {
+ vif_priv->is_joining = false;
+ wcn36xx_smd_delete_bss(wcn, vif);
+ }
+ }
+
+ if (changed & BSS_CHANGED_SSID) {
+ wcn36xx_dbg(WCN36XX_DBG_MAC,
+ "mac bss changed ssid\n");
+ wcn36xx_dbg_dump(WCN36XX_DBG_MAC, "ssid ",
+ bss_conf->ssid, bss_conf->ssid_len);
+
+ vif_priv->ssid.length = bss_conf->ssid_len;
+ memcpy(&vif_priv->ssid.ssid,
+ bss_conf->ssid,
+ bss_conf->ssid_len);
+ }
+
+ if (changed & BSS_CHANGED_ASSOC) {
+ vif_priv->is_joining = false;
+ if (bss_conf->assoc) {
+ struct ieee80211_sta *sta;
+ struct wcn36xx_sta *sta_priv;
+
+ wcn36xx_dbg(WCN36XX_DBG_MAC,
+ "mac assoc bss %pM vif %pM AID=%d\n",
+ bss_conf->bssid,
+ vif->addr,
+ bss_conf->aid);
+
+ rcu_read_lock();
+ sta = ieee80211_find_sta(vif, bss_conf->bssid);
+ if (!sta) {
+ wcn36xx_err("sta %pM is not found\n",
+ bss_conf->bssid);
+ rcu_read_unlock();
+ goto out;
+ }
+ sta_priv = (struct wcn36xx_sta *)sta->drv_priv;
+
+ wcn36xx_update_allowed_rates(sta, WCN36XX_BAND(wcn));
+
+ wcn36xx_smd_set_link_st(wcn, bss_conf->bssid,
+ vif->addr,
+ WCN36XX_HAL_LINK_POSTASSOC_STATE);
+ wcn36xx_smd_config_bss(wcn, vif, sta,
+ bss_conf->bssid,
+ true);
+ sta_priv->aid = bss_conf->aid;
+ /*
+ * config_sta must be called from here because this is
+ * the place where the AID is available.
+ */
+ wcn36xx_smd_config_sta(wcn, vif, sta);
+ rcu_read_unlock();
+ } else {
+ wcn36xx_dbg(WCN36XX_DBG_MAC,
+ "disassociated bss %pM vif %pM AID=%d\n",
+ bss_conf->bssid,
+ vif->addr,
+ bss_conf->aid);
+ wcn36xx_smd_set_link_st(wcn,
+ bss_conf->bssid,
+ vif->addr,
+ WCN36XX_HAL_LINK_IDLE_STATE);
+ }
+ }
+
+ if (changed & BSS_CHANGED_AP_PROBE_RESP) {
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "mac bss changed ap probe resp\n");
+ skb = ieee80211_proberesp_get(hw, vif);
+ if (!skb) {
+ wcn36xx_err("failed to alloc probereq skb\n");
+ goto out;
+ }
+
+ wcn36xx_smd_update_proberesp_tmpl(wcn, vif, skb);
+ dev_kfree_skb(skb);
+ }
+
+ if (changed & BSS_CHANGED_BEACON_ENABLED) {
+ wcn36xx_dbg(WCN36XX_DBG_MAC,
+ "mac bss changed beacon enabled %d\n",
+ bss_conf->enable_beacon);
+
+ if (bss_conf->enable_beacon) {
+ vif_priv->bss_index = 0xff;
+ wcn36xx_smd_config_bss(wcn, vif, NULL,
+ vif->addr, false);
+ skb = ieee80211_beacon_get_tim(hw, vif, &tim_off,
+ &tim_len);
+ if (!skb) {
+ wcn36xx_err("failed to alloc beacon skb\n");
+ goto out;
+ }
+ wcn36xx_smd_send_beacon(wcn, vif, skb, tim_off, 0);
+ dev_kfree_skb(skb);
+
+ if (vif->type == NL80211_IFTYPE_ADHOC ||
+ vif->type == NL80211_IFTYPE_MESH_POINT)
+ link_state = WCN36XX_HAL_LINK_IBSS_STATE;
+ else
+ link_state = WCN36XX_HAL_LINK_AP_STATE;
+
+ wcn36xx_smd_set_link_st(wcn, vif->addr, vif->addr,
+ link_state);
+ } else {
+ wcn36xx_smd_set_link_st(wcn, vif->addr, vif->addr,
+ WCN36XX_HAL_LINK_IDLE_STATE);
+ wcn36xx_smd_delete_bss(wcn, vif);
+ }
+ }
+out:
+ return;
+}
+
+/* this is required when using IEEE80211_HW_HAS_RATE_CONTROL */
+static int wcn36xx_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
+{
+ struct wcn36xx *wcn = hw->priv;
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "mac set RTS threshold %d\n", value);
+
+ wcn36xx_smd_update_cfg(wcn, WCN36XX_HAL_CFG_RTS_THRESHOLD, value);
+ return 0;
+}
+
+static void wcn36xx_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct wcn36xx *wcn = hw->priv;
+ struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "mac remove interface vif %p\n", vif);
+
+ list_del(&vif_priv->list);
+ wcn36xx_smd_delete_sta_self(wcn, vif->addr);
+}
+
+static int wcn36xx_add_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct wcn36xx *wcn = hw->priv;
+ struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "mac add interface vif %p type %d\n",
+ vif, vif->type);
+
+ if (!(NL80211_IFTYPE_STATION == vif->type ||
+ NL80211_IFTYPE_AP == vif->type ||
+ NL80211_IFTYPE_ADHOC == vif->type ||
+ NL80211_IFTYPE_MESH_POINT == vif->type)) {
+ wcn36xx_warn("Unsupported interface type requested: %d\n",
+ vif->type);
+ return -EOPNOTSUPP;
+ }
+
+ list_add(&vif_priv->list, &wcn->vif_list);
+ wcn36xx_smd_add_sta_self(wcn, vif);
+
+ return 0;
+}
+
+static int wcn36xx_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct wcn36xx *wcn = hw->priv;
+ struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+ struct wcn36xx_sta *sta_priv = (struct wcn36xx_sta *)sta->drv_priv;
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "mac sta add vif %p sta %pM\n",
+ vif, sta->addr);
+
+ vif_priv->sta = sta_priv;
+ sta_priv->vif = vif_priv;
+ /*
+ * In STA mode the HW will be configured on BSS_CHANGED_ASSOC
+ * because at this stage the AID is not available yet.
+ */
+ if (NL80211_IFTYPE_STATION != vif->type) {
+ wcn36xx_update_allowed_rates(sta, WCN36XX_BAND(wcn));
+ sta_priv->aid = sta->aid;
+ wcn36xx_smd_config_sta(wcn, vif, sta);
+ }
+ return 0;
+}
+
+static int wcn36xx_sta_remove(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct wcn36xx *wcn = hw->priv;
+ struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+ struct wcn36xx_sta *sta_priv = (struct wcn36xx_sta *)sta->drv_priv;
+
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "mac sta remove vif %p sta %pM index %d\n",
+ vif, sta->addr, sta_priv->sta_index);
+
+ wcn36xx_smd_delete_sta(wcn, sta_priv->sta_index);
+ vif_priv->sta = NULL;
+ sta_priv->vif = NULL;
+ return 0;
+}
+
+#ifdef CONFIG_PM
+
+static int wcn36xx_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wow)
+{
+ struct wcn36xx *wcn = hw->priv;
+
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "mac suspend\n");
+
+ flush_workqueue(wcn->hal_ind_wq);
+ wcn36xx_smd_set_power_params(wcn, true);
+ return 0;
+}
+
+static int wcn36xx_resume(struct ieee80211_hw *hw)
+{
+ struct wcn36xx *wcn = hw->priv;
+
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "mac resume\n");
+
+ flush_workqueue(wcn->hal_ind_wq);
+ wcn36xx_smd_set_power_params(wcn, false);
+ return 0;
+}
+
+#endif
+
+static int wcn36xx_ampdu_action(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ enum ieee80211_ampdu_mlme_action action,
+ struct ieee80211_sta *sta, u16 tid, u16 *ssn,
+ u8 buf_size)
+{
+ struct wcn36xx *wcn = hw->priv;
+ struct wcn36xx_sta *sta_priv = NULL;
+
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "mac ampdu action action %d tid %d\n",
+ action, tid);
+
+ sta_priv = (struct wcn36xx_sta *)sta->drv_priv;
+
+ switch (action) {
+ case IEEE80211_AMPDU_RX_START:
+ sta_priv->tid = tid;
+ wcn36xx_smd_add_ba_session(wcn, sta, tid, ssn, 0,
+ get_sta_index(vif, sta_priv));
+ wcn36xx_smd_add_ba(wcn);
+ wcn36xx_smd_trigger_ba(wcn, get_sta_index(vif, sta_priv));
+ ieee80211_start_tx_ba_session(sta, tid, 0);
+ break;
+ case IEEE80211_AMPDU_RX_STOP:
+ wcn36xx_smd_del_ba(wcn, tid, get_sta_index(vif, sta_priv));
+ break;
+ case IEEE80211_AMPDU_TX_START:
+ ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ break;
+ case IEEE80211_AMPDU_TX_OPERATIONAL:
+ wcn36xx_smd_add_ba_session(wcn, sta, tid, ssn, 1,
+ get_sta_index(vif, sta_priv));
+ break;
+ case IEEE80211_AMPDU_TX_STOP_FLUSH:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
+ case IEEE80211_AMPDU_TX_STOP_CONT:
+ ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ break;
+ default:
+ wcn36xx_err("Unknown AMPDU action\n");
+ }
+
+ return 0;
+}
+
+static const struct ieee80211_ops wcn36xx_ops = {
+ .start = wcn36xx_start,
+ .stop = wcn36xx_stop,
+ .add_interface = wcn36xx_add_interface,
+ .remove_interface = wcn36xx_remove_interface,
+#ifdef CONFIG_PM
+ .suspend = wcn36xx_suspend,
+ .resume = wcn36xx_resume,
+#endif
+ .config = wcn36xx_config,
+ .configure_filter = wcn36xx_configure_filter,
+ .tx = wcn36xx_tx,
+ .set_key = wcn36xx_set_key,
+ .sw_scan_start = wcn36xx_sw_scan_start,
+ .sw_scan_complete = wcn36xx_sw_scan_complete,
+ .bss_info_changed = wcn36xx_bss_info_changed,
+ .set_rts_threshold = wcn36xx_set_rts_threshold,
+ .sta_add = wcn36xx_sta_add,
+ .sta_remove = wcn36xx_sta_remove,
+ .ampdu_action = wcn36xx_ampdu_action,
+};
+
+static int wcn36xx_init_ieee80211(struct wcn36xx *wcn)
+{
+ int ret = 0;
+
+ static const u32 cipher_suites[] = {
+ WLAN_CIPHER_SUITE_WEP40,
+ WLAN_CIPHER_SUITE_WEP104,
+ WLAN_CIPHER_SUITE_TKIP,
+ WLAN_CIPHER_SUITE_CCMP,
+ };
+
+ wcn->hw->flags = IEEE80211_HW_SIGNAL_DBM |
+ IEEE80211_HW_HAS_RATE_CONTROL |
+ IEEE80211_HW_SUPPORTS_PS |
+ IEEE80211_HW_CONNECTION_MONITOR |
+ IEEE80211_HW_AMPDU_AGGREGATION |
+ IEEE80211_HW_TIMING_BEACON_ONLY;
+
+ wcn->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_ADHOC) |
+ BIT(NL80211_IFTYPE_MESH_POINT);
+
+ wcn->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &wcn_band_2ghz;
+ wcn->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &wcn_band_5ghz;
+
+ wcn->hw->wiphy->cipher_suites = cipher_suites;
+ wcn->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
+
+ wcn->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
+
+#ifdef CONFIG_PM
+ wcn->hw->wiphy->wowlan = &wowlan_support;
+#endif
+
+ wcn->hw->max_listen_interval = 200;
+
+ wcn->hw->queues = 4;
+
+ SET_IEEE80211_DEV(wcn->hw, wcn->dev);
+
+ wcn->hw->sta_data_size = sizeof(struct wcn36xx_sta);
+ wcn->hw->vif_data_size = sizeof(struct wcn36xx_vif);
+
+ return ret;
+}
+
+static int wcn36xx_platform_get_resources(struct wcn36xx *wcn,
+ struct platform_device *pdev)
+{
+ struct resource *res;
+ /* Set TX IRQ */
+ res = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
+ "wcnss_wlantx_irq");
+ if (!res) {
+ wcn36xx_err("failed to get tx_irq\n");
+ return -ENOENT;
+ }
+ wcn->tx_irq = res->start;
+
+ /* Set RX IRQ */
+ res = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
+ "wcnss_wlanrx_irq");
+ if (!res) {
+ wcn36xx_err("failed to get rx_irq\n");
+ return -ENOENT;
+ }
+ wcn->rx_irq = res->start;
+
+ /* Map the memory */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "wcnss_mmio");
+ if (!res) {
+ wcn36xx_err("failed to get mmio\n");
+ return -ENOENT;
+ }
+ wcn->mmio = ioremap(res->start, resource_size(res));
+ if (!wcn->mmio) {
+ wcn36xx_err("failed to map io memory\n");
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static int wcn36xx_probe(struct platform_device *pdev)
+{
+ struct ieee80211_hw *hw;
+ struct wcn36xx *wcn;
+ int ret;
+ u8 addr[ETH_ALEN];
+
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "platform probe\n");
+
+ hw = ieee80211_alloc_hw(sizeof(struct wcn36xx), &wcn36xx_ops);
+ if (!hw) {
+ wcn36xx_err("failed to alloc hw\n");
+ ret = -ENOMEM;
+ goto out_err;
+ }
+ platform_set_drvdata(pdev, hw);
+ wcn = hw->priv;
+ wcn->hw = hw;
+ wcn->dev = &pdev->dev;
+ wcn->ctrl_ops = pdev->dev.platform_data;
+
+ mutex_init(&wcn->hal_mutex);
+
+ if (!wcn->ctrl_ops->get_hw_mac(addr)) {
+ wcn36xx_info("mac address: %pM\n", addr);
+ SET_IEEE80211_PERM_ADDR(wcn->hw, addr);
+ }
+
+ ret = wcn36xx_platform_get_resources(wcn, pdev);
+ if (ret)
+ goto out_free_hw;
+
+ wcn36xx_init_ieee80211(wcn);
+ ret = ieee80211_register_hw(wcn->hw);
+ if (ret)
+ goto out_unmap;
+
+ return 0;
+
+out_unmap:
+ iounmap(wcn->mmio);
+out_free_hw:
+ ieee80211_free_hw(hw);
+out_err:
+ return ret;
+}
+
+static int wcn36xx_remove(struct platform_device *pdev)
+{
+ struct ieee80211_hw *hw = platform_get_drvdata(pdev);
+ struct wcn36xx *wcn = hw->priv;
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "platform remove\n");
+
+ mutex_destroy(&wcn->hal_mutex);
+
+ ieee80211_unregister_hw(hw);
+ iounmap(wcn->mmio);
+ ieee80211_free_hw(hw);
+
+ return 0;
+}
+
+static const struct platform_device_id wcn36xx_platform_id_table[] = {
+ {
+ .name = "wcn36xx",
+ .driver_data = 0
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(platform, wcn36xx_platform_id_table);
+
+static struct platform_driver wcn36xx_driver = {
+ .probe = wcn36xx_probe,
+ .remove = wcn36xx_remove,
+ .driver = {
+ .name = "wcn36xx",
+ .owner = THIS_MODULE,
+ },
+ .id_table = wcn36xx_platform_id_table,
+};
+
+static int __init wcn36xx_init(void)
+{
+ return platform_driver_register(&wcn36xx_driver);
+}
+module_init(wcn36xx_init);
+
+static void __exit wcn36xx_exit(void)
+{
+ platform_driver_unregister(&wcn36xx_driver);
+}
+module_exit(wcn36xx_exit);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Eugene Krasnikov k.eugene.e@gmail.com");
+MODULE_FIRMWARE(WLAN_NV_FILE);
diff --git a/drivers/net/wireless/ath/wcn36xx/pmc.c b/drivers/net/wireless/ath/wcn36xx/pmc.c
new file mode 100644
index 0000000..28b515c
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/pmc.c
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include "wcn36xx.h"
+
+int wcn36xx_pmc_enter_bmps_state(struct wcn36xx *wcn,
+ struct ieee80211_vif *vif)
+{
+ int ret = 0;
+ struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+ /* TODO: Make sure the TX chain is clean */
+ ret = wcn36xx_smd_enter_bmps(wcn, vif);
+ if (!ret) {
+ wcn36xx_dbg(WCN36XX_DBG_PMC, "Entered BMPS\n");
+ vif_priv->pw_state = WCN36XX_BMPS;
+ } else {
+ /*
+ * One reason the HW may refuse to enter BMPS is that the
+ * driver tried to enter BMPS before the first beacon was
+ * received, right after authentication completed.
+ */
+ wcn36xx_err("Cannot enter BMPS!\n");
+ }
+ return ret;
+}
+
+int wcn36xx_pmc_exit_bmps_state(struct wcn36xx *wcn,
+ struct ieee80211_vif *vif)
+{
+ struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+
+ if (WCN36XX_BMPS != vif_priv->pw_state) {
+ wcn36xx_err("Not in BMPS mode, no need to exit from BMPS mode!\n");
+ return -EINVAL;
+ }
+ wcn36xx_smd_exit_bmps(wcn, vif);
+ vif_priv->pw_state = WCN36XX_FULL_POWER;
+ return 0;
+}
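+
+/*
+ * Power-state transitions driven by the two helpers above (a sketch):
+ *
+ *   WCN36XX_FULL_POWER --wcn36xx_pmc_enter_bmps_state()--> WCN36XX_BMPS
+ *   WCN36XX_BMPS --wcn36xx_pmc_exit_bmps_state()--> WCN36XX_FULL_POWER
+ */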
+
+int wcn36xx_enable_keep_alive_null_packet(struct wcn36xx *wcn,
+ struct ieee80211_vif *vif)
+{
+ wcn36xx_dbg(WCN36XX_DBG_PMC, "%s\n", __func__);
+ return wcn36xx_smd_keep_alive_req(wcn, vif,
+ WCN36XX_HAL_KEEP_ALIVE_NULL_PKT);
+}
diff --git a/drivers/net/wireless/ath/wcn36xx/pmc.h b/drivers/net/wireless/ath/wcn36xx/pmc.h
new file mode 100644
index 0000000..f72ed68
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/pmc.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _WCN36XX_PMC_H_
+#define _WCN36XX_PMC_H_
+
+struct wcn36xx;
+
+enum wcn36xx_power_state {
+ WCN36XX_FULL_POWER,
+ WCN36XX_BMPS
+};
+
+int wcn36xx_pmc_enter_bmps_state(struct wcn36xx *wcn,
+ struct ieee80211_vif *vif);
+int wcn36xx_pmc_exit_bmps_state(struct wcn36xx *wcn,
+ struct ieee80211_vif *vif);
+int wcn36xx_enable_keep_alive_null_packet(struct wcn36xx *wcn,
+ struct ieee80211_vif *vif);
+#endif /* _WCN36XX_PMC_H_ */
diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c
new file mode 100644
index 0000000..f8c3a10
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/smd.c
@@ -0,0 +1,2126 @@
+/*
+ * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/etherdevice.h>
+#include <linux/firmware.h>
+#include <linux/bitops.h>
+#include "smd.h"
+
+static int put_cfg_tlv_u32(struct wcn36xx *wcn, size_t *len, u32 id, u32 value)
+{
+ struct wcn36xx_hal_cfg *entry;
+ u32 *val;
+
+ if (*len + sizeof(*entry) + sizeof(u32) >= WCN36XX_HAL_BUF_SIZE) {
+ wcn36xx_err("Not enough room for TLV entry\n");
+ return -ENOMEM;
+ }
+
+ entry = (struct wcn36xx_hal_cfg *) (wcn->hal_buf + *len);
+ entry->id = id;
+ entry->len = sizeof(u32);
+ entry->pad_bytes = 0;
+ entry->reserve = 0;
+
+ val = (u32 *) (entry + 1);
+ *val = value;
+
+ *len += sizeof(*entry) + sizeof(u32);
+
+ return 0;
+}
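+
+/*
+ * Sketch of the TLV entry appended by put_cfg_tlv_u32() above, laid out
+ * at wcn->hal_buf + *len (fields per struct wcn36xx_hal_cfg):
+ *
+ *   | id | len = sizeof(u32) | pad_bytes | reserve | u32 value |
+ */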
+
+static void wcn36xx_smd_set_bss_nw_type(struct wcn36xx *wcn,
+ struct ieee80211_sta *sta,
+ struct wcn36xx_hal_config_bss_params *bss_params)
+{
+ if (IEEE80211_BAND_5GHZ == WCN36XX_BAND(wcn))
+ bss_params->nw_type = WCN36XX_HAL_11A_NW_TYPE;
+ else if (sta && sta->ht_cap.ht_supported)
+ bss_params->nw_type = WCN36XX_HAL_11N_NW_TYPE;
+ else if (sta && (sta->supp_rates[IEEE80211_BAND_2GHZ] & 0x7f))
+ bss_params->nw_type = WCN36XX_HAL_11G_NW_TYPE;
+ else
+ bss_params->nw_type = WCN36XX_HAL_11B_NW_TYPE;
+}
+
+static inline u8 is_cap_supported(unsigned long caps, unsigned long flag)
+{
+ return caps & flag ? 1 : 0;
+}
+
+static void wcn36xx_smd_set_bss_ht_params(struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct wcn36xx_hal_config_bss_params *bss_params)
+{
+ if (sta && sta->ht_cap.ht_supported) {
+ unsigned long caps = sta->ht_cap.cap;
+ bss_params->ht = sta->ht_cap.ht_supported;
+ bss_params->tx_channel_width_set = is_cap_supported(caps,
+ IEEE80211_HT_CAP_SUP_WIDTH_20_40);
+ bss_params->lsig_tx_op_protection_full_support =
+ is_cap_supported(caps,
+ IEEE80211_HT_CAP_LSIG_TXOP_PROT);
+
+ bss_params->ht_oper_mode = vif->bss_conf.ht_operation_mode;
+ bss_params->lln_non_gf_coexist =
+ !!(vif->bss_conf.ht_operation_mode &
+ IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
+ /* IEEE80211_HT_STBC_PARAM_DUAL_CTS_PROT */
+ bss_params->dual_cts_protection = 0;
+ /* IEEE80211_HT_OP_MODE_PROTECTION_20MHZ */
+ bss_params->ht20_coexist = 0;
+ }
+}
+
+static void wcn36xx_smd_set_sta_ht_params(struct ieee80211_sta *sta,
+ struct wcn36xx_hal_config_sta_params *sta_params)
+{
+ if (sta->ht_cap.ht_supported) {
+ unsigned long caps = sta->ht_cap.cap;
+ sta_params->ht_capable = sta->ht_cap.ht_supported;
+ sta_params->tx_channel_width_set = is_cap_supported(caps,
+ IEEE80211_HT_CAP_SUP_WIDTH_20_40);
+ sta_params->lsig_txop_protection = is_cap_supported(caps,
+ IEEE80211_HT_CAP_LSIG_TXOP_PROT);
+
+ sta_params->max_ampdu_size = sta->ht_cap.ampdu_factor;
+ sta_params->max_ampdu_density = sta->ht_cap.ampdu_density;
+ sta_params->max_amsdu_size = is_cap_supported(caps,
+ IEEE80211_HT_CAP_MAX_AMSDU);
+ sta_params->sgi_20Mhz = is_cap_supported(caps,
+ IEEE80211_HT_CAP_SGI_20);
+ sta_params->sgi_40mhz = is_cap_supported(caps,
+ IEEE80211_HT_CAP_SGI_40);
+ sta_params->green_field_capable = is_cap_supported(caps,
+ IEEE80211_HT_CAP_GRN_FLD);
+ sta_params->delayed_ba_support = is_cap_supported(caps,
+ IEEE80211_HT_CAP_DELAY_BA);
+ sta_params->dsss_cck_mode_40mhz = is_cap_supported(caps,
+ IEEE80211_HT_CAP_DSSSCCK40);
+ }
+}
+
+static void wcn36xx_smd_set_sta_params(struct wcn36xx *wcn,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct wcn36xx_hal_config_sta_params *sta_params)
+{
+ struct wcn36xx_vif *priv_vif = (struct wcn36xx_vif *)vif->drv_priv;
+ struct wcn36xx_sta *priv_sta = NULL;
+ if (vif->type == NL80211_IFTYPE_ADHOC ||
+ vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_MESH_POINT) {
+ sta_params->type = 1;
+ sta_params->sta_index = 0xFF;
+ } else {
+ sta_params->type = 0;
+ sta_params->sta_index = 1;
+ }
+
+ sta_params->listen_interval = WCN36XX_LISTEN_INTERVAL(wcn);
+
+ /*
+ * In STA mode ieee80211_sta contains the bssid and ieee80211_vif
+ * contains our mac address. In AP mode we are the bssid, so vif
+ * contains the bssid and ieee80211_sta contains the mac address.
+ */
+ if (NL80211_IFTYPE_STATION == vif->type)
+ memcpy(&sta_params->mac, vif->addr, ETH_ALEN);
+ else
+ memcpy(&sta_params->bssid, vif->addr, ETH_ALEN);
+
+ sta_params->encrypt_type = priv_vif->encrypt_type;
+ sta_params->short_preamble_supported =
+ !(WCN36XX_FLAGS(wcn) &
+ IEEE80211_HW_2GHZ_SHORT_PREAMBLE_INCAPABLE);
+
+ sta_params->rifs_mode = 0;
+ sta_params->rmf = 0;
+ sta_params->action = 0;
+ sta_params->uapsd = 0;
+ sta_params->mimo_ps = WCN36XX_HAL_HT_MIMO_PS_STATIC;
+ sta_params->max_ampdu_duration = 0;
+ sta_params->bssid_index = priv_vif->bss_index;
+ sta_params->p2p = 0;
+
+ if (sta) {
+ priv_sta = (struct wcn36xx_sta *)sta->drv_priv;
+ if (NL80211_IFTYPE_STATION == vif->type)
+ memcpy(&sta_params->bssid, sta->addr, ETH_ALEN);
+ else
+ memcpy(&sta_params->mac, sta->addr, ETH_ALEN);
+ sta_params->wmm_enabled = sta->wme;
+ sta_params->max_sp_len = sta->max_sp;
+ sta_params->aid = priv_sta->aid;
+ wcn36xx_smd_set_sta_ht_params(sta, sta_params);
+ memcpy(&sta_params->supported_rates, &priv_sta->supported_rates,
+ sizeof(priv_sta->supported_rates));
+ } else {
+ wcn36xx_set_default_rates(&sta_params->supported_rates);
+ }
+}
+
+static int wcn36xx_smd_send_and_wait(struct wcn36xx *wcn, size_t len)
+{
+ int ret = 0;
+ wcn36xx_dbg_dump(WCN36XX_DBG_SMD_DUMP, "HAL >>> ", wcn->hal_buf, len);
+
+ init_completion(&wcn->hal_rsp_compl);
+ ret = wcn->ctrl_ops->tx(wcn->hal_buf, len);
+ if (ret) {
+ wcn36xx_err("HAL TX failed\n");
+ goto out;
+ }
+ if (!wait_for_completion_timeout(&wcn->hal_rsp_compl,
+ msecs_to_jiffies(HAL_MSG_TIMEOUT))) {
+ wcn36xx_err("Timeout while waiting for SMD response\n");
+ ret = -ETIME;
+ goto out;
+ }
+out:
+ return ret;
+}
+
+#define INIT_HAL_MSG(msg_body, type) \
+ do { \
+ memset(&msg_body, 0, sizeof(msg_body)); \
+ msg_body.header.msg_type = type; \
+ msg_body.header.msg_version = WCN36XX_HAL_MSG_VERSION0; \
+ msg_body.header.len = sizeof(msg_body); \
+ } while (0)
+
+#define PREPARE_HAL_BUF(send_buf, msg_body) \
+ do { \
+ memset(send_buf, 0, msg_body.header.len); \
+ memcpy(send_buf, &msg_body, sizeof(msg_body)); \
+ } while (0)
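+
+/*
+ * Typical request pattern built on the two helpers above (a sketch
+ * mirroring the senders below, e.g. wcn36xx_smd_start()):
+ *
+ *   struct wcn36xx_hal_mac_start_req_msg msg_body;
+ *
+ *   mutex_lock(&wcn->hal_mutex);
+ *   INIT_HAL_MSG(msg_body, WCN36XX_HAL_START_REQ);
+ *   PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+ *   ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ *   mutex_unlock(&wcn->hal_mutex);
+ */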
+
+static int wcn36xx_smd_rsp_status_check(void *buf, size_t len)
+{
+ struct wcn36xx_fw_msg_status_rsp *rsp;
+
+ if (len < sizeof(struct wcn36xx_hal_msg_header) +
+ sizeof(struct wcn36xx_fw_msg_status_rsp))
+ return -EIO;
+
+ rsp = (struct wcn36xx_fw_msg_status_rsp *)
+ (buf + sizeof(struct wcn36xx_hal_msg_header));
+
+ if (WCN36XX_FW_MSG_RESULT_SUCCESS != rsp->status)
+ return rsp->status;
+
+ return 0;
+}
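+
+/*
+ * Response layout assumed by the check above (an illustrative sketch):
+ *
+ *   | struct wcn36xx_hal_msg_header | wcn36xx_fw_msg_status_rsp (u32 status) |
+ */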
+
+int wcn36xx_smd_load_nv(struct wcn36xx *wcn)
+{
+ const struct firmware *nv;
+ struct nv_data *nv_d;
+ struct wcn36xx_hal_nv_img_download_req_msg msg_body;
+ int fw_bytes_left;
+ int ret;
+ u16 fm_offset = 0;
+
+ ret = request_firmware(&nv, WLAN_NV_FILE, wcn->dev);
+ if (ret) {
+ wcn36xx_err("Failed to load nv file %s: %d\n",
+ WLAN_NV_FILE, ret);
+ goto out_free_nv;
+ }
+
+ nv_d = (struct nv_data *)nv->data;
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_DOWNLOAD_NV_REQ);
+
+ msg_body.header.len += WCN36XX_NV_FRAGMENT_SIZE;
+
+ msg_body.frag_number = 0;
+ /* hal_buf must be protected with mutex */
+ mutex_lock(&wcn->hal_mutex);
+
+ do {
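+ /*
+ * The "- 4" accounts for the leading is_valid word of
+ * struct nv_data; only the table payload is downloaded.
+ */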
+ fw_bytes_left = nv->size - fm_offset - 4;
+ if (fw_bytes_left > WCN36XX_NV_FRAGMENT_SIZE) {
+ msg_body.last_fragment = 0;
+ msg_body.nv_img_buffer_size = WCN36XX_NV_FRAGMENT_SIZE;
+ } else {
+ msg_body.last_fragment = 1;
+ msg_body.nv_img_buffer_size = fw_bytes_left;
+
+ /* Do not forget to update the overall message length */
+ msg_body.header.len = sizeof(msg_body) + fw_bytes_left;
+
+ }
+
+ /* Add load NV request message header */
+ memcpy(wcn->hal_buf, &msg_body, sizeof(msg_body));
+
+ /* Add NV body itself */
+ memcpy(wcn->hal_buf + sizeof(msg_body),
+ &nv_d->table + fm_offset,
+ msg_body.nv_img_buffer_size);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret)
+ goto out_unlock;
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf,
+ wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_load_nv response failed err=%d\n",
+ ret);
+ goto out_unlock;
+ }
+ msg_body.frag_number++;
+ fm_offset += WCN36XX_NV_FRAGMENT_SIZE;
+
+ } while (msg_body.last_fragment != 1);
+
+out_unlock:
+ mutex_unlock(&wcn->hal_mutex);
+out_free_nv:
+ release_firmware(nv);
+
+ return ret;
+}
+
+static int wcn36xx_smd_start_rsp(struct wcn36xx *wcn, void *buf, size_t len)
+{
+ struct wcn36xx_hal_mac_start_rsp_msg *rsp;
+
+ if (len < sizeof(*rsp))
+ return -EIO;
+
+ rsp = (struct wcn36xx_hal_mac_start_rsp_msg *)buf;
+
+ if (WCN36XX_FW_MSG_RESULT_SUCCESS != rsp->start_rsp_params.status)
+ return -EIO;
+
+ memcpy(wcn->crm_version, rsp->start_rsp_params.crm_version,
+ WCN36XX_HAL_VERSION_LENGTH);
+ memcpy(wcn->wlan_version, rsp->start_rsp_params.wlan_version,
+ WCN36XX_HAL_VERSION_LENGTH);
+
+ /* null terminate the strings, just in case */
+ wcn->crm_version[WCN36XX_HAL_VERSION_LENGTH] = '\0';
+ wcn->wlan_version[WCN36XX_HAL_VERSION_LENGTH] = '\0';
+
+ wcn->fw_revision = rsp->start_rsp_params.version.revision;
+ wcn->fw_version = rsp->start_rsp_params.version.version;
+ wcn->fw_minor = rsp->start_rsp_params.version.minor;
+ wcn->fw_major = rsp->start_rsp_params.version.major;
+
+ wcn36xx_info("firmware WLAN version '%s' and CRM version '%s'\n",
+ wcn->wlan_version, wcn->crm_version);
+
+ wcn36xx_info("firmware API %u.%u.%u.%u, %u stations, %u bssids\n",
+ wcn->fw_major, wcn->fw_minor,
+ wcn->fw_version, wcn->fw_revision,
+ rsp->start_rsp_params.stations,
+ rsp->start_rsp_params.bssids);
+
+ return 0;
+}
+
+int wcn36xx_smd_start(struct wcn36xx *wcn)
+{
+ struct wcn36xx_hal_mac_start_req_msg msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_START_REQ);
+
+ msg_body.params.type = DRIVER_TYPE_PRODUCTION;
+ msg_body.params.len = 0;
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL, "hal start type %d\n",
+ msg_body.params.type);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_start failed\n");
+ goto out;
+ }
+
+ ret = wcn36xx_smd_start_rsp(wcn, wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_start response failed err=%d\n", ret);
+ goto out;
+ }
+
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_stop(struct wcn36xx *wcn)
+{
+ struct wcn36xx_hal_mac_stop_req_msg msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_STOP_REQ);
+
+ msg_body.stop_req_params.reason = HAL_STOP_TYPE_RF_KILL;
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_stop failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_stop response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_init_scan(struct wcn36xx *wcn, enum wcn36xx_hal_sys_mode mode)
+{
+ struct wcn36xx_hal_init_scan_req_msg msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_INIT_SCAN_REQ);
+
+ msg_body.mode = mode;
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL, "hal init scan mode %d\n", msg_body.mode);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_init_scan failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_init_scan response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_start_scan(struct wcn36xx *wcn)
+{
+ struct wcn36xx_hal_start_scan_req_msg msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_START_SCAN_REQ);
+
+ msg_body.scan_channel = WCN36XX_HW_CHANNEL(wcn);
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL, "hal start scan channel %d\n",
+ msg_body.scan_channel);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_start_scan failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_start_scan response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_end_scan(struct wcn36xx *wcn)
+{
+ struct wcn36xx_hal_end_scan_req_msg msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_END_SCAN_REQ);
+
+ msg_body.scan_channel = WCN36XX_HW_CHANNEL(wcn);
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL, "hal end scan channel %d\n",
+ msg_body.scan_channel);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_end_scan failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_end_scan response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_finish_scan(struct wcn36xx *wcn,
+ enum wcn36xx_hal_sys_mode mode)
+{
+ struct wcn36xx_hal_finish_scan_req_msg msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_FINISH_SCAN_REQ);
+
+ msg_body.mode = mode;
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL, "hal finish scan mode %d\n",
+ msg_body.mode);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_finish_scan failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_finish_scan response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+static int wcn36xx_smd_switch_channel_rsp(void *buf, size_t len)
+{
+ struct wcn36xx_hal_switch_channel_rsp_msg *rsp;
+ int ret = 0;
+
+ ret = wcn36xx_smd_rsp_status_check(buf, len);
+ if (ret)
+ return ret;
+ rsp = (struct wcn36xx_hal_switch_channel_rsp_msg *)buf;
+ wcn36xx_dbg(WCN36XX_DBG_HAL, "channel switched to: %d, status: %d\n",
+ rsp->channel_number, rsp->status);
+ return ret;
+}
+
+int wcn36xx_smd_switch_channel(struct wcn36xx *wcn,
+ struct ieee80211_vif *vif, int ch)
+{
+ struct wcn36xx_hal_switch_channel_req_msg msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_CH_SWITCH_REQ);
+
+ msg_body.channel_number = (u8)ch;
+ msg_body.tx_mgmt_power = 0xbf;
+ msg_body.max_tx_power = 0xbf;
+ memcpy(msg_body.self_sta_mac_addr, vif->addr, ETH_ALEN);
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_switch_channel failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_switch_channel_rsp(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_switch_channel response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+static int wcn36xx_smd_update_scan_params_rsp(void *buf, size_t len)
+{
+ struct wcn36xx_hal_update_scan_params_resp *rsp;
+
+ if (len < sizeof(*rsp))
+ return -EIO;
+
+ rsp = (struct wcn36xx_hal_update_scan_params_resp *)buf;
+
+ /* Remove the PNO version bit */
+ rsp->status &= (~(WCN36XX_FW_MSG_PNO_VERSION_MASK));
+
+ if (WCN36XX_FW_MSG_RESULT_SUCCESS != rsp->status) {
+ wcn36xx_warn("error response from update scan\n");
+ return rsp->status;
+ }
+
+ return 0;
+}
+
+int wcn36xx_smd_update_scan_params(struct wcn36xx *wcn)
+{
+ struct wcn36xx_hal_update_scan_params_req msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_UPDATE_SCAN_PARAM_REQ);
+
+ msg_body.dot11d_enabled = 0;
+ msg_body.dot11d_resolved = 0;
+ msg_body.channel_count = 26;
+ msg_body.active_min_ch_time = 60;
+ msg_body.active_max_ch_time = 120;
+ msg_body.passive_min_ch_time = 60;
+ msg_body.passive_max_ch_time = 110;
+ msg_body.state = 0;
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "hal update scan params channel_count %d\n",
+ msg_body.channel_count);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_update_scan_params failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_update_scan_params_rsp(wcn->hal_buf,
+ wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_update_scan_params response failed err=%d\n",
+ ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+static int wcn36xx_smd_add_sta_self_rsp(struct wcn36xx *wcn,
+ struct ieee80211_vif *vif,
+ void *buf,
+ size_t len)
+{
+ struct wcn36xx_hal_add_sta_self_rsp_msg *rsp;
+ struct wcn36xx_vif *priv_vif = (struct wcn36xx_vif *)vif->drv_priv;
+
+ if (len < sizeof(*rsp))
+ return -EINVAL;
+
+ rsp = (struct wcn36xx_hal_add_sta_self_rsp_msg *)buf;
+
+ if (rsp->status != WCN36XX_FW_MSG_RESULT_SUCCESS) {
+ wcn36xx_warn("hal add sta self failure: %d\n",
+ rsp->status);
+ return rsp->status;
+ }
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "hal add sta self status %d self_sta_index %d dpu_index %d\n",
+ rsp->status, rsp->self_sta_index, rsp->dpu_index);
+
+ priv_vif->self_sta_index = rsp->self_sta_index;
+ priv_vif->self_dpu_desc_index = rsp->dpu_index;
+
+ return 0;
+}
+
+int wcn36xx_smd_add_sta_self(struct wcn36xx *wcn, struct ieee80211_vif *vif)
+{
+ struct wcn36xx_hal_add_sta_self_req msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_ADD_STA_SELF_REQ);
+
+ memcpy(&msg_body.self_addr, vif->addr, ETH_ALEN);
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "hal add sta self self_addr %pM status %d\n",
+ msg_body.self_addr, msg_body.status);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_add_sta_self failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_add_sta_self_rsp(wcn,
+ vif,
+ wcn->hal_buf,
+ wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_add_sta_self response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_delete_sta_self(struct wcn36xx *wcn, u8 *addr)
+{
+ struct wcn36xx_hal_del_sta_self_req_msg msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_DEL_STA_SELF_REQ);
+
+ memcpy(&msg_body.self_addr, addr, ETH_ALEN);
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_delete_sta_self failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_delete_sta_self response failed err=%d\n",
+ ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_delete_sta(struct wcn36xx *wcn, u8 sta_index)
+{
+ struct wcn36xx_hal_delete_sta_req_msg msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_DELETE_STA_REQ);
+
+ msg_body.sta_index = sta_index;
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "hal delete sta sta_index %d\n",
+ msg_body.sta_index);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_delete_sta failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_delete_sta response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+static int wcn36xx_smd_join_rsp(void *buf, size_t len)
+{
+ struct wcn36xx_hal_join_rsp_msg *rsp;
+
+ if (wcn36xx_smd_rsp_status_check(buf, len))
+ return -EIO;
+
+ rsp = (struct wcn36xx_hal_join_rsp_msg *)buf;
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "hal rsp join status %d tx_mgmt_power %d\n",
+ rsp->status, rsp->tx_mgmt_power);
+
+ return 0;
+}
+
+int wcn36xx_smd_join(struct wcn36xx *wcn, const u8 *bssid, u8 *vif, u8 ch)
+{
+ struct wcn36xx_hal_join_req_msg msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_JOIN_REQ);
+
+ memcpy(&msg_body.bssid, bssid, ETH_ALEN);
+ memcpy(&msg_body.self_sta_mac_addr, vif, ETH_ALEN);
+ msg_body.channel = ch;
+
+ if (conf_is_ht40_minus(&wcn->hw->conf))
+ msg_body.secondary_channel_offset =
+ PHY_DOUBLE_CHANNEL_HIGH_PRIMARY;
+ else if (conf_is_ht40_plus(&wcn->hw->conf))
+ msg_body.secondary_channel_offset =
+ PHY_DOUBLE_CHANNEL_LOW_PRIMARY;
+ else
+ msg_body.secondary_channel_offset =
+ PHY_SINGLE_CHANNEL_CENTERED;
+
+ msg_body.link_state = WCN36XX_HAL_LINK_PREASSOC_STATE;
+
+ msg_body.max_tx_power = 0xbf;
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "hal join req bssid %pM self_sta_mac_addr %pM channel %d link_state %d\n",
+ msg_body.bssid, msg_body.self_sta_mac_addr,
+ msg_body.channel, msg_body.link_state);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_join failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_join_rsp(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_join response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_set_link_st(struct wcn36xx *wcn, const u8 *bssid,
+ const u8 *sta_mac,
+ enum wcn36xx_hal_link_state state)
+{
+ struct wcn36xx_hal_set_link_state_req_msg msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_SET_LINK_ST_REQ);
+
+ memcpy(&msg_body.bssid, bssid, ETH_ALEN);
+ memcpy(&msg_body.self_mac_addr, sta_mac, ETH_ALEN);
+ msg_body.state = state;
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "hal set link state bssid %pM self_mac_addr %pM state %d\n",
+ msg_body.bssid, msg_body.self_mac_addr, msg_body.state);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_set_link_st failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_set_link_st response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+static void wcn36xx_smd_convert_sta_to_v1(struct wcn36xx *wcn,
+ const struct wcn36xx_hal_config_sta_params *orig,
+ struct wcn36xx_hal_config_sta_params_v1 *v1)
+{
+ /* convert orig to v1 format */
+ memcpy(&v1->bssid, orig->bssid, ETH_ALEN);
+ memcpy(&v1->mac, orig->mac, ETH_ALEN);
+ v1->aid = orig->aid;
+ v1->type = orig->type;
+ v1->listen_interval = orig->listen_interval;
+ v1->ht_capable = orig->ht_capable;
+
+ v1->max_ampdu_size = orig->max_ampdu_size;
+ v1->max_ampdu_density = orig->max_ampdu_density;
+ v1->sgi_40mhz = orig->sgi_40mhz;
+ v1->sgi_20Mhz = orig->sgi_20Mhz;
+
+ memcpy(&v1->supported_rates, &orig->supported_rates,
+ sizeof(orig->supported_rates));
+ v1->sta_index = orig->sta_index;
+}
+
+static int wcn36xx_smd_config_sta_rsp(struct wcn36xx *wcn,
+ struct ieee80211_sta *sta,
+ void *buf,
+ size_t len)
+{
+ struct wcn36xx_hal_config_sta_rsp_msg *rsp;
+ struct config_sta_rsp_params *params;
+ struct wcn36xx_sta *sta_priv = (struct wcn36xx_sta *)sta->drv_priv;
+
+ if (len < sizeof(*rsp))
+ return -EINVAL;
+
+ rsp = (struct wcn36xx_hal_config_sta_rsp_msg *)buf;
+ params = &rsp->params;
+
+ if (params->status != WCN36XX_FW_MSG_RESULT_SUCCESS) {
+ wcn36xx_warn("hal config sta response failure: %d\n",
+ params->status);
+ return -EIO;
+ }
+
+ sta_priv->sta_index = params->sta_index;
+ sta_priv->dpu_desc_index = params->dpu_index;
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "hal config sta rsp status %d sta_index %d bssid_index %d p2p %d\n",
+ params->status, params->sta_index, params->bssid_index,
+ params->p2p);
+
+ return 0;
+}
+
+static int wcn36xx_smd_config_sta_v1(struct wcn36xx *wcn,
+ const struct wcn36xx_hal_config_sta_req_msg *orig)
+{
+ struct wcn36xx_hal_config_sta_req_msg_v1 msg_body;
+ struct wcn36xx_hal_config_sta_params_v1 *sta = &msg_body.sta_params;
+
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_CONFIG_STA_REQ);
+
+ wcn36xx_smd_convert_sta_to_v1(wcn, &orig->sta_params,
+ &msg_body.sta_params);
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "hal config sta v1 action %d sta_index %d bssid_index %d bssid %pM type %d mac %pM aid %d\n",
+ sta->action, sta->sta_index, sta->bssid_index,
+ sta->bssid, sta->type, sta->mac, sta->aid);
+
+ return wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+}
+
+int wcn36xx_smd_config_sta(struct wcn36xx *wcn, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct wcn36xx_hal_config_sta_req_msg msg;
+ struct wcn36xx_hal_config_sta_params *sta_params;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg, WCN36XX_HAL_CONFIG_STA_REQ);
+
+ sta_params = &msg.sta_params;
+
+ wcn36xx_smd_set_sta_params(wcn, vif, sta, sta_params);
+
+ if (!wcn36xx_is_fw_version(wcn, 1, 2, 2, 24)) {
+ ret = wcn36xx_smd_config_sta_v1(wcn, &msg);
+ } else {
+ PREPARE_HAL_BUF(wcn->hal_buf, msg);
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "hal config sta action %d sta_index %d bssid_index %d bssid %pM type %d mac %pM aid %d\n",
+ sta_params->action, sta_params->sta_index,
+ sta_params->bssid_index, sta_params->bssid,
+ sta_params->type, sta_params->mac, sta_params->aid);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg.header.len);
+ }
+ if (ret) {
+ wcn36xx_err("Sending hal_config_sta failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_config_sta_rsp(wcn,
+ sta,
+ wcn->hal_buf,
+ wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_config_sta response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+static int wcn36xx_smd_config_bss_v1(struct wcn36xx *wcn,
+ const struct wcn36xx_hal_config_bss_req_msg *orig)
+{
+ struct wcn36xx_hal_config_bss_req_msg_v1 msg_body;
+ struct wcn36xx_hal_config_bss_params_v1 *bss = &msg_body.bss_params;
+ struct wcn36xx_hal_config_sta_params_v1 *sta = &bss->sta;
+
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_CONFIG_BSS_REQ);
+
+ /* convert orig to v1 */
+ memcpy(&msg_body.bss_params.bssid,
+ &orig->bss_params.bssid, ETH_ALEN);
+ memcpy(&msg_body.bss_params.self_mac_addr,
+ &orig->bss_params.self_mac_addr, ETH_ALEN);
+
+ msg_body.bss_params.bss_type = orig->bss_params.bss_type;
+ msg_body.bss_params.oper_mode = orig->bss_params.oper_mode;
+ msg_body.bss_params.nw_type = orig->bss_params.nw_type;
+
+ msg_body.bss_params.short_slot_time_supported =
+ orig->bss_params.short_slot_time_supported;
+ msg_body.bss_params.lla_coexist = orig->bss_params.lla_coexist;
+ msg_body.bss_params.llb_coexist = orig->bss_params.llb_coexist;
+ msg_body.bss_params.llg_coexist = orig->bss_params.llg_coexist;
+ msg_body.bss_params.ht20_coexist = orig->bss_params.ht20_coexist;
+ msg_body.bss_params.lln_non_gf_coexist =
+ orig->bss_params.lln_non_gf_coexist;
+
+ msg_body.bss_params.lsig_tx_op_protection_full_support =
+ orig->bss_params.lsig_tx_op_protection_full_support;
+ msg_body.bss_params.rifs_mode = orig->bss_params.rifs_mode;
+ msg_body.bss_params.beacon_interval = orig->bss_params.beacon_interval;
+ msg_body.bss_params.dtim_period = orig->bss_params.dtim_period;
+ msg_body.bss_params.tx_channel_width_set =
+ orig->bss_params.tx_channel_width_set;
+ msg_body.bss_params.oper_channel = orig->bss_params.oper_channel;
+ msg_body.bss_params.ext_channel = orig->bss_params.ext_channel;
+
+ msg_body.bss_params.reserved = orig->bss_params.reserved;
+
+ memcpy(&msg_body.bss_params.ssid,
+ &orig->bss_params.ssid,
+ sizeof(orig->bss_params.ssid));
+
+ msg_body.bss_params.action = orig->bss_params.action;
+ msg_body.bss_params.rateset = orig->bss_params.rateset;
+ msg_body.bss_params.ht = orig->bss_params.ht;
+ msg_body.bss_params.obss_prot_enabled =
+ orig->bss_params.obss_prot_enabled;
+ msg_body.bss_params.rmf = orig->bss_params.rmf;
+ msg_body.bss_params.ht_oper_mode = orig->bss_params.ht_oper_mode;
+ msg_body.bss_params.dual_cts_protection =
+ orig->bss_params.dual_cts_protection;
+
+ msg_body.bss_params.max_probe_resp_retry_limit =
+ orig->bss_params.max_probe_resp_retry_limit;
+ msg_body.bss_params.hidden_ssid = orig->bss_params.hidden_ssid;
+ msg_body.bss_params.proxy_probe_resp =
+ orig->bss_params.proxy_probe_resp;
+ msg_body.bss_params.edca_params_valid =
+ orig->bss_params.edca_params_valid;
+
+ memcpy(&msg_body.bss_params.acbe,
+ &orig->bss_params.acbe,
+ sizeof(orig->bss_params.acbe));
+ memcpy(&msg_body.bss_params.acbk,
+ &orig->bss_params.acbk,
+ sizeof(orig->bss_params.acbk));
+ memcpy(&msg_body.bss_params.acvi,
+ &orig->bss_params.acvi,
+ sizeof(orig->bss_params.acvi));
+ memcpy(&msg_body.bss_params.acvo,
+ &orig->bss_params.acvo,
+ sizeof(orig->bss_params.acvo));
+
+ msg_body.bss_params.ext_set_sta_key_param_valid =
+ orig->bss_params.ext_set_sta_key_param_valid;
+
+	memcpy(&msg_body.bss_params.ext_set_sta_key_param,
+	       &orig->bss_params.ext_set_sta_key_param,
+	       sizeof(orig->bss_params.ext_set_sta_key_param));
+
+ msg_body.bss_params.wcn36xx_hal_persona =
+ orig->bss_params.wcn36xx_hal_persona;
+ msg_body.bss_params.spectrum_mgt_enable =
+ orig->bss_params.spectrum_mgt_enable;
+ msg_body.bss_params.tx_mgmt_power = orig->bss_params.tx_mgmt_power;
+ msg_body.bss_params.max_tx_power = orig->bss_params.max_tx_power;
+
+ wcn36xx_smd_convert_sta_to_v1(wcn, &orig->bss_params.sta,
+ &msg_body.bss_params.sta);
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "hal config bss v1 bssid %pM self_mac_addr %pM bss_type %d oper_mode %d nw_type %d\n",
+ bss->bssid, bss->self_mac_addr, bss->bss_type,
+ bss->oper_mode, bss->nw_type);
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "- sta bssid %pM action %d sta_index %d bssid_index %d aid %d type %d mac %pM\n",
+ sta->bssid, sta->action, sta->sta_index,
+ sta->bssid_index, sta->aid, sta->type, sta->mac);
+
+ return wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+}
+
+static int wcn36xx_smd_config_bss_rsp(struct wcn36xx *wcn,
+ struct ieee80211_vif *vif,
+ void *buf,
+ size_t len)
+{
+ struct wcn36xx_hal_config_bss_rsp_msg *rsp;
+ struct wcn36xx_hal_config_bss_rsp_params *params;
+ struct wcn36xx_vif *priv_vif = (struct wcn36xx_vif *)vif->drv_priv;
+
+ if (len < sizeof(*rsp))
+ return -EINVAL;
+
+ rsp = (struct wcn36xx_hal_config_bss_rsp_msg *)buf;
+ params = &rsp->bss_rsp_params;
+
+ if (params->status != WCN36XX_FW_MSG_RESULT_SUCCESS) {
+ wcn36xx_warn("hal config bss response failure: %d\n",
+ params->status);
+ return -EIO;
+ }
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "hal config bss rsp status %d bss_idx %d dpu_desc_index %d"
+ " sta_idx %d self_idx %d bcast_idx %d mac %pM"
+ " power %d ucast_dpu_signature %d\n",
+ params->status, params->bss_index, params->dpu_desc_index,
+ params->bss_sta_index, params->bss_self_sta_index,
+ params->bss_bcast_sta_idx, params->mac,
+ params->tx_mgmt_power, params->ucast_dpu_signature);
+
+ priv_vif->bss_index = params->bss_index;
+
+ if (priv_vif->sta) {
+ priv_vif->sta->bss_sta_index = params->bss_sta_index;
+ priv_vif->sta->bss_dpu_desc_index = params->dpu_desc_index;
+ }
+
+ priv_vif->ucast_dpu_signature = params->ucast_dpu_signature;
+
+ return 0;
+}
+
+int wcn36xx_smd_config_bss(struct wcn36xx *wcn, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, const u8 *bssid,
+ bool update)
+{
+ struct wcn36xx_hal_config_bss_req_msg msg;
+ struct wcn36xx_hal_config_bss_params *bss;
+ struct wcn36xx_hal_config_sta_params *sta_params;
+ struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg, WCN36XX_HAL_CONFIG_BSS_REQ);
+
+ bss = &msg.bss_params;
+ sta_params = &bss->sta;
+
+ WARN_ON(is_zero_ether_addr(bssid));
+
+ memcpy(&bss->bssid, bssid, ETH_ALEN);
+
+ memcpy(bss->self_mac_addr, vif->addr, ETH_ALEN);
+
+ if (vif->type == NL80211_IFTYPE_STATION) {
+ bss->bss_type = WCN36XX_HAL_INFRASTRUCTURE_MODE;
+
+ /* STA */
+ bss->oper_mode = 1;
+ bss->wcn36xx_hal_persona = WCN36XX_HAL_STA_MODE;
+ } else if (vif->type == NL80211_IFTYPE_AP) {
+ bss->bss_type = WCN36XX_HAL_INFRA_AP_MODE;
+
+ /* AP */
+ bss->oper_mode = 0;
+ bss->wcn36xx_hal_persona = WCN36XX_HAL_STA_SAP_MODE;
+ } else if (vif->type == NL80211_IFTYPE_ADHOC ||
+ vif->type == NL80211_IFTYPE_MESH_POINT) {
+ bss->bss_type = WCN36XX_HAL_IBSS_MODE;
+
+ /* STA */
+ bss->oper_mode = 1;
+ } else {
+ wcn36xx_warn("Unknown type for bss config: %d\n", vif->type);
+ }
+
+ if (vif->type == NL80211_IFTYPE_STATION)
+ wcn36xx_smd_set_bss_nw_type(wcn, sta, bss);
+ else
+ bss->nw_type = WCN36XX_HAL_11N_NW_TYPE;
+
+ bss->short_slot_time_supported = vif->bss_conf.use_short_slot;
+ bss->lla_coexist = 0;
+ bss->llb_coexist = 0;
+ bss->llg_coexist = 0;
+ bss->rifs_mode = 0;
+ bss->beacon_interval = vif->bss_conf.beacon_int;
+ bss->dtim_period = vif_priv->dtim_period;
+
+ wcn36xx_smd_set_bss_ht_params(vif, sta, bss);
+
+ bss->oper_channel = WCN36XX_HW_CHANNEL(wcn);
+
+ if (conf_is_ht40_minus(&wcn->hw->conf))
+ bss->ext_channel = IEEE80211_HT_PARAM_CHA_SEC_BELOW;
+ else if (conf_is_ht40_plus(&wcn->hw->conf))
+ bss->ext_channel = IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
+ else
+ bss->ext_channel = IEEE80211_HT_PARAM_CHA_SEC_NONE;
+
+ bss->reserved = 0;
+ wcn36xx_smd_set_sta_params(wcn, vif, sta, sta_params);
+
+ /* wcn->ssid is only valid in AP and IBSS mode */
+ bss->ssid.length = vif_priv->ssid.length;
+ memcpy(bss->ssid.ssid, vif_priv->ssid.ssid, vif_priv->ssid.length);
+
+ bss->obss_prot_enabled = 0;
+ bss->rmf = 0;
+ bss->max_probe_resp_retry_limit = 0;
+ bss->hidden_ssid = vif->bss_conf.hidden_ssid;
+ bss->proxy_probe_resp = 0;
+ bss->edca_params_valid = 0;
+
+ /* FIXME: set acbe, acbk, acvi and acvo */
+
+ bss->ext_set_sta_key_param_valid = 0;
+
+ /* FIXME: set ext_set_sta_key_param */
+
+ bss->spectrum_mgt_enable = 0;
+ bss->tx_mgmt_power = 0;
+ bss->max_tx_power = WCN36XX_MAX_POWER(wcn);
+
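+	/* A zero action adds a new BSS; non-zero updates an existing one. */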
+ bss->action = update;
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "hal config bss bssid %pM self_mac_addr %pM bss_type %d oper_mode %d nw_type %d\n",
+ bss->bssid, bss->self_mac_addr, bss->bss_type,
+ bss->oper_mode, bss->nw_type);
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "- sta bssid %pM action %d sta_index %d bssid_index %d aid %d type %d mac %pM\n",
+ sta_params->bssid, sta_params->action,
+ sta_params->sta_index, sta_params->bssid_index,
+ sta_params->aid, sta_params->type,
+ sta_params->mac);
+
+ if (!wcn36xx_is_fw_version(wcn, 1, 2, 2, 24)) {
+ ret = wcn36xx_smd_config_bss_v1(wcn, &msg);
+ } else {
+ PREPARE_HAL_BUF(wcn->hal_buf, msg);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg.header.len);
+ }
+ if (ret) {
+ wcn36xx_err("Sending hal_config_bss failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_config_bss_rsp(wcn,
+ vif,
+ wcn->hal_buf,
+ wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_config_bss response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_delete_bss(struct wcn36xx *wcn, struct ieee80211_vif *vif)
+{
+ struct wcn36xx_hal_delete_bss_req_msg msg_body;
+ struct wcn36xx_vif *priv_vif = (struct wcn36xx_vif *)vif->drv_priv;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_DELETE_BSS_REQ);
+
+ msg_body.bss_index = priv_vif->bss_index;
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL, "hal delete bss %d\n", msg_body.bss_index);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_delete_bss failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_delete_bss response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_send_beacon(struct wcn36xx *wcn, struct ieee80211_vif *vif,
+ struct sk_buff *skb_beacon, u16 tim_off,
+ u16 p2p_off)
+{
+ struct wcn36xx_hal_send_beacon_req_msg msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_SEND_BEACON_REQ);
+
+	/* TODO: find out why the firmware needs 6 extra bytes here */
+ msg_body.beacon_length = skb_beacon->len + 6;
+
+ if (BEACON_TEMPLATE_SIZE > msg_body.beacon_length) {
+ memcpy(&msg_body.beacon, &skb_beacon->len, sizeof(u32));
+ memcpy(&(msg_body.beacon[4]), skb_beacon->data,
+ skb_beacon->len);
+ } else {
+		wcn36xx_err("Beacon is too big: beacon size=%d\n",
+			    msg_body.beacon_length);
+		ret = -ENOMEM;
+		goto out;
+ }
+ memcpy(msg_body.bssid, vif->addr, ETH_ALEN);
+
+	/* TODO: find out why the TIM IE offset needs the extra 4 bytes */
+	msg_body.tim_ie_offset = tim_off + 4;
+ msg_body.p2p_ie_offset = p2p_off;
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "hal send beacon beacon_length %d\n",
+ msg_body.beacon_length);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_send_beacon failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_send_beacon response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_update_proberesp_tmpl(struct wcn36xx *wcn,
+ struct ieee80211_vif *vif,
+ struct sk_buff *skb)
+{
+ struct wcn36xx_hal_send_probe_resp_req_msg msg;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg, WCN36XX_HAL_UPDATE_PROBE_RSP_TEMPLATE_REQ);
+
+ if (skb->len > BEACON_TEMPLATE_SIZE) {
+		wcn36xx_warn("probe response template is too big: %d\n",
+			     skb->len);
+		ret = -E2BIG;
+		goto out;
+ }
+
+ msg.probe_resp_template_len = skb->len;
+ memcpy(&msg.probe_resp_template, skb->data, skb->len);
+
+ memcpy(msg.bssid, vif->addr, ETH_ALEN);
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg);
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "hal update probe rsp len %d bssid %pM\n",
+ msg.probe_resp_template_len, msg.bssid);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_update_proberesp_tmpl failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_update_proberesp_tmpl response failed err=%d\n",
+ ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_set_stakey(struct wcn36xx *wcn,
+ enum ani_ed_type enc_type,
+ u8 keyidx,
+ u8 keylen,
+ u8 *key,
+ u8 sta_index)
+{
+ struct wcn36xx_hal_set_sta_key_req_msg msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_SET_STAKEY_REQ);
+
+ msg_body.set_sta_key_params.sta_index = sta_index;
+ msg_body.set_sta_key_params.enc_type = enc_type;
+
+ msg_body.set_sta_key_params.key[0].id = keyidx;
+ msg_body.set_sta_key_params.key[0].unicast = 1;
+ msg_body.set_sta_key_params.key[0].direction = WCN36XX_HAL_TX_RX;
+ msg_body.set_sta_key_params.key[0].pae_role = 0;
+ msg_body.set_sta_key_params.key[0].length = keylen;
+ memcpy(msg_body.set_sta_key_params.key[0].key, key, keylen);
+ msg_body.set_sta_key_params.single_tid_rc = 1;
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_set_stakey failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_set_stakey response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_set_bsskey(struct wcn36xx *wcn,
+ enum ani_ed_type enc_type,
+ u8 keyidx,
+ u8 keylen,
+ u8 *key)
+{
+ struct wcn36xx_hal_set_bss_key_req_msg msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_SET_BSSKEY_REQ);
+ msg_body.bss_idx = 0;
+ msg_body.enc_type = enc_type;
+ msg_body.num_keys = 1;
+ msg_body.keys[0].id = keyidx;
+ msg_body.keys[0].unicast = 0;
+ msg_body.keys[0].direction = WCN36XX_HAL_RX_ONLY;
+ msg_body.keys[0].pae_role = 0;
+ msg_body.keys[0].length = keylen;
+ memcpy(msg_body.keys[0].key, key, keylen);
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_set_bsskey failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_set_bsskey response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_remove_stakey(struct wcn36xx *wcn,
+ enum ani_ed_type enc_type,
+ u8 keyidx,
+ u8 sta_index)
+{
+ struct wcn36xx_hal_remove_sta_key_req_msg msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_RMV_STAKEY_REQ);
+
+ msg_body.sta_idx = sta_index;
+ msg_body.enc_type = enc_type;
+ msg_body.key_id = keyidx;
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_remove_stakey failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_remove_stakey response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_remove_bsskey(struct wcn36xx *wcn,
+ enum ani_ed_type enc_type,
+ u8 keyidx)
+{
+ struct wcn36xx_hal_remove_bss_key_req_msg msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_RMV_BSSKEY_REQ);
+ msg_body.bss_idx = 0;
+ msg_body.enc_type = enc_type;
+ msg_body.key_id = keyidx;
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_remove_bsskey failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_remove_bsskey response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_enter_bmps(struct wcn36xx *wcn, struct ieee80211_vif *vif)
+{
+ struct wcn36xx_hal_enter_bmps_req_msg msg_body;
+ struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_ENTER_BMPS_REQ);
+
+ msg_body.bss_index = vif_priv->bss_index;
+ msg_body.tbtt = vif->bss_conf.sync_tsf;
+ msg_body.dtim_period = vif_priv->dtim_period;
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_enter_bmps failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_enter_bmps response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_exit_bmps(struct wcn36xx *wcn, struct ieee80211_vif *vif)
+{
+ struct wcn36xx_hal_enter_bmps_req_msg msg_body;
+ struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_EXIT_BMPS_REQ);
+
+ msg_body.bss_index = vif_priv->bss_index;
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_exit_bmps failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_exit_bmps response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_set_power_params(struct wcn36xx *wcn, bool ignore_dtim)
+{
+ struct wcn36xx_hal_set_power_params_req_msg msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_SET_POWER_PARAMS_REQ);
+
+	/*
+	 * When the host is down, ignore every second DTIM
+	 */
+ if (ignore_dtim) {
+ msg_body.ignore_dtim = 1;
+ msg_body.dtim_period = 2;
+ }
+ msg_body.listen_interval = WCN36XX_LISTEN_INTERVAL(wcn);
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_set_power_params failed\n");
+ goto out;
+ }
+
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+/* Note: this function should only be called after association, otherwise
+ * the request is invalid
+ */
+int wcn36xx_smd_keep_alive_req(struct wcn36xx *wcn,
+ struct ieee80211_vif *vif,
+ int packet_type)
+{
+ struct wcn36xx_hal_keep_alive_req_msg msg_body;
+ struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_KEEP_ALIVE_REQ);
+
+ if (packet_type == WCN36XX_HAL_KEEP_ALIVE_NULL_PKT) {
+ msg_body.bss_index = vif_priv->bss_index;
+ msg_body.packet_type = WCN36XX_HAL_KEEP_ALIVE_NULL_PKT;
+ msg_body.time_period = WCN36XX_KEEP_ALIVE_TIME_PERIOD;
+ } else if (packet_type == WCN36XX_HAL_KEEP_ALIVE_UNSOLICIT_ARP_RSP) {
+		/* TODO: the firmware also supports an ARP response type */
+	} else {
+		wcn36xx_warn("unknown keep alive packet type %d\n",
+			     packet_type);
+		ret = -EINVAL;
+		goto out;
+ }
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+		wcn36xx_err("Sending hal_keep_alive failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+		wcn36xx_err("hal_keep_alive response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_dump_cmd_req(struct wcn36xx *wcn, u32 arg1, u32 arg2,
+ u32 arg3, u32 arg4, u32 arg5)
+{
+ struct wcn36xx_hal_dump_cmd_req_msg msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_DUMP_COMMAND_REQ);
+
+ msg_body.arg1 = arg1;
+ msg_body.arg2 = arg2;
+ msg_body.arg3 = arg3;
+ msg_body.arg4 = arg4;
+ msg_body.arg5 = arg5;
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_dump_cmd failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_dump_cmd response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
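+/*
+ * The capability map is 128 bits wide, stored as four u32 words:
+ * cap / 32 selects the word and cap % 32 the bit within it.
+ */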
+static inline void set_feat_caps(u32 *bitmap,
+ enum place_holder_in_cap_bitmap cap)
+{
+ int arr_idx, bit_idx;
+
+ if (cap < 0 || cap > 127) {
+ wcn36xx_warn("error cap idx %d\n", cap);
+ return;
+ }
+
+ arr_idx = cap / 32;
+ bit_idx = cap % 32;
+ bitmap[arr_idx] |= (1 << bit_idx);
+}
+
+static inline int get_feat_caps(u32 *bitmap,
+ enum place_holder_in_cap_bitmap cap)
+{
+ int arr_idx, bit_idx;
+ int ret = 0;
+
+ if (cap < 0 || cap > 127) {
+ wcn36xx_warn("error cap idx %d\n", cap);
+ return -EINVAL;
+ }
+
+ arr_idx = cap / 32;
+ bit_idx = cap % 32;
+ ret = (bitmap[arr_idx] & (1 << bit_idx)) ? 1 : 0;
+ return ret;
+}
+
+static inline void clear_feat_caps(u32 *bitmap,
+ enum place_holder_in_cap_bitmap cap)
+{
+ int arr_idx, bit_idx;
+
+ if (cap < 0 || cap > 127) {
+ wcn36xx_warn("error cap idx %d\n", cap);
+ return;
+ }
+
+ arr_idx = cap / 32;
+ bit_idx = cap % 32;
+ bitmap[arr_idx] &= ~(1 << bit_idx);
+}
+
+int wcn36xx_smd_feature_caps_exchange(struct wcn36xx *wcn)
+{
+ struct wcn36xx_hal_feat_caps_msg msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_FEATURE_CAPS_EXCHANGE_REQ);
+
+ set_feat_caps(msg_body.feat_caps, STA_POWERSAVE);
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_feature_caps_exchange failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_feature_caps_exchange response failed err=%d\n",
+ ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_add_ba_session(struct wcn36xx *wcn,
+ struct ieee80211_sta *sta,
+ u16 tid,
+ u16 *ssn,
+ u8 direction,
+ u8 sta_index)
+{
+ struct wcn36xx_hal_add_ba_session_req_msg msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_ADD_BA_SESSION_REQ);
+
+ msg_body.sta_index = sta_index;
+ memcpy(&msg_body.mac_addr, sta->addr, ETH_ALEN);
+ msg_body.dialog_token = 0x10;
+ msg_body.tid = tid;
+
+ /* Immediate BA because Delayed BA is not supported */
+ msg_body.policy = 1;
+ msg_body.buffer_size = WCN36XX_AGGR_BUFFER_SIZE;
+ msg_body.timeout = 0;
+ if (ssn)
+ msg_body.ssn = *ssn;
+ msg_body.direction = direction;
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_add_ba_session failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_add_ba_session response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_add_ba(struct wcn36xx *wcn)
+{
+ struct wcn36xx_hal_add_ba_req_msg msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_ADD_BA_REQ);
+
+ msg_body.session_id = 0;
+ msg_body.win_size = WCN36XX_AGGR_BUFFER_SIZE;
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_add_ba failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_add_ba response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_del_ba(struct wcn36xx *wcn, u16 tid, u8 sta_index)
+{
+ struct wcn36xx_hal_del_ba_req_msg msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_DEL_BA_REQ);
+
+ msg_body.sta_index = sta_index;
+ msg_body.tid = tid;
+ msg_body.direction = 0;
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_del_ba failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_del_ba response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_trigger_ba(struct wcn36xx *wcn, u8 sta_index)
+{
+ struct wcn36xx_hal_trigger_ba_req_msg msg_body;
+ struct wcn36xx_hal_trigget_ba_req_candidate *candidate;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_TRIGGER_BA_REQ);
+
+ msg_body.session_id = 0;
+ msg_body.candidate_cnt = 1;
+ msg_body.header.len += sizeof(*candidate);
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
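+	/* The candidate entry is written into hal_buf right after the
+	 * fixed part of the message.
+	 */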
+ candidate = (struct wcn36xx_hal_trigget_ba_req_candidate *)
+ (wcn->hal_buf + sizeof(msg_body));
+ candidate->sta_index = sta_index;
+ candidate->tid_bitmap = 1;
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_trigger_ba failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_trigger_ba response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+static int wcn36xx_smd_tx_compl_ind(struct wcn36xx *wcn, void *buf, size_t len)
+{
+ struct wcn36xx_hal_tx_compl_ind_msg *rsp = buf;
+
+ if (len != sizeof(*rsp)) {
+ wcn36xx_warn("Bad TX complete indication\n");
+ return -EIO;
+ }
+
+ wcn36xx_dxe_tx_ack_ind(wcn, rsp->status);
+
+ return 0;
+}
+
+static int wcn36xx_smd_missed_beacon_ind(struct wcn36xx *wcn,
+ void *buf,
+ size_t len)
+{
+ struct wcn36xx_hal_missed_beacon_ind_msg *rsp = buf;
+ struct ieee80211_vif *vif = NULL;
+ struct wcn36xx_vif *tmp;
+
+ /* Old FW does not have bss index */
+ if (wcn36xx_is_fw_version(wcn, 1, 2, 2, 24)) {
+ list_for_each_entry(tmp, &wcn->vif_list, list) {
+ wcn36xx_dbg(WCN36XX_DBG_HAL, "beacon missed bss_index %d\n",
+ tmp->bss_index);
+ vif = container_of((void *)tmp,
+ struct ieee80211_vif,
+ drv_priv);
+ ieee80211_connection_loss(vif);
+ }
+ return 0;
+ }
+
+ if (len != sizeof(*rsp)) {
+ wcn36xx_warn("Corrupted missed beacon indication\n");
+ return -EIO;
+ }
+
+ list_for_each_entry(tmp, &wcn->vif_list, list) {
+ if (tmp->bss_index == rsp->bss_index) {
+ wcn36xx_dbg(WCN36XX_DBG_HAL, "beacon missed bss_index %d\n",
+ rsp->bss_index);
+ vif = container_of((void *)tmp,
+ struct ieee80211_vif,
+ drv_priv);
+ ieee80211_connection_loss(vif);
+ return 0;
+ }
+ }
+
+ wcn36xx_warn("BSS index %d not found\n", rsp->bss_index);
+ return -ENOENT;
+}
+
+static int wcn36xx_smd_delete_sta_context_ind(struct wcn36xx *wcn,
+ void *buf,
+ size_t len)
+{
+ struct wcn36xx_hal_delete_sta_context_ind_msg *rsp = buf;
+ struct wcn36xx_vif *tmp;
+ struct ieee80211_sta *sta = NULL;
+
+ if (len != sizeof(*rsp)) {
+ wcn36xx_warn("Corrupted delete sta indication\n");
+ return -EIO;
+ }
+
+ list_for_each_entry(tmp, &wcn->vif_list, list) {
+		if (tmp->sta && (tmp->sta->sta_index == rsp->sta_id)) {
+ sta = container_of((void *)tmp->sta,
+ struct ieee80211_sta,
+ drv_priv);
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "delete station indication %pM index %d\n",
+ rsp->addr2,
+ rsp->sta_id);
+ ieee80211_report_low_ack(sta, 0);
+ return 0;
+ }
+ }
+
+ wcn36xx_warn("STA with addr %pM and index %d not found\n",
+ rsp->addr2,
+ rsp->sta_id);
+ return -ENOENT;
+}
+
+int wcn36xx_smd_update_cfg(struct wcn36xx *wcn, u32 cfg_id, u32 value)
+{
+ struct wcn36xx_hal_update_cfg_req_msg msg_body, *body;
+ size_t len;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_UPDATE_CFG_REQ);
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ body = (struct wcn36xx_hal_update_cfg_req_msg *) wcn->hal_buf;
+ len = msg_body.header.len;
+
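+	/* Append the cfg value as a TLV in hal_buf, then fix up the
+	 * header and message lengths.
+	 */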
+ put_cfg_tlv_u32(wcn, &len, cfg_id, value);
+ body->header.len = len;
+ body->len = len - sizeof(*body);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, body->header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_update_cfg failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_update_cfg response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
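+
+/*
+ * Responses to synchronous requests are copied into hal_buf and wake up
+ * the caller blocked in wcn36xx_smd_send_and_wait(); unsolicited
+ * indications are queued and handled later from the hal_ind_work
+ * workqueue.
+ */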
+static void wcn36xx_smd_rsp_process(struct wcn36xx *wcn, void *buf, size_t len)
+{
+ struct wcn36xx_hal_msg_header *msg_header = buf;
+ struct wcn36xx_hal_ind_msg *msg_ind;
+
+	wcn36xx_dbg_dump(WCN36XX_DBG_SMD_DUMP, "SMD <<< ", buf, len);
+
+ switch (msg_header->msg_type) {
+ case WCN36XX_HAL_START_RSP:
+ case WCN36XX_HAL_CONFIG_STA_RSP:
+ case WCN36XX_HAL_CONFIG_BSS_RSP:
+ case WCN36XX_HAL_ADD_STA_SELF_RSP:
+ case WCN36XX_HAL_STOP_RSP:
+ case WCN36XX_HAL_DEL_STA_SELF_RSP:
+ case WCN36XX_HAL_DELETE_STA_RSP:
+ case WCN36XX_HAL_INIT_SCAN_RSP:
+ case WCN36XX_HAL_START_SCAN_RSP:
+ case WCN36XX_HAL_END_SCAN_RSP:
+ case WCN36XX_HAL_FINISH_SCAN_RSP:
+ case WCN36XX_HAL_DOWNLOAD_NV_RSP:
+ case WCN36XX_HAL_DELETE_BSS_RSP:
+ case WCN36XX_HAL_SEND_BEACON_RSP:
+ case WCN36XX_HAL_SET_LINK_ST_RSP:
+ case WCN36XX_HAL_UPDATE_PROBE_RSP_TEMPLATE_RSP:
+ case WCN36XX_HAL_SET_BSSKEY_RSP:
+ case WCN36XX_HAL_SET_STAKEY_RSP:
+ case WCN36XX_HAL_RMV_STAKEY_RSP:
+ case WCN36XX_HAL_RMV_BSSKEY_RSP:
+ case WCN36XX_HAL_ENTER_BMPS_RSP:
+ case WCN36XX_HAL_SET_POWER_PARAMS_RSP:
+ case WCN36XX_HAL_EXIT_BMPS_RSP:
+ case WCN36XX_HAL_KEEP_ALIVE_RSP:
+ case WCN36XX_HAL_DUMP_COMMAND_RSP:
+ case WCN36XX_HAL_ADD_BA_SESSION_RSP:
+ case WCN36XX_HAL_ADD_BA_RSP:
+ case WCN36XX_HAL_DEL_BA_RSP:
+ case WCN36XX_HAL_TRIGGER_BA_RSP:
+ case WCN36XX_HAL_UPDATE_CFG_RSP:
+ case WCN36XX_HAL_JOIN_RSP:
+ case WCN36XX_HAL_UPDATE_SCAN_PARAM_RSP:
+ case WCN36XX_HAL_CH_SWITCH_RSP:
+ case WCN36XX_HAL_FEATURE_CAPS_EXCHANGE_RSP:
+ memcpy(wcn->hal_buf, buf, len);
+ wcn->hal_rsp_len = len;
+ complete(&wcn->hal_rsp_compl);
+ break;
+
+ case WCN36XX_HAL_OTA_TX_COMPL_IND:
+ case WCN36XX_HAL_MISSED_BEACON_IND:
+ case WCN36XX_HAL_DELETE_STA_CONTEXT_IND:
+ mutex_lock(&wcn->hal_ind_mutex);
+		msg_ind = kmalloc(sizeof(*msg_ind), GFP_KERNEL);
+		if (msg_ind)
+			msg_ind->msg = kmalloc(len, GFP_KERNEL);
+		if (!msg_ind || !msg_ind->msg) {
+			wcn36xx_err("dropping indication, low on memory\n");
+			kfree(msg_ind);
+			mutex_unlock(&wcn->hal_ind_mutex);
+			break;
+		}
+		msg_ind->msg_len = len;
+		memcpy(msg_ind->msg, buf, len);
+ list_add_tail(&msg_ind->list, &wcn->hal_ind_queue);
+ queue_work(wcn->hal_ind_wq, &wcn->hal_ind_work);
+ wcn36xx_dbg(WCN36XX_DBG_HAL, "indication arrived\n");
+ mutex_unlock(&wcn->hal_ind_mutex);
+ break;
+ default:
+ wcn36xx_err("SMD_EVENT (%d) not supported\n",
+ msg_header->msg_type);
+ }
+}
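+
+/* Handle the oldest queued indication; the work is scheduled once per
+ * received indication.
+ */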
+static void wcn36xx_ind_smd_work(struct work_struct *work)
+{
+ struct wcn36xx *wcn =
+ container_of(work, struct wcn36xx, hal_ind_work);
+ struct wcn36xx_hal_msg_header *msg_header;
+ struct wcn36xx_hal_ind_msg *hal_ind_msg;
+
+ mutex_lock(&wcn->hal_ind_mutex);
+
+ hal_ind_msg = list_first_entry(&wcn->hal_ind_queue,
+ struct wcn36xx_hal_ind_msg,
+ list);
+
+ msg_header = (struct wcn36xx_hal_msg_header *)hal_ind_msg->msg;
+
+ switch (msg_header->msg_type) {
+ case WCN36XX_HAL_OTA_TX_COMPL_IND:
+ wcn36xx_smd_tx_compl_ind(wcn,
+ hal_ind_msg->msg,
+ hal_ind_msg->msg_len);
+ break;
+ case WCN36XX_HAL_MISSED_BEACON_IND:
+ wcn36xx_smd_missed_beacon_ind(wcn,
+ hal_ind_msg->msg,
+ hal_ind_msg->msg_len);
+ break;
+ case WCN36XX_HAL_DELETE_STA_CONTEXT_IND:
+ wcn36xx_smd_delete_sta_context_ind(wcn,
+ hal_ind_msg->msg,
+ hal_ind_msg->msg_len);
+ break;
+ default:
+ wcn36xx_err("SMD_EVENT (%d) not supported\n",
+ msg_header->msg_type);
+ }
+ list_del(wcn->hal_ind_queue.next);
+ kfree(hal_ind_msg->msg);
+ kfree(hal_ind_msg);
+ mutex_unlock(&wcn->hal_ind_mutex);
+}
+
+int wcn36xx_smd_open(struct wcn36xx *wcn)
+{
+	int ret = 0;
+
+	wcn->hal_ind_wq = create_freezable_workqueue("wcn36xx_smd_ind");
+ if (!wcn->hal_ind_wq) {
+ wcn36xx_err("failed to allocate wq\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+ INIT_WORK(&wcn->hal_ind_work, wcn36xx_ind_smd_work);
+ INIT_LIST_HEAD(&wcn->hal_ind_queue);
+ mutex_init(&wcn->hal_ind_mutex);
+
+ ret = wcn->ctrl_ops->open(wcn, wcn36xx_smd_rsp_process);
+ if (ret) {
+ wcn36xx_err("failed to open control channel\n");
+ goto free_wq;
+ }
+
+ return ret;
+
+free_wq:
+ destroy_workqueue(wcn->hal_ind_wq);
+out:
+ return ret;
+}
+
+void wcn36xx_smd_close(struct wcn36xx *wcn)
+{
+ wcn->ctrl_ops->close();
+ destroy_workqueue(wcn->hal_ind_wq);
+ mutex_destroy(&wcn->hal_ind_mutex);
+}
diff --git a/drivers/net/wireless/ath/wcn36xx/smd.h b/drivers/net/wireless/ath/wcn36xx/smd.h
new file mode 100644
index 0000000..e7c3901
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/smd.h
@@ -0,0 +1,127 @@
+/*
+ * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _SMD_H_
+#define _SMD_H_
+
+#include "wcn36xx.h"
+
+/* Max shared size is 4k but we take less. */
+#define WCN36XX_NV_FRAGMENT_SIZE 3072
+
+#define WCN36XX_HAL_BUF_SIZE 4096
+
+#define HAL_MSG_TIMEOUT 200
+#define WCN36XX_SMSM_WLAN_TX_ENABLE 0x00000400
+#define WCN36XX_SMSM_WLAN_TX_RINGS_EMPTY 0x00000200
+/* The PNO version info is contained in the rsp msg */
+#define WCN36XX_FW_MSG_PNO_VERSION_MASK 0x8000
+
+enum wcn36xx_fw_msg_result {
+ WCN36XX_FW_MSG_RESULT_SUCCESS = 0,
+ WCN36XX_FW_MSG_RESULT_SUCCESS_SYNC = 1,
+
+ WCN36XX_FW_MSG_RESULT_MEM_FAIL = 5,
+};
+
+/******************************/
+/* SMD requests and responses */
+/******************************/
+struct wcn36xx_fw_msg_status_rsp {
+ u32 status;
+} __packed;
+
+struct wcn36xx_hal_ind_msg {
+ struct list_head list;
+ u8 *msg;
+ size_t msg_len;
+};
+
+struct wcn36xx;
+
+int wcn36xx_smd_open(struct wcn36xx *wcn);
+void wcn36xx_smd_close(struct wcn36xx *wcn);
+
+int wcn36xx_smd_load_nv(struct wcn36xx *wcn);
+int wcn36xx_smd_start(struct wcn36xx *wcn);
+int wcn36xx_smd_stop(struct wcn36xx *wcn);
+int wcn36xx_smd_init_scan(struct wcn36xx *wcn, enum wcn36xx_hal_sys_mode mode);
+int wcn36xx_smd_start_scan(struct wcn36xx *wcn);
+int wcn36xx_smd_end_scan(struct wcn36xx *wcn);
+int wcn36xx_smd_finish_scan(struct wcn36xx *wcn,
+ enum wcn36xx_hal_sys_mode mode);
+int wcn36xx_smd_update_scan_params(struct wcn36xx *wcn);
+int wcn36xx_smd_add_sta_self(struct wcn36xx *wcn, struct ieee80211_vif *vif);
+int wcn36xx_smd_delete_sta_self(struct wcn36xx *wcn, u8 *addr);
+int wcn36xx_smd_delete_sta(struct wcn36xx *wcn, u8 sta_index);
+int wcn36xx_smd_join(struct wcn36xx *wcn, const u8 *bssid, u8 *vif, u8 ch);
+int wcn36xx_smd_set_link_st(struct wcn36xx *wcn, const u8 *bssid,
+ const u8 *sta_mac,
+ enum wcn36xx_hal_link_state state);
+int wcn36xx_smd_config_bss(struct wcn36xx *wcn, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, const u8 *bssid,
+ bool update);
+int wcn36xx_smd_delete_bss(struct wcn36xx *wcn, struct ieee80211_vif *vif);
+int wcn36xx_smd_config_sta(struct wcn36xx *wcn, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+int wcn36xx_smd_send_beacon(struct wcn36xx *wcn, struct ieee80211_vif *vif,
+ struct sk_buff *skb_beacon, u16 tim_off,
+ u16 p2p_off);
+int wcn36xx_smd_switch_channel(struct wcn36xx *wcn,
+ struct ieee80211_vif *vif, int ch);
+int wcn36xx_smd_update_proberesp_tmpl(struct wcn36xx *wcn,
+ struct ieee80211_vif *vif,
+ struct sk_buff *skb);
+int wcn36xx_smd_set_stakey(struct wcn36xx *wcn,
+ enum ani_ed_type enc_type,
+ u8 keyidx,
+ u8 keylen,
+ u8 *key,
+ u8 sta_index);
+int wcn36xx_smd_set_bsskey(struct wcn36xx *wcn,
+ enum ani_ed_type enc_type,
+ u8 keyidx,
+ u8 keylen,
+ u8 *key);
+int wcn36xx_smd_remove_stakey(struct wcn36xx *wcn,
+ enum ani_ed_type enc_type,
+ u8 keyidx,
+ u8 sta_index);
+int wcn36xx_smd_remove_bsskey(struct wcn36xx *wcn,
+ enum ani_ed_type enc_type,
+ u8 keyidx);
+int wcn36xx_smd_enter_bmps(struct wcn36xx *wcn, struct ieee80211_vif *vif);
+int wcn36xx_smd_exit_bmps(struct wcn36xx *wcn, struct ieee80211_vif *vif);
+int wcn36xx_smd_set_power_params(struct wcn36xx *wcn, bool ignore_dtim);
+int wcn36xx_smd_keep_alive_req(struct wcn36xx *wcn,
+ struct ieee80211_vif *vif,
+ int packet_type);
+int wcn36xx_smd_dump_cmd_req(struct wcn36xx *wcn, u32 arg1, u32 arg2,
+ u32 arg3, u32 arg4, u32 arg5);
+int wcn36xx_smd_feature_caps_exchange(struct wcn36xx *wcn);
+
+int wcn36xx_smd_add_ba_session(struct wcn36xx *wcn,
+ struct ieee80211_sta *sta,
+ u16 tid,
+ u16 *ssn,
+ u8 direction,
+ u8 sta_index);
+int wcn36xx_smd_add_ba(struct wcn36xx *wcn);
+int wcn36xx_smd_del_ba(struct wcn36xx *wcn, u16 tid, u8 sta_index);
+int wcn36xx_smd_trigger_ba(struct wcn36xx *wcn, u8 sta_index);
+
+int wcn36xx_smd_update_cfg(struct wcn36xx *wcn, u32 cfg_id, u32 value);
+#endif /* _SMD_H_ */
diff --git a/drivers/net/wireless/ath/wcn36xx/txrx.c b/drivers/net/wireless/ath/wcn36xx/txrx.c
new file mode 100644
index 0000000..b2b60e3
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/txrx.c
@@ -0,0 +1,284 @@
+/*
+ * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include "txrx.h"
+
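+/* RSSI is carried in the top byte of the first PHY status word of the BD. */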
+static inline int get_rssi0(struct wcn36xx_rx_bd *bd)
+{
+ return 100 - ((bd->phy_stat0 >> 24) & 0xff);
+}
+
+int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb)
+{
+ struct ieee80211_rx_status status;
+ struct ieee80211_hdr *hdr;
+ struct wcn36xx_rx_bd *bd;
+ u16 fc, sn;
+
+	/*
+	 * All fields must be 0, otherwise stale values can lead to
+	 * unexpected behaviour.
+	 */
+ memset(&status, 0, sizeof(status));
+
+ bd = (struct wcn36xx_rx_bd *)skb->data;
+ buff_to_be((u32 *)bd, sizeof(*bd)/sizeof(u32));
+ wcn36xx_dbg_dump(WCN36XX_DBG_RX_DUMP,
+ "BD <<< ", (char *)bd,
+ sizeof(struct wcn36xx_rx_bd));
+
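+	/* Size the skb from the BD's PDU fields, then strip the BD itself. */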
+ skb_put(skb, bd->pdu.mpdu_header_off + bd->pdu.mpdu_len);
+ skb_pull(skb, bd->pdu.mpdu_header_off);
+
+ status.mactime = 10;
+ status.freq = WCN36XX_CENTER_FREQ(wcn);
+ status.band = WCN36XX_BAND(wcn);
+ status.signal = -get_rssi0(bd);
+ status.antenna = 1;
+ status.rate_idx = 1;
+ status.flag = 0;
+ status.rx_flags = 0;
+ status.flag |= RX_FLAG_IV_STRIPPED |
+ RX_FLAG_MMIC_STRIPPED |
+ RX_FLAG_DECRYPTED;
+
+	wcn36xx_dbg(WCN36XX_DBG_RX, "status.flag=%x status.vendor_radiotap_len=%x\n",
+		    status.flag, status.vendor_radiotap_len);
+
+ memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status));
+
+ hdr = (struct ieee80211_hdr *) skb->data;
+ fc = __le16_to_cpu(hdr->frame_control);
+ sn = IEEE80211_SEQ_TO_SN(__le16_to_cpu(hdr->seq_ctrl));
+
+ if (ieee80211_is_beacon(hdr->frame_control)) {
+ wcn36xx_dbg(WCN36XX_DBG_BEACON, "beacon skb %p len %d fc %04x sn %d\n",
+ skb, skb->len, fc, sn);
+ wcn36xx_dbg_dump(WCN36XX_DBG_BEACON_DUMP, "SKB <<< ",
+ (char *)skb->data, skb->len);
+ } else {
+ wcn36xx_dbg(WCN36XX_DBG_RX, "rx skb %p len %d fc %04x sn %d\n",
+ skb, skb->len, fc, sn);
+ wcn36xx_dbg_dump(WCN36XX_DBG_RX_DUMP, "SKB <<< ",
+ (char *)skb->data, skb->len);
+ }
+
+ ieee80211_rx_irqsafe(wcn->hw, skb);
+
+ return 0;
+}
+
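+/* The 802.11 header is placed right after the BD; frame data follows
+ * the header.
+ */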
+static void wcn36xx_set_tx_pdu(struct wcn36xx_tx_bd *bd,
+ u32 mpdu_header_len,
+ u32 len,
+ u16 tid)
+{
+ bd->pdu.mpdu_header_len = mpdu_header_len;
+ bd->pdu.mpdu_header_off = sizeof(*bd);
+ bd->pdu.mpdu_data_off = bd->pdu.mpdu_header_len +
+ bd->pdu.mpdu_header_off;
+ bd->pdu.mpdu_len = len;
+ bd->pdu.tid = tid;
+}
+
+static inline struct wcn36xx_vif *get_vif_by_addr(struct wcn36xx *wcn,
+ u8 *addr)
+{
+ struct wcn36xx_vif *vif_priv = NULL;
+ struct ieee80211_vif *vif = NULL;
+ list_for_each_entry(vif_priv, &wcn->vif_list, list) {
+ vif = container_of((void *)vif_priv,
+ struct ieee80211_vif,
+ drv_priv);
+ if (memcmp(vif->addr, addr, ETH_ALEN) == 0)
+ return vif_priv;
+ }
+ wcn36xx_warn("vif %pM not found\n", addr);
+ return NULL;
+}
+
+static void wcn36xx_set_tx_data(struct wcn36xx_tx_bd *bd,
+ struct wcn36xx *wcn,
+ struct wcn36xx_vif **vif_priv,
+ struct wcn36xx_sta *sta_priv,
+ struct ieee80211_hdr *hdr,
+ bool bcast)
+{
+ struct ieee80211_vif *vif = NULL;
+ struct wcn36xx_vif *__vif_priv = NULL;
+
+	bd->bd_rate = WCN36XX_BD_RATE_DATA;
+
+	/*
+	 * For non-unicast frames mac80211 will not set the sta pointer, so
+	 * use self_sta_index instead.
+	 */
+ if (sta_priv) {
+ __vif_priv = sta_priv->vif;
+ vif = container_of((void *)__vif_priv,
+ struct ieee80211_vif,
+ drv_priv);
+
+ if (vif->type == NL80211_IFTYPE_STATION) {
+ bd->sta_index = sta_priv->bss_sta_index;
+ bd->dpu_desc_idx = sta_priv->bss_dpu_desc_index;
+ } else if (vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_ADHOC ||
+ vif->type == NL80211_IFTYPE_MESH_POINT) {
+ bd->sta_index = sta_priv->sta_index;
+ bd->dpu_desc_idx = sta_priv->dpu_desc_index;
+ }
+ } else {
+ __vif_priv = get_vif_by_addr(wcn, hdr->addr2);
+ bd->sta_index = __vif_priv->self_sta_index;
+ bd->dpu_desc_idx = __vif_priv->self_dpu_desc_index;
+ }
+
+ bd->dpu_sign = __vif_priv->ucast_dpu_signature;
+
+ if (ieee80211_is_nullfunc(hdr->frame_control) ||
+ (sta_priv && !sta_priv->is_data_encrypted))
+ bd->dpu_ne = 1;
+
+ if (bcast) {
+ bd->ub = 1;
+ bd->ack_policy = 1;
+ }
+ *vif_priv = __vif_priv;
+}
+
+static void wcn36xx_set_tx_mgmt(struct wcn36xx_tx_bd *bd,
+ struct wcn36xx *wcn,
+ struct wcn36xx_vif **vif_priv,
+ struct ieee80211_hdr *hdr,
+ bool bcast)
+{
+ struct wcn36xx_vif *__vif_priv =
+ get_vif_by_addr(wcn, hdr->addr2);
+ bd->sta_index = __vif_priv->self_sta_index;
+ bd->dpu_desc_idx = __vif_priv->self_dpu_desc_index;
+ bd->dpu_ne = 1;
+
+ /* default rate for unicast */
+ if (ieee80211_is_mgmt(hdr->frame_control))
+ bd->bd_rate = (WCN36XX_BAND(wcn) == IEEE80211_BAND_5GHZ) ?
+ WCN36XX_BD_RATE_CTRL :
+ WCN36XX_BD_RATE_MGMT;
+ else if (ieee80211_is_ctl(hdr->frame_control))
+ bd->bd_rate = WCN36XX_BD_RATE_CTRL;
+ else
+ wcn36xx_warn("frame control type unknown\n");
+
+	/*
+	 * In the joining state, trick the hardware into sending the probe
+	 * as unicast even if the address is broadcast.
+	 */
+ if (__vif_priv->is_joining &&
+ ieee80211_is_probe_req(hdr->frame_control))
+ bcast = false;
+
+ if (bcast) {
+ /* broadcast */
+ bd->ub = 1;
+		/* No ack needed for non-unicast frames */
+ bd->ack_policy = 1;
+ bd->queue_id = WCN36XX_TX_B_WQ_ID;
+	} else {
+		bd->queue_id = WCN36XX_TX_U_WQ_ID;
+	}
+ *vif_priv = __vif_priv;
+}
+
+int wcn36xx_start_tx(struct wcn36xx *wcn,
+ struct wcn36xx_sta *sta_priv,
+ struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct wcn36xx_vif *vif_priv = NULL;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ unsigned long flags;
+ bool is_low = ieee80211_is_data(hdr->frame_control);
+ bool bcast = is_broadcast_ether_addr(hdr->addr1) ||
+ is_multicast_ether_addr(hdr->addr1);
+ struct wcn36xx_tx_bd *bd = wcn36xx_dxe_get_next_bd(wcn, is_low);
+
+ if (!bd) {
+		/*
+		 * TX DXEs are used in pairs: one for the BD and one for the
+		 * actual frame. The BD DXEs have preallocated buffers while
+		 * the skb ones do not. If this isn't true something is
+		 * really weird. TODO: recover from this situation
+		 */
+
+ wcn36xx_err("bd address may not be NULL for BD DXE\n");
+ return -EINVAL;
+ }
+
+ memset(bd, 0, sizeof(*bd));
+
+ wcn36xx_dbg(WCN36XX_DBG_TX,
+ "tx skb %p len %d fc %04x sn %d %s %s\n",
+ skb, skb->len, __le16_to_cpu(hdr->frame_control),
+ IEEE80211_SEQ_TO_SN(__le16_to_cpu(hdr->seq_ctrl)),
+ is_low ? "low" : "high", bcast ? "bcast" : "ucast");
+
+ wcn36xx_dbg_dump(WCN36XX_DBG_TX_DUMP, "", skb->data, skb->len);
+
+ bd->dpu_rf = WCN36XX_BMU_WQ_TX;
+
+ bd->tx_comp = info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS;
+ if (bd->tx_comp) {
+ wcn36xx_dbg(WCN36XX_DBG_DXE, "TX_ACK status requested\n");
+ spin_lock_irqsave(&wcn->dxe_lock, flags);
+ if (wcn->tx_ack_skb) {
+ spin_unlock_irqrestore(&wcn->dxe_lock, flags);
+ wcn36xx_warn("tx_ack_skb already set\n");
+ return -EINVAL;
+ }
+
+ wcn->tx_ack_skb = skb;
+ spin_unlock_irqrestore(&wcn->dxe_lock, flags);
+
+ /* Only one at a time is supported by fw. Stop the TX queues
+ * until the ack status gets back.
+ *
+ * TODO: Add watchdog in case FW does not answer
+ */
+ ieee80211_stop_queues(wcn->hw);
+ }
+
+	/* Data frames are served first */
+ if (is_low) {
+ wcn36xx_set_tx_data(bd, wcn, &vif_priv, sta_priv, hdr, bcast);
+ wcn36xx_set_tx_pdu(bd,
+ ieee80211_is_data_qos(hdr->frame_control) ?
+ sizeof(struct ieee80211_qos_hdr) :
+ sizeof(struct ieee80211_hdr_3addr),
+ skb->len, sta_priv ? sta_priv->tid : 0);
+ } else {
+		/* MGMT and CTRL frames are handled here */
+ wcn36xx_set_tx_mgmt(bd, wcn, &vif_priv, hdr, bcast);
+ wcn36xx_set_tx_pdu(bd,
+ ieee80211_is_data_qos(hdr->frame_control) ?
+ sizeof(struct ieee80211_qos_hdr) :
+ sizeof(struct ieee80211_hdr_3addr),
+ skb->len, WCN36XX_TID);
+ }
+
+ buff_to_be((u32 *)bd, sizeof(*bd)/sizeof(u32));
+ bd->tx_bd_sign = 0xbdbdbdbd;
+
+ return wcn36xx_dxe_tx_frame(wcn, vif_priv, skb, is_low);
+}
diff --git a/drivers/net/wireless/ath/wcn36xx/txrx.h b/drivers/net/wireless/ath/wcn36xx/txrx.h
new file mode 100644
index 0000000..bbfbcf8
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/txrx.h
@@ -0,0 +1,160 @@
+/*
+ * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _TXRX_H_
+#define _TXRX_H_
+
+#include <linux/etherdevice.h>
+#include "wcn36xx.h"
+
+/* TODO describe all properties */
+#define WCN36XX_802_11_HEADER_LEN 24
+#define WCN36XX_BMU_WQ_TX 25
+#define WCN36XX_TID 7
+/* broadcast wq ID */
+#define WCN36XX_TX_B_WQ_ID 0xA
+#define WCN36XX_TX_U_WQ_ID 0x9
+/* bd_rate */
+#define WCN36XX_BD_RATE_DATA 0
+#define WCN36XX_BD_RATE_MGMT 2
+#define WCN36XX_BD_RATE_CTRL 3
+
+struct wcn36xx_pdu {
+ u32 dpu_fb:8;
+ u32 adu_fb:8;
+ u32 pdu_id:16;
+
+ /* 0x04*/
+ u32 tail_pdu_idx:16;
+ u32 head_pdu_idx:16;
+
+ /* 0x08*/
+ u32 pdu_count:7;
+ u32 mpdu_data_off:9;
+ u32 mpdu_header_off:8;
+ u32 mpdu_header_len:8;
+
+ /* 0x0c*/
+ u32 reserved4:8;
+ u32 tid:4;
+ u32 reserved3:4;
+ u32 mpdu_len:16;
+};
+
+struct wcn36xx_rx_bd {
+ u32 bdt:2;
+ u32 ft:1;
+ u32 dpu_ne:1;
+ u32 rx_key_id:3;
+ u32 ub:1;
+ u32 rmf:1;
+ u32 uma_bypass:1;
+ u32 csr11:1;
+ u32 reserved0:1;
+ u32 scan_learn:1;
+ u32 rx_ch:4;
+ u32 rtsf:1;
+ u32 bsf:1;
+ u32 a2hf:1;
+ u32 st_auf:1;
+ u32 dpu_sign:3;
+ u32 dpu_rf:8;
+
+ struct wcn36xx_pdu pdu;
+
+ /* 0x14*/
+ u32 addr3:8;
+ u32 addr2:8;
+ u32 addr1:8;
+ u32 dpu_desc_idx:8;
+
+ /* 0x18*/
+ u32 rxp_flags:23;
+ u32 rate_id:9;
+
+ u32 phy_stat0;
+ u32 phy_stat1;
+
+ /* 0x24 */
+ u32 rx_times;
+
+ u32 pmi_cmd[6];
+
+ /* 0x40 */
+ u32 reserved7:4;
+ u32 reorder_slot_id:6;
+ u32 reorder_fwd_id:6;
+ u32 reserved6:12;
+ u32 reorder_code:4;
+
+ /* 0x44 */
+ u32 exp_seq_num:12;
+ u32 cur_seq_num:12;
+ u32 fr_type_subtype:8;
+
+ /* 0x48 */
+ u32 msdu_size:16;
+ u32 sub_fr_id:4;
+ u32 proc_order:4;
+ u32 reserved9:4;
+ u32 aef:1;
+ u32 lsf:1;
+ u32 esf:1;
+ u32 asf:1;
+};
+
+struct wcn36xx_tx_bd {
+ u32 bdt:2;
+ u32 ft:1;
+ u32 dpu_ne:1;
+ u32 fw_tx_comp:1;
+ u32 tx_comp:1;
+ u32 reserved1:1;
+ u32 ub:1;
+ u32 rmf:1;
+ u32 reserved0:12;
+ u32 dpu_sign:3;
+ u32 dpu_rf:8;
+
+ struct wcn36xx_pdu pdu;
+
+ /* 0x14*/
+ u32 reserved5:7;
+ u32 queue_id:5;
+ u32 bd_rate:2;
+ u32 ack_policy:2;
+ u32 sta_index:8;
+ u32 dpu_desc_idx:8;
+
+ u32 tx_bd_sign;
+ u32 reserved6;
+ u32 dxe_start_time;
+ u32 dxe_end_time;
+
+ /*u32 tcp_udp_start_off:10;
+ u32 header_cks:16;
+ u32 reserved7:6;*/
+};
+
+struct wcn36xx_sta;
+struct wcn36xx;
+
+int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb);
+int wcn36xx_start_tx(struct wcn36xx *wcn,
+ struct wcn36xx_sta *sta_priv,
+ struct sk_buff *skb);
+
+#endif /* _TXRX_H_ */
diff --git a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
new file mode 100644
index 0000000..58b6383
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
@@ -0,0 +1,238 @@
+/*
+ * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _WCN36XX_H_
+#define _WCN36XX_H_
+
+#include <linux/completion.h>
+#include <linux/printk.h>
+#include <linux/spinlock.h>
+#include <net/mac80211.h>
+
+#include "hal.h"
+#include "smd.h"
+#include "txrx.h"
+#include "dxe.h"
+#include "pmc.h"
+#include "debug.h"
+
+#define WLAN_NV_FILE "wlan/prima/WCNSS_qcom_wlan_nv.bin"
+#define WCN36XX_AGGR_BUFFER_SIZE 64
+
+extern unsigned int wcn36xx_dbg_mask;
+
+enum wcn36xx_debug_mask {
+ WCN36XX_DBG_DXE = 0x00000001,
+ WCN36XX_DBG_DXE_DUMP = 0x00000002,
+ WCN36XX_DBG_SMD = 0x00000004,
+ WCN36XX_DBG_SMD_DUMP = 0x00000008,
+ WCN36XX_DBG_RX = 0x00000010,
+ WCN36XX_DBG_RX_DUMP = 0x00000020,
+ WCN36XX_DBG_TX = 0x00000040,
+ WCN36XX_DBG_TX_DUMP = 0x00000080,
+ WCN36XX_DBG_HAL = 0x00000100,
+ WCN36XX_DBG_HAL_DUMP = 0x00000200,
+ WCN36XX_DBG_MAC = 0x00000400,
+ WCN36XX_DBG_BEACON = 0x00000800,
+ WCN36XX_DBG_BEACON_DUMP = 0x00001000,
+ WCN36XX_DBG_PMC = 0x00002000,
+ WCN36XX_DBG_PMC_DUMP = 0x00004000,
+ WCN36XX_DBG_ANY = 0xffffffff,
+};
+
+#define wcn36xx_err(fmt, arg...) \
+	printk(KERN_ERR pr_fmt("ERROR " fmt), ##arg)
+
+#define wcn36xx_warn(fmt, arg...) \
+ printk(KERN_WARNING pr_fmt("WARNING " fmt), ##arg)
+
+#define wcn36xx_info(fmt, arg...) \
+ printk(KERN_INFO pr_fmt(fmt), ##arg)
+
+#define wcn36xx_dbg(mask, fmt, arg...) do { \
+ if (wcn36xx_dbg_mask & mask) \
+ printk(KERN_DEBUG pr_fmt(fmt), ##arg); \
+} while (0)
+
+#define wcn36xx_dbg_dump(mask, prefix_str, buf, len) do { \
+ if (wcn36xx_dbg_mask & mask) \
+ print_hex_dump(KERN_DEBUG, pr_fmt(prefix_str), \
+ DUMP_PREFIX_OFFSET, 32, 1, \
+ buf, len, false); \
+} while (0)
+
+#define WCN36XX_HW_CHANNEL(__wcn) (__wcn->hw->conf.chandef.chan->hw_value)
+#define WCN36XX_BAND(__wcn) (__wcn->hw->conf.chandef.chan->band)
+#define WCN36XX_CENTER_FREQ(__wcn) (__wcn->hw->conf.chandef.chan->center_freq)
+#define WCN36XX_LISTEN_INTERVAL(__wcn) (__wcn->hw->conf.listen_interval)
+#define WCN36XX_FLAGS(__wcn) (__wcn->hw->flags)
+#define WCN36XX_MAX_POWER(__wcn) (__wcn->hw->conf.chandef.chan->max_power)
+
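+/* BDs are exchanged with the hardware in big-endian format; byte-swap a
+ * buffer of u32 words in place.
+ */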
+static inline void buff_to_be(u32 *buf, size_t len)
+{
+	int i;
+
+	for (i = 0; i < len; i++)
+ buf[i] = cpu_to_be32(buf[i]);
+}
+
+struct nv_data {
+ int is_valid;
+ u8 table;
+};
+
+/* Interface for platform control path
+ *
+ * @open: hook called when wcn36xx wants to open the control channel.
+ * @tx: sends a buffer over the control channel.
+ */
+struct wcn36xx_platform_ctrl_ops {
+ int (*open)(void *drv_priv, void *rsp_cb);
+ void (*close)(void);
+ int (*tx)(char *buf, size_t len);
+ int (*get_hw_mac)(u8 *addr);
+ int (*smsm_change_state)(u32 clear_mask, u32 set_mask);
+};
+
+/**
+ * struct wcn36xx_vif - holds VIF related fields
+ *
+ * @bss_index: bss_index is initially set to 0xFF. bss_index is received from
+ * HW after first config_bss call and must be used in delete_bss and
+ * enter/exit_bmps.
+ */
+struct wcn36xx_vif {
+ struct list_head list;
+ struct wcn36xx_sta *sta;
+ u8 dtim_period;
+ enum ani_ed_type encrypt_type;
+ bool is_joining;
+ struct wcn36xx_hal_mac_ssid ssid;
+
+ /* Power management */
+ enum wcn36xx_power_state pw_state;
+
+ u8 bss_index;
+ u8 ucast_dpu_signature;
+ /* Returned from WCN36XX_HAL_ADD_STA_SELF_RSP */
+ u8 self_sta_index;
+ u8 self_dpu_desc_index;
+};
+
+/**
+ * struct wcn36xx_sta - holds STA related fields
+ *
+ * @tid: traffic ID that is used during AMPDU and in TX BD.
+ * @sta_index: STA index is returned from HW after config_sta call and is
+ * used in both SMD channel and TX BD.
+ * @dpu_desc_index: DPU descriptor index is returned from HW after config_sta
+ * call and is used in TX BD.
+ * @bss_sta_index: STA index is returned from HW after config_bss call and is
+ *	used in both SMD channel and TX BD. See the table below for when
+ *	each index is used.
+ * @bss_dpu_desc_index: DPU descriptor index is returned from HW after
+ * config_bss call and is used in TX BD.
+ * ______________________________________________
+ * | | STA | AP |
+ * |______________|_____________|_______________|
+ * | TX BD |bss_sta_index| sta_index |
+ * |______________|_____________|_______________|
+ * |all SMD calls |bss_sta_index| sta_index |
+ * |______________|_____________|_______________|
+ * |smd_delete_sta| sta_index | sta_index |
+ * |______________|_____________|_______________|
+ */
+struct wcn36xx_sta {
+ struct wcn36xx_vif *vif;
+ u16 aid;
+ u16 tid;
+ u8 sta_index;
+ u8 dpu_desc_index;
+ u8 bss_sta_index;
+ u8 bss_dpu_desc_index;
+ bool is_data_encrypted;
+ /* Rates */
+ struct wcn36xx_hal_supported_rates supported_rates;
+};
+
+struct wcn36xx_dxe_ch;
+
+struct wcn36xx {
+ struct ieee80211_hw *hw;
+ struct device *dev;
+ struct list_head vif_list;
+
+ u8 fw_revision;
+ u8 fw_version;
+ u8 fw_minor;
+ u8 fw_major;
+
+ /* extra byte for the NULL termination */
+ u8 crm_version[WCN36XX_HAL_VERSION_LENGTH + 1];
+ u8 wlan_version[WCN36XX_HAL_VERSION_LENGTH + 1];
+
+ /* IRQs */
+ int tx_irq;
+ int rx_irq;
+ void __iomem *mmio;
+
+ struct wcn36xx_platform_ctrl_ops *ctrl_ops;
+ /*
+	 * hal_buf must be protected with hal_mutex to guarantee
+	 * that all messages are sent one after another
+ */
+ u8 *hal_buf;
+ size_t hal_rsp_len;
+ struct mutex hal_mutex;
+ struct completion hal_rsp_compl;
+ struct workqueue_struct *hal_ind_wq;
+ struct work_struct hal_ind_work;
+ struct mutex hal_ind_mutex;
+ struct list_head hal_ind_queue;
+
+ /* DXE channels */
+ struct wcn36xx_dxe_ch dxe_tx_l_ch; /* TX low */
+ struct wcn36xx_dxe_ch dxe_tx_h_ch; /* TX high */
+ struct wcn36xx_dxe_ch dxe_rx_l_ch; /* RX low */
+ struct wcn36xx_dxe_ch dxe_rx_h_ch; /* RX high */
+
+ /* For synchronization of DXE resources from BH, IRQ and WQ contexts */
+ spinlock_t dxe_lock;
+ bool queues_stopped;
+
+ /* Memory pools */
+ struct wcn36xx_dxe_mem_pool mgmt_mem_pool;
+ struct wcn36xx_dxe_mem_pool data_mem_pool;
+
+ struct sk_buff *tx_ack_skb;
+
+#ifdef CONFIG_WCN36XX_DEBUGFS
+ /* Debug file system entry */
+ struct wcn36xx_dfs_entry dfs;
+#endif /* CONFIG_WCN36XX_DEBUGFS */
+
+};
+
+static inline bool wcn36xx_is_fw_version(struct wcn36xx *wcn,
+ u8 major,
+ u8 minor,
+ u8 version,
+ u8 revision)
+{
+ return (wcn->fw_major == major &&
+ wcn->fw_minor == minor &&
+ wcn->fw_version == version &&
+ wcn->fw_revision == revision);
+}
+
+void wcn36xx_set_default_rates(struct wcn36xx_hal_supported_rates *rates);
+
+#endif /* _WCN36XX_H_ */
diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c
index eb1dc7a..eeceab3 100644
--- a/drivers/net/wireless/ath/wil6210/pcie_bus.c
+++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c
@@ -197,7 +197,6 @@ static void wil_pcie_remove(struct pci_dev *pdev)
pci_iounmap(pdev, wil->csr);
pci_release_region(pdev, 0);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
}
static DEFINE_PCI_DEVICE_TABLE(wil6210_pcie_ids) = {
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
index 64f4a2b..091c905 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
@@ -34,6 +34,7 @@
#include <brcmu_utils.h>
#include <brcmu_wifi.h>
#include "sdio_host.h"
+#include "sdio_chip.h"
#include "dhd_dbg.h"
#include "dhd_bus.h"
@@ -41,13 +42,6 @@
#define DMA_ALIGN_MASK 0x03
-#define SDIO_DEVICE_ID_BROADCOM_43143 43143
-#define SDIO_DEVICE_ID_BROADCOM_43241 0x4324
-#define SDIO_DEVICE_ID_BROADCOM_4329 0x4329
-#define SDIO_DEVICE_ID_BROADCOM_4330 0x4330
-#define SDIO_DEVICE_ID_BROADCOM_4334 0x4334
-#define SDIO_DEVICE_ID_BROADCOM_4335 0x4335
-
#define SDIO_FUNC1_BLOCKSIZE 64
#define SDIO_FUNC2_BLOCKSIZE 512
@@ -58,7 +52,8 @@ static const struct sdio_device_id brcmf_sdmmc_ids[] = {
{SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4329)},
{SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4330)},
{SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4334)},
- {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4335)},
+ {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM,
+ SDIO_DEVICE_ID_BROADCOM_4335_4339)},
{ /* end: all zeroes */ },
};
MODULE_DEVICE_TABLE(sdio, brcmf_sdmmc_ids);
@@ -468,7 +463,7 @@ static int brcmf_sdio_pd_probe(struct platform_device *pdev)
brcmf_dbg(SDIO, "Enter\n");
- brcmfmac_sdio_pdata = pdev->dev.platform_data;
+ brcmfmac_sdio_pdata = dev_get_platdata(&pdev->dev);
if (brcmfmac_sdio_pdata->power_on)
brcmfmac_sdio_pdata->power_on();
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
index 2eb9e64..4de9aac 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
@@ -97,8 +97,6 @@
#define WLC_PHY_TYPE_LCN 8
#define WLC_PHY_TYPE_NULL 0xf
-#define BRCMF_EVENTING_MASK_LEN 16
-
#define TOE_TX_CSUM_OL 0x00000001
#define TOE_RX_CSUM_OL 0x00000002
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
index f7c1985..200ee9b 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
@@ -136,7 +136,7 @@ extern bool brcmf_c_prec_enq(struct device *dev, struct pktq *q,
struct sk_buff *pkt, int prec);
/* Receive frame for delivery to OS. Callee disposes of rxp. */
-extern void brcmf_rx_frames(struct device *dev, struct sk_buff_head *rxlist);
+extern void brcmf_rx_frame(struct device *dev, struct sk_buff *rxp);
/* Indication from bus module regarding presence/insertion of dongle. */
extern int brcmf_attach(uint bus_hdrlen, struct device *dev);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
index e067aec..42bf19a 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
@@ -509,9 +509,8 @@ netif_rx:
}
}
-void brcmf_rx_frames(struct device *dev, struct sk_buff_head *skb_list)
+void brcmf_rx_frame(struct device *dev, struct sk_buff *skb)
{
- struct sk_buff *skb, *pnext;
struct brcmf_if *ifp;
struct brcmf_bus *bus_if = dev_get_drvdata(dev);
struct brcmf_pub *drvr = bus_if->drvr;
@@ -519,29 +518,24 @@ void brcmf_rx_frames(struct device *dev, struct sk_buff_head *skb_list)
u8 ifidx;
int ret;
- brcmf_dbg(DATA, "Enter: %s: count=%u\n", dev_name(dev),
- skb_queue_len(skb_list));
+ brcmf_dbg(DATA, "Enter: %s: rxp=%p\n", dev_name(dev), skb);
- skb_queue_walk_safe(skb_list, skb, pnext) {
- skb_unlink(skb, skb_list);
-
- /* process and remove protocol-specific header */
- ret = brcmf_proto_hdrpull(drvr, true, &ifidx, skb);
- ifp = drvr->iflist[ifidx];
-
- if (ret || !ifp || !ifp->ndev) {
- if ((ret != -ENODATA) && ifp)
- ifp->stats.rx_errors++;
- brcmu_pkt_buf_free_skb(skb);
- continue;
- }
+ /* process and remove protocol-specific header */
+ ret = brcmf_proto_hdrpull(drvr, true, &ifidx, skb);
+ ifp = drvr->iflist[ifidx];
- rd = (struct brcmf_skb_reorder_data *)skb->cb;
- if (rd->reorder)
- brcmf_rxreorder_process_info(ifp, rd->reorder, skb);
- else
- brcmf_netif_rx(ifp, skb);
+ if (ret || !ifp || !ifp->ndev) {
+ if ((ret != -ENODATA) && ifp)
+ ifp->stats.rx_errors++;
+ brcmu_pkt_buf_free_skb(skb);
+ return;
}
+
+ rd = (struct brcmf_skb_reorder_data *)skb->cb;
+ if (rd->reorder)
+ brcmf_rxreorder_process_info(ifp, rd->reorder, skb);
+ else
+ brcmf_netif_rx(ifp, skb);
}
void brcmf_txfinalize(struct brcmf_pub *drvr, struct sk_buff *txp,
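
With the SDIO glom walk and the USB completion handler (see the dhd_sdio.c and usb.c hunks elsewhere in this patch) now delivering one skb at a time, the list-based receive entry point collapses into the single-packet brcmf_rx_frame() above; callers that used to build a one-element sk_buff_head simply hand the packet over directly. A kernel-context sketch of the call-site simplification (hypothetical bus completion handler):

	static void example_rx_complete(struct device *dev, struct sk_buff *skb)
	{
		/*
		 * Previously: skb_queue_head_init(&q);
		 * skb_queue_tail(&q, skb); brcmf_rx_frames(dev, &q);
		 */
		brcmf_rx_frame(dev, skb);
	}
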
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
index 1aa75d5..67f05db 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
@@ -275,11 +275,6 @@ struct rte_console {
/* Flags for SDH calls */
#define F2SYNC (SDIO_REQ_4BYTE | SDIO_REQ_FIXED)
-#define BRCMF_SDIO_FW_NAME "brcm/brcmfmac-sdio.bin"
-#define BRCMF_SDIO_NV_NAME "brcm/brcmfmac-sdio.txt"
-MODULE_FIRMWARE(BRCMF_SDIO_FW_NAME);
-MODULE_FIRMWARE(BRCMF_SDIO_NV_NAME);
-
#define BRCMF_IDLE_IMMEDIATE (-1) /* Enter idle immediately */
#define BRCMF_IDLE_ACTIVE 0 /* Do not request any SD clock change
* when idle
@@ -454,9 +449,6 @@ struct brcmf_sdio {
struct work_struct datawork;
atomic_t dpc_tskcnt;
- const struct firmware *firmware;
- u32 fw_ptr;
-
bool txoff; /* Transmit flow-controlled */
struct brcmf_sdio_count sdcnt;
bool sr_enabled; /* SaveRestore enabled */
@@ -493,6 +485,100 @@ enum brcmf_sdio_frmtype {
BRCMF_SDIO_FT_SUB,
};
+#define BCM43143_FIRMWARE_NAME "brcm/brcmfmac43143-sdio.bin"
+#define BCM43143_NVRAM_NAME "brcm/brcmfmac43143-sdio.txt"
+#define BCM43241B0_FIRMWARE_NAME "brcm/brcmfmac43241b0-sdio.bin"
+#define BCM43241B0_NVRAM_NAME "brcm/brcmfmac43241b0-sdio.txt"
+#define BCM43241B4_FIRMWARE_NAME "brcm/brcmfmac43241b4-sdio.bin"
+#define BCM43241B4_NVRAM_NAME "brcm/brcmfmac43241b4-sdio.txt"
+#define BCM4329_FIRMWARE_NAME "brcm/brcmfmac4329-sdio.bin"
+#define BCM4329_NVRAM_NAME "brcm/brcmfmac4329-sdio.txt"
+#define BCM4330_FIRMWARE_NAME "brcm/brcmfmac4330-sdio.bin"
+#define BCM4330_NVRAM_NAME "brcm/brcmfmac4330-sdio.txt"
+#define BCM4334_FIRMWARE_NAME "brcm/brcmfmac4334-sdio.bin"
+#define BCM4334_NVRAM_NAME "brcm/brcmfmac4334-sdio.txt"
+#define BCM4335_FIRMWARE_NAME "brcm/brcmfmac4335-sdio.bin"
+#define BCM4335_NVRAM_NAME "brcm/brcmfmac4335-sdio.txt"
+
+MODULE_FIRMWARE(BCM43143_FIRMWARE_NAME);
+MODULE_FIRMWARE(BCM43143_NVRAM_NAME);
+MODULE_FIRMWARE(BCM43241B0_FIRMWARE_NAME);
+MODULE_FIRMWARE(BCM43241B0_NVRAM_NAME);
+MODULE_FIRMWARE(BCM43241B4_FIRMWARE_NAME);
+MODULE_FIRMWARE(BCM43241B4_NVRAM_NAME);
+MODULE_FIRMWARE(BCM4329_FIRMWARE_NAME);
+MODULE_FIRMWARE(BCM4329_NVRAM_NAME);
+MODULE_FIRMWARE(BCM4330_FIRMWARE_NAME);
+MODULE_FIRMWARE(BCM4330_NVRAM_NAME);
+MODULE_FIRMWARE(BCM4334_FIRMWARE_NAME);
+MODULE_FIRMWARE(BCM4334_NVRAM_NAME);
+MODULE_FIRMWARE(BCM4335_FIRMWARE_NAME);
+MODULE_FIRMWARE(BCM4335_NVRAM_NAME);
+
+struct brcmf_firmware_names {
+ u32 chipid;
+ u32 revmsk;
+ const char *bin;
+ const char *nv;
+};
+
+enum brcmf_firmware_type {
+ BRCMF_FIRMWARE_BIN,
+ BRCMF_FIRMWARE_NVRAM
+};
+
+#define BRCMF_FIRMWARE_NVRAM(name) \
+ name ## _FIRMWARE_NAME, name ## _NVRAM_NAME
+
+static const struct brcmf_firmware_names brcmf_fwname_data[] = {
+ { BCM43143_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM43143) },
+ { BCM43241_CHIP_ID, 0x0000001F, BRCMF_FIRMWARE_NVRAM(BCM43241B0) },
+ { BCM43241_CHIP_ID, 0xFFFFFFE0, BRCMF_FIRMWARE_NVRAM(BCM43241B4) },
+ { BCM4329_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4329) },
+ { BCM4330_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4330) },
+ { BCM4334_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4334) },
+ { BCM4335_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4335) }
+};
+
+
+static const struct firmware *brcmf_sdbrcm_get_fw(struct brcmf_sdio *bus,
+ enum brcmf_firmware_type type)
+{
+ const struct firmware *fw;
+ const char *name;
+ int err, i;
+
+ for (i = 0; i < ARRAY_SIZE(brcmf_fwname_data); i++) {
+ if (brcmf_fwname_data[i].chipid == bus->ci->chip &&
+ brcmf_fwname_data[i].revmsk & BIT(bus->ci->chiprev)) {
+ switch (type) {
+ case BRCMF_FIRMWARE_BIN:
+ name = brcmf_fwname_data[i].bin;
+ break;
+ case BRCMF_FIRMWARE_NVRAM:
+ name = brcmf_fwname_data[i].nv;
+ break;
+ default:
+ brcmf_err("invalid firmware type (%d)\n", type);
+ return NULL;
+ }
+ goto found;
+ }
+ }
+ brcmf_err("Unknown chipid %d [%d]\n",
+ bus->ci->chip, bus->ci->chiprev);
+ return NULL;
+
+found:
+ err = request_firmware(&fw, name, &bus->sdiodev->func[2]->dev);
+ if ((err) || (!fw)) {
+ brcmf_err("fail to request firmware %s (%d)\n", name, err);
+ return NULL;
+ }
+
+ return fw;
+}
+
static void pkt_align(struct sk_buff *p, int len, int align)
{
uint datalign;
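
The table lookup above keys on chip ID plus a per-revision bitmask, which is how the two 43241 rows coexist: revisions 0-4 (mask 0x0000001F) select the B0 image and anything newer falls through to B4. A standalone sketch of the matching rule, with the table trimmed to those two rows:

	#include <stdio.h>
	#include <stdint.h>

	#define BIT(n)	(1u << (n))

	struct fw_row { uint32_t chipid, revmsk; const char *bin; };

	static const struct fw_row rows[] = {
		{ 0x4324, 0x0000001F, "brcm/brcmfmac43241b0-sdio.bin" },
		{ 0x4324, 0xFFFFFFE0, "brcm/brcmfmac43241b4-sdio.bin" },
	};

	static const char *pick_fw(uint32_t chip, uint32_t rev)
	{
		unsigned int i;

		for (i = 0; i < sizeof(rows) / sizeof(rows[0]); i++)
			if (rows[i].chipid == chip && (rows[i].revmsk & BIT(rev)))
				return rows[i].bin;
		return NULL;	/* unknown chip/revision */
	}

	int main(void)
	{
		printf("rev 2 -> %s\n", pick_fw(0x4324, 2));	/* b0 image */
		printf("rev 5 -> %s\n", pick_fw(0x4324, 5));	/* b4 image */
		return 0;
	}
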
@@ -1406,13 +1492,12 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
bus->glom.qlen, pfirst, pfirst->data,
pfirst->len, pfirst->next,
pfirst->prev);
+ skb_unlink(pfirst, &bus->glom);
+ brcmf_rx_frame(bus->sdiodev->dev, pfirst);
+ bus->sdcnt.rxglompkts++;
}
- /* sent any remaining packets up */
- if (bus->glom.qlen)
- brcmf_rx_frames(bus->sdiodev->dev, &bus->glom);
bus->sdcnt.rxglomframes++;
- bus->sdcnt.rxglompkts += bus->glom.qlen;
}
return num;
}
@@ -1557,7 +1642,6 @@ static void brcmf_pad(struct brcmf_sdio *bus, u16 *pad, u16 *rdlen)
static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
{
struct sk_buff *pkt; /* Packet for event or data frames */
- struct sk_buff_head pktlist; /* needed for bus interface */
u16 pad; /* Number of pad bytes to read */
uint rxleft = 0; /* Remaining number of frames allowed */
int ret; /* Return code from calls */
@@ -1759,9 +1843,7 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
continue;
}
- skb_queue_head_init(&pktlist);
- skb_queue_tail(&pktlist, pkt);
- brcmf_rx_frames(bus->sdiodev->dev, &pktlist);
+ brcmf_rx_frame(bus->sdiodev->dev, pkt);
}
rxcount = maxframes - rxleft;
@@ -1786,10 +1868,15 @@ brcmf_sdbrcm_wait_event_wakeup(struct brcmf_sdio *bus)
return;
}
+/**
+ * struct brcmf_skbuff_cb reserves the first two bytes of sk_buff::cb for
+ * bus layer usage.
+ */
/* flag marking a dummy skb added for DMA alignment requirement */
-#define DUMMY_SKB_FLAG 0x10000
+#define ALIGN_SKB_FLAG 0x8000
/* bit mask of data length chopped from the previous packet */
-#define DUMMY_SKB_CHOP_LEN_MASK 0xffff
+#define ALIGN_SKB_CHOP_LEN_MASK 0x7fff
+
/**
* brcmf_sdio_txpkt_prep - packet preparation for transmit
* @bus: brcmf_sdio structure pointer
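
The flag and mask above shrink so everything fits the new 16-bit bus_flags field that struct brcmf_skbuff_cb (see the fwsignal.c hunk later in this patch) now reserves at the front of skb->cb: bit 15 marks a dummy skb inserted for DMA alignment, and bits 0-14 carry the number of bytes chopped from the previous packet. A standalone sketch of the packing:

	#include <assert.h>
	#include <stdint.h>

	#define ALIGN_SKB_FLAG		0x8000
	#define ALIGN_SKB_CHOP_LEN_MASK	0x7fff

	int main(void)
	{
		/* dummy alignment skb carrying 37 chopped tail bytes */
		uint16_t bus_flags = ALIGN_SKB_FLAG + 37;

		assert(bus_flags & ALIGN_SKB_FLAG);
		assert((bus_flags & ALIGN_SKB_CHOP_LEN_MASK) == 37);
		return 0;
	}
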
@@ -1854,7 +1941,7 @@ brcmf_sdio_txpkt_prep(struct brcmf_sdio *bus, struct sk_buff_head *pktq,
memcpy(pkt_new->data,
pkt_next->data + pkt_next->len - tail_chop,
tail_chop);
- *(u32 *)(pkt_new->cb) = DUMMY_SKB_FLAG + tail_chop;
+ *(u32 *)(pkt_new->cb) = ALIGN_SKB_FLAG + tail_chop;
skb_trim(pkt_next, pkt_next->len - tail_chop);
__skb_queue_after(pktq, pkt_next, pkt_new);
} else {
@@ -1908,8 +1995,8 @@ brcmf_sdio_txpkt_postp(struct brcmf_sdio *bus, struct sk_buff_head *pktq)
skb_queue_walk_safe(pktq, pkt_next, tmp) {
dummy_flags = *(u32 *)(pkt_next->cb);
- if (dummy_flags & DUMMY_SKB_FLAG) {
- chop_len = dummy_flags & DUMMY_SKB_CHOP_LEN_MASK;
+ if (dummy_flags & ALIGN_SKB_FLAG) {
+ chop_len = dummy_flags & ALIGN_SKB_CHOP_LEN_MASK;
if (chop_len) {
pkt_prev = pkt_next->prev;
memcpy(pkt_prev->data + pkt_prev->len,
@@ -3037,69 +3124,43 @@ static bool brcmf_sdbrcm_download_state(struct brcmf_sdio *bus, bool enter)
return true;
}
-static int brcmf_sdbrcm_get_image(char *buf, int len, struct brcmf_sdio *bus)
-{
- if (bus->firmware->size < bus->fw_ptr + len)
- len = bus->firmware->size - bus->fw_ptr;
-
- memcpy(buf, &bus->firmware->data[bus->fw_ptr], len);
- bus->fw_ptr += len;
- return len;
-}
-
static int brcmf_sdbrcm_download_code_file(struct brcmf_sdio *bus)
{
+ const struct firmware *fw;
+ int err;
int offset;
- uint len;
- u8 *memblock = NULL, *memptr;
- int ret;
- u8 idx;
-
- brcmf_dbg(INFO, "Enter\n");
-
- ret = request_firmware(&bus->firmware, BRCMF_SDIO_FW_NAME,
- &bus->sdiodev->func[2]->dev);
- if (ret) {
- brcmf_err("Fail to request firmware %d\n", ret);
- return ret;
- }
- bus->fw_ptr = 0;
-
- memptr = memblock = kmalloc(MEMBLOCK + BRCMF_SDALIGN, GFP_ATOMIC);
- if (memblock == NULL) {
- ret = -ENOMEM;
- goto err;
- }
- if ((u32)(unsigned long)memblock % BRCMF_SDALIGN)
- memptr += (BRCMF_SDALIGN -
- ((u32)(unsigned long)memblock % BRCMF_SDALIGN));
-
- offset = bus->ci->rambase;
-
- /* Download image */
- len = brcmf_sdbrcm_get_image((char *)memptr, MEMBLOCK, bus);
- idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_ARM_CR4);
- if (BRCMF_MAX_CORENUM != idx)
- memcpy(&bus->ci->rst_vec, memptr, sizeof(bus->ci->rst_vec));
- while (len) {
- ret = brcmf_sdio_ramrw(bus->sdiodev, true, offset, memptr, len);
- if (ret) {
+ int address;
+ int len;
+
+ fw = brcmf_sdbrcm_get_fw(bus, BRCMF_FIRMWARE_BIN);
+ if (fw == NULL)
+ return -ENOENT;
+
+ if (brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_ARM_CR4) !=
+ BRCMF_MAX_CORENUM)
+ memcpy(&bus->ci->rst_vec, fw->data, sizeof(bus->ci->rst_vec));
+
+ err = 0;
+ offset = 0;
+ address = bus->ci->rambase;
+ while (offset < fw->size) {
+ len = ((offset + MEMBLOCK) < fw->size) ? MEMBLOCK :
+ fw->size - offset;
+ err = brcmf_sdio_ramrw(bus->sdiodev, true, address,
+ (u8 *)&fw->data[offset], len);
+ if (err) {
brcmf_err("error %d on writing %d membytes at 0x%08x\n",
- ret, MEMBLOCK, offset);
- goto err;
+ err, len, address);
+ goto failure;
}
-
- offset += MEMBLOCK;
- len = brcmf_sdbrcm_get_image((char *)memptr, MEMBLOCK, bus);
+ offset += len;
+ address += len;
}
-err:
- kfree(memblock);
+failure:
+ release_firmware(fw);
- release_firmware(bus->firmware);
- bus->fw_ptr = 0;
-
- return ret;
+ return err;
}
/*
@@ -3111,7 +3172,8 @@ err:
* by two NULs.
*/
-static int brcmf_process_nvram_vars(struct brcmf_sdio *bus)
+static int brcmf_process_nvram_vars(struct brcmf_sdio *bus,
+ const struct firmware *nv)
{
char *varbuf;
char *dp;
@@ -3120,12 +3182,12 @@ static int brcmf_process_nvram_vars(struct brcmf_sdio *bus)
int ret = 0;
uint buf_len, n, len;
- len = bus->firmware->size;
+ len = nv->size;
varbuf = vmalloc(len);
if (!varbuf)
return -ENOMEM;
- memcpy(varbuf, bus->firmware->data, len);
+ memcpy(varbuf, nv->data, len);
dp = varbuf;
findNewline = false;
@@ -3177,18 +3239,16 @@ err:
static int brcmf_sdbrcm_download_nvram(struct brcmf_sdio *bus)
{
+ const struct firmware *nv;
int ret;
- ret = request_firmware(&bus->firmware, BRCMF_SDIO_NV_NAME,
- &bus->sdiodev->func[2]->dev);
- if (ret) {
- brcmf_err("Fail to request nvram %d\n", ret);
- return ret;
- }
+ nv = brcmf_sdbrcm_get_fw(bus, BRCMF_FIRMWARE_NVRAM);
+ if (nv == NULL)
+ return -ENOENT;
- ret = brcmf_process_nvram_vars(bus);
+ ret = brcmf_process_nvram_vars(bus, nv);
- release_firmware(bus->firmware);
+ release_firmware(nv);
return ret;
}
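
The rewritten code download in brcmf_sdbrcm_download_code_file() is the standard chunked-copy pattern: walk the firmware image in MEMBLOCK-sized pieces, clamping the final chunk to whatever remains, and advance the file offset and the target RAM address in lockstep. Modeled as a standalone sketch (the MEMBLOCK and image-size values here are illustrative, not the driver's):

	#include <stdio.h>

	#define MEMBLOCK	2048	/* illustrative chunk size */

	int main(void)
	{
		unsigned int total = 5000;		/* illustrative image size */
		unsigned int offset = 0;
		unsigned int address = 0x180000;	/* rambase */

		while (offset < total) {
			unsigned int len = (offset + MEMBLOCK < total) ?
					   MEMBLOCK : total - offset;

			printf("write %u bytes at 0x%08x\n", len, address);
			offset += len;
			address += len;
		}
		return 0;
	}
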
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fweh.h b/drivers/net/wireless/brcm80211/brcmfmac/fweh.h
index e679214..14bc24d 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fweh.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fweh.h
@@ -102,7 +102,8 @@ struct brcmf_event;
BRCMF_ENUM_DEF(DCS_REQUEST, 73) \
BRCMF_ENUM_DEF(FIFO_CREDIT_MAP, 74) \
BRCMF_ENUM_DEF(ACTION_FRAME_RX, 75) \
- BRCMF_ENUM_DEF(BCMC_CREDIT_SUPPORT, 127)
+ BRCMF_ENUM_DEF(BCMC_CREDIT_SUPPORT, 127) \
+ BRCMF_ENUM_DEF(PSTA_PRIMARY_INTF_IND, 128)
#define BRCMF_ENUM_DEF(id, val) \
BRCMF_E_##id = (val),
@@ -114,6 +115,8 @@ enum brcmf_fweh_event_code {
};
#undef BRCMF_ENUM_DEF
+#define BRCMF_EVENTING_MASK_LEN DIV_ROUND_UP(BRCMF_E_LAST, 8)
+
/* flags field values in struct brcmf_event_msg */
#define BRCMF_EVENT_MSG_LINK 0x01
#define BRCMF_EVENT_MSG_FLUSHTXQ 0x02
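
Deriving BRCMF_EVENTING_MASK_LEN from BRCMF_E_LAST means a new event code such as PSTA_PRIMARY_INTF_IND (128) automatically grows the event bitmask instead of silently overflowing the hard-coded 16 bytes removed from dhd.h above. The arithmetic is a rounded-up byte count; assuming BRCMF_E_LAST ends up at 129:

	#include <assert.h>

	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

	int main(void)
	{
		assert(DIV_ROUND_UP(128, 8) == 16);	/* old limit: codes 0..127 */
		assert(DIV_ROUND_UP(129, 8) == 17);	/* code 128 needs a 17th byte */
		return 0;
	}
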
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c
index 82f9140..d0cd0bf 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c
@@ -168,6 +168,7 @@ enum brcmf_fws_skb_state {
/**
* struct brcmf_skbuff_cb - control buffer associated with skbuff.
*
+ * @bus_flags: 2 bytes reserved for bus-specific parameters
* @if_flags: holds interface index and packet related flags.
* @htod: host to device packet identifier (used in PKTTAG tlv).
* @state: transmit state of the packet.
@@ -177,6 +178,7 @@ enum brcmf_fws_skb_state {
* provides 48 bytes of storage so this structure should not exceed that.
*/
struct brcmf_skbuff_cb {
+ u16 bus_flags;
u16 if_flags;
u32 htod;
enum brcmf_fws_skb_state state;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c
index ca72177..2096a14 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c
@@ -18,6 +18,7 @@
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/mmc/card.h>
+#include <linux/mmc/sdio_func.h>
#include <linux/ssb/ssb_regs.h>
#include <linux/bcma/bcma.h>
@@ -136,6 +137,8 @@ brcmf_sdio_sb_iscoreup(struct brcmf_sdio_dev *sdiodev,
u8 idx;
idx = brcmf_sdio_chip_getinfidx(ci, coreid);
+ if (idx == BRCMF_MAX_CORENUM)
+ return false;
regdata = brcmf_sdio_regrl(sdiodev,
CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
@@ -154,6 +157,8 @@ brcmf_sdio_ai_iscoreup(struct brcmf_sdio_dev *sdiodev,
bool ret;
idx = brcmf_sdio_chip_getinfidx(ci, coreid);
+ if (idx == BRCMF_MAX_CORENUM)
+ return false;
regdata = brcmf_sdio_regrl(sdiodev, ci->c_inf[idx].wrapbase+BCMA_IOCTL,
NULL);
@@ -261,6 +266,8 @@ brcmf_sdio_ai_coredisable(struct brcmf_sdio_dev *sdiodev,
u32 regdata;
idx = brcmf_sdio_chip_getinfidx(ci, coreid);
+ if (idx == BRCMF_MAX_CORENUM)
+ return;
/* if core is already in reset, just return */
regdata = brcmf_sdio_regrl(sdiodev,
@@ -304,6 +311,8 @@ brcmf_sdio_sb_resetcore(struct brcmf_sdio_dev *sdiodev,
u8 idx;
idx = brcmf_sdio_chip_getinfidx(ci, coreid);
+ if (idx == BRCMF_MAX_CORENUM)
+ return;
/*
* Must do the disable sequence first to work for
@@ -368,6 +377,8 @@ brcmf_sdio_ai_resetcore(struct brcmf_sdio_dev *sdiodev,
u32 regdata;
idx = brcmf_sdio_chip_getinfidx(ci, coreid);
+ if (idx == BRCMF_MAX_CORENUM)
+ return;
/* must disable first to work for arbitrary current core state */
brcmf_sdio_ai_coredisable(sdiodev, ci, coreid, core_bits);
@@ -444,6 +455,9 @@ static int brcmf_sdio_chip_recognition(struct brcmf_sdio_dev *sdiodev,
NULL);
ci->chip = regdata & CID_ID_MASK;
ci->chiprev = (regdata & CID_REV_MASK) >> CID_REV_SHIFT;
+ if (sdiodev->func[0]->device == SDIO_DEVICE_ID_BROADCOM_4335_4339 &&
+ ci->chiprev >= 2)
+ ci->chip = BCM4339_CHIP_ID;
ci->socitype = (regdata & CID_TYPE_MASK) >> CID_TYPE_SHIFT;
brcmf_dbg(INFO, "chipid=0x%x chiprev=%d\n", ci->chip, ci->chiprev);
@@ -541,6 +555,20 @@ static int brcmf_sdio_chip_recognition(struct brcmf_sdio_dev *sdiodev,
ci->ramsize = 0xc0000;
ci->rambase = 0x180000;
break;
+ case BCM4339_CHIP_ID:
+ ci->c_inf[0].wrapbase = 0x18100000;
+ ci->c_inf[0].cib = 0x2e084411;
+ ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
+ ci->c_inf[1].base = 0x18005000;
+ ci->c_inf[1].wrapbase = 0x18105000;
+ ci->c_inf[1].cib = 0x15004211;
+ ci->c_inf[2].id = BCMA_CORE_ARM_CR4;
+ ci->c_inf[2].base = 0x18002000;
+ ci->c_inf[2].wrapbase = 0x18102000;
+ ci->c_inf[2].cib = 0x04084411;
+ ci->ramsize = 0xc0000;
+ ci->rambase = 0x180000;
+ break;
default:
brcmf_err("chipid 0x%x is not supported\n", ci->chip);
return -ENODEV;
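
The 4335 and 4339 report the same SDIO device ID (0x4335, see the new sdio_chip.h define below), so recognition falls back on the revision decoded from the chip ID register: revision 2 or later behind that ID is treated as a 4339 and gets the core map added above. A standalone sketch of the rule:

	#include <stdio.h>

	#define SDIO_DEVICE_ID_BROADCOM_4335_4339	0x4335
	#define BCM4339_CHIP_ID				0x4339

	static unsigned int resolve_chip(unsigned int sdio_id, unsigned int chip,
					 unsigned int chiprev)
	{
		if (sdio_id == SDIO_DEVICE_ID_BROADCOM_4335_4339 && chiprev >= 2)
			return BCM4339_CHIP_ID;
		return chip;	/* trust the ID register otherwise */
	}

	int main(void)
	{
		printf("rev 1 -> 0x%x\n", resolve_chip(0x4335, 0x4335, 1));
		printf("rev 2 -> 0x%x\n", resolve_chip(0x4335, 0x4335, 2));
		return 0;
	}
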
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.h b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.h
index 83c041f..076b83c 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.h
@@ -54,6 +54,14 @@
#define BRCMF_MAX_CORENUM 6
+/* SDIO device ID */
+#define SDIO_DEVICE_ID_BROADCOM_43143 43143
+#define SDIO_DEVICE_ID_BROADCOM_43241 0x4324
+#define SDIO_DEVICE_ID_BROADCOM_4329 0x4329
+#define SDIO_DEVICE_ID_BROADCOM_4330 0x4330
+#define SDIO_DEVICE_ID_BROADCOM_4334 0x4334
+#define SDIO_DEVICE_ID_BROADCOM_4335_4339 0x4335
+
struct chip_core_info {
u16 id;
u16 rev;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
index 39e01a7..bf6758d 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
@@ -435,7 +435,6 @@ static void brcmf_usb_rx_complete(struct urb *urb)
struct brcmf_usbreq *req = (struct brcmf_usbreq *)urb->context;
struct brcmf_usbdev_info *devinfo = req->devinfo;
struct sk_buff *skb;
- struct sk_buff_head skbq;
brcmf_dbg(USB, "Enter, urb->status=%d\n", urb->status);
brcmf_usb_del_fromq(devinfo, req);
@@ -450,10 +449,8 @@ static void brcmf_usb_rx_complete(struct urb *urb)
}
if (devinfo->bus_pub.state == BRCMFMAC_USB_STATE_UP) {
- skb_queue_head_init(&skbq);
- skb_queue_tail(&skbq, skb);
skb_put(skb, urb->actual_length);
- brcmf_rx_frames(devinfo->dev, &skbq);
+ brcmf_rx_frame(devinfo->dev, skb);
brcmf_usb_rx_refill(devinfo, req);
} else {
brcmu_pkt_buf_free_skb(skb);
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.c b/drivers/net/wireless/brcm80211/brcmsmac/main.c
index 4608e0e..df6229e 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/main.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/main.c
@@ -5695,7 +5695,7 @@ static bool brcms_c_chipmatch_pci(struct bcma_device *core)
return true;
if ((device == BCM43224_D11N_ID) || (device == BCM43225_D11N2G_ID))
return true;
- if (device == BCM4313_D11N2G_ID)
+ if (device == BCM4313_D11N2G_ID || device == BCM4313_CHIP_ID)
return true;
if ((device == BCM43236_D11N_ID) || (device == BCM43236_D11N2G_ID))
return true;
diff --git a/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h b/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
index c1fe245..84113ea 100644
--- a/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
+++ b/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
@@ -41,5 +41,6 @@
#define BCM4331_CHIP_ID 0x4331
#define BCM4334_CHIP_ID 0x4334
#define BCM4335_CHIP_ID 0x4335
+#define BCM4339_CHIP_ID 0x4339
#endif /* _BRCM_HW_IDS_H_ */
diff --git a/drivers/net/wireless/cw1200/cw1200_spi.c b/drivers/net/wireless/cw1200/cw1200_spi.c
index f5e6b48..e310752 100644
--- a/drivers/net/wireless/cw1200/cw1200_spi.c
+++ b/drivers/net/wireless/cw1200/cw1200_spi.c
@@ -375,7 +375,7 @@ static struct hwbus_ops cw1200_spi_hwbus_ops = {
static int cw1200_spi_probe(struct spi_device *func)
{
const struct cw1200_platform_data_spi *plat_data =
- func->dev.platform_data;
+ dev_get_platdata(&func->dev);
struct hwbus_priv *self;
int status;
@@ -453,7 +453,7 @@ static int cw1200_spi_disconnect(struct spi_device *func)
}
kfree(self);
}
- cw1200_spi_off(func->dev.platform_data);
+ cw1200_spi_off(dev_get_platdata(&func->dev));
return 0;
}
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index 6b823a1..f394af7 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -11885,7 +11885,6 @@ static int ipw_pci_probe(struct pci_dev *pdev,
pci_release_regions(pdev);
out_pci_disable_device:
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
out_free_libipw:
free_libipw(priv->net_dev, 0);
out:
@@ -11966,7 +11965,6 @@ static void ipw_pci_remove(struct pci_dev *pdev)
iounmap(priv->hw_base);
pci_release_regions(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
/* wiphy_unregister needs to be here, before free_libipw */
wiphy_unregister(priv->ieee->wdev.wiphy);
kfree(priv->ieee->a_band.channels);
diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
index 9581d07..dea3b50 100644
--- a/drivers/net/wireless/iwlegacy/3945-mac.c
+++ b/drivers/net/wireless/iwlegacy/3945-mac.c
@@ -3811,7 +3811,6 @@ out_iounmap:
out_pci_release_regions:
pci_release_regions(pdev);
out_pci_disable_device:
- pci_set_drvdata(pdev, NULL);
pci_disable_device(pdev);
out_ieee80211_free_hw:
ieee80211_free_hw(il->hw);
@@ -3888,7 +3887,6 @@ il3945_pci_remove(struct pci_dev *pdev)
iounmap(il->hw_base);
pci_release_regions(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
il_free_channel_map(il);
il_free_geos(il);
diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/iwlegacy/4965-mac.c
index 5ab50a5..3982ab7 100644
--- a/drivers/net/wireless/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/iwlegacy/4965-mac.c
@@ -6706,7 +6706,6 @@ out_free_eeprom:
out_iounmap:
iounmap(il->hw_base);
out_pci_release_regions:
- pci_set_drvdata(pdev, NULL);
pci_release_regions(pdev);
out_pci_disable_device:
pci_disable_device(pdev);
@@ -6787,7 +6786,6 @@ il4965_pci_remove(struct pci_dev *pdev)
iounmap(il->hw_base);
pci_release_regions(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
il4965_uninit_drv(il);
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index bad95d2..c3f904d 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -1401,6 +1401,10 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
spin_lock_init(&trans_pcie->reg_lock);
init_waitqueue_head(&trans_pcie->ucode_write_waitq);
+ err = pci_enable_device(pdev);
+ if (err)
+ goto out_no_pci;
+
if (!cfg->base_params->pcie_l1_allowed) {
/*
* W/A - seems to solve weird behavior. We need to remove this
@@ -1412,10 +1416,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
PCIE_LINK_STATE_CLKPM);
}
- err = pci_enable_device(pdev);
- if (err)
- goto out_no_pci;
-
pci_set_master(pdev);
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
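
The reorder above moves pci_enable_device() ahead of the ASPM workaround, presumably so the device is fully enabled before its PCIe link state is prodded; every later step (pci_set_master(), the DMA masks) already assumed an enabled device. The resulting probe skeleton (a sketch, not the full function):

	static int example_probe(struct pci_dev *pdev)
	{
		int err;

		err = pci_enable_device(pdev);	/* first touch of the device */
		if (err)
			return err;

		/* only now: link-state tweaks, bus mastering, DMA masks, ... */
		pci_set_master(pdev);
		return 0;
	}
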
diff --git a/drivers/net/wireless/libertas/if_spi.c b/drivers/net/wireless/libertas/if_spi.c
index 4bb6574..5d39ec8 100644
--- a/drivers/net/wireless/libertas/if_spi.c
+++ b/drivers/net/wireless/libertas/if_spi.c
@@ -1128,7 +1128,7 @@ static int if_spi_probe(struct spi_device *spi)
{
struct if_spi_card *card;
struct lbs_private *priv = NULL;
- struct libertas_spi_platform_data *pdata = spi->dev.platform_data;
+ struct libertas_spi_platform_data *pdata = dev_get_platdata(&spi->dev);
int err = 0;
lbs_deb_enter(LBS_DEB_SPI);
diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c
index 2d76147..fb3fa18 100644
--- a/drivers/net/wireless/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/mwifiex/cmdevt.c
@@ -1048,7 +1048,7 @@ mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter)
struct cmd_ctrl_node *cmd_node = NULL, *tmp_node = NULL;
unsigned long cmd_flags;
unsigned long scan_pending_q_flags;
- uint16_t cancel_scan_cmd = false;
+ bool cancel_scan_cmd = false;
if ((adapter->curr_cmd) &&
(adapter->curr_cmd->wait_q_enabled)) {
diff --git a/drivers/net/wireless/mwifiex/join.c b/drivers/net/wireless/mwifiex/join.c
index 9d7c0e6..717fbe2 100644
--- a/drivers/net/wireless/mwifiex/join.c
+++ b/drivers/net/wireless/mwifiex/join.c
@@ -621,7 +621,7 @@ int mwifiex_ret_802_11_associate(struct mwifiex_private *priv,
int ret = 0;
struct ieee_types_assoc_rsp *assoc_rsp;
struct mwifiex_bssdescriptor *bss_desc;
- u8 enable_data = true;
+ bool enable_data = true;
u16 cap_info, status_code;
assoc_rsp = (struct ieee_types_assoc_rsp *) &resp->params;
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
index fd77833..408f307 100644
--- a/drivers/net/wireless/mwifiex/main.c
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -880,7 +880,9 @@ mwifiex_add_card(void *card, struct semaphore *sem,
adapter->cmd_wait_q.status = 0;
adapter->scan_wait_q_woken = false;
- adapter->workqueue = create_workqueue("MWIFIEX_WORK_QUEUE");
+ adapter->workqueue =
+ alloc_workqueue("MWIFIEX_WORK_QUEUE",
+ WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
if (!adapter->workqueue)
goto err_kmalloc;
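
Switching from the legacy create_workqueue() to alloc_workqueue() lets the driver state the semantics it needs: WQ_HIGHPRI for latency-sensitive packet work, WQ_MEM_RECLAIM so the queue keeps making forward progress under memory pressure, WQ_UNBOUND so work is not pinned to the submitting CPU, and max_active = 1 to keep at most one work item in flight. A kernel-context sketch of the same call in a hypothetical driver:

	#include <linux/workqueue.h>

	static int example_init_wq(struct workqueue_struct **wq)
	{
		*wq = alloc_workqueue("example_wq",
				      WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_UNBOUND,
				      1);	/* max_active */
		if (!*wq)
			return -ENOMEM;
		return 0;
	}
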
diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c
index 52da8ee..33fa943 100644
--- a/drivers/net/wireless/mwifiex/pcie.c
+++ b/drivers/net/wireless/mwifiex/pcie.c
@@ -93,7 +93,7 @@ static int mwifiex_pcie_suspend(struct device *dev)
struct pci_dev *pdev = to_pci_dev(dev);
if (pdev) {
- card = (struct pcie_service_card *) pci_get_drvdata(pdev);
+ card = pci_get_drvdata(pdev);
if (!card || !card->adapter) {
pr_err("Card or adapter structure is not valid\n");
return 0;
@@ -128,7 +128,7 @@ static int mwifiex_pcie_resume(struct device *dev)
struct pci_dev *pdev = to_pci_dev(dev);
if (pdev) {
- card = (struct pcie_service_card *) pci_get_drvdata(pdev);
+ card = pci_get_drvdata(pdev);
if (!card || !card->adapter) {
pr_err("Card or adapter structure is not valid\n");
return 0;
@@ -2037,7 +2037,7 @@ static irqreturn_t mwifiex_pcie_interrupt(int irq, void *context)
goto exit;
}
- card = (struct pcie_service_card *) pci_get_drvdata(pdev);
+ card = pci_get_drvdata(pdev);
if (!card || !card->adapter) {
pr_debug("info: %s: card=%p adapter=%p\n", __func__, card,
card ? card->adapter : NULL);
diff --git a/drivers/net/wireless/mwifiex/sta_cmd.c b/drivers/net/wireless/mwifiex/sta_cmd.c
index c0268b5..7d66018 100644
--- a/drivers/net/wireless/mwifiex/sta_cmd.c
+++ b/drivers/net/wireless/mwifiex/sta_cmd.c
@@ -327,7 +327,7 @@ mwifiex_cmd_802_11_hs_cfg(struct mwifiex_private *priv,
{
struct mwifiex_adapter *adapter = priv->adapter;
struct host_cmd_ds_802_11_hs_cfg_enh *hs_cfg = &cmd->params.opt_hs_cfg;
- u16 hs_activate = false;
+ bool hs_activate = false;
if (!hscfg_param)
/* New Activate command */
diff --git a/drivers/net/wireless/mwifiex/wmm.c b/drivers/net/wireless/mwifiex/wmm.c
index 2e8f9cd..8f8fea0 100644
--- a/drivers/net/wireless/mwifiex/wmm.c
+++ b/drivers/net/wireless/mwifiex/wmm.c
@@ -708,7 +708,7 @@ int mwifiex_ret_wmm_get_status(struct mwifiex_private *priv,
{
u8 *curr = (u8 *) &resp->params.get_wmm_status;
uint16_t resp_len = le16_to_cpu(resp->size), tlv_len;
- int valid = true;
+ bool valid = true;
struct mwifiex_ie_types_data *tlv_hdr;
struct mwifiex_ie_types_wmm_queue_status *tlv_wmm_qstatus;
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index a3707fd..b953ad6 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -6093,7 +6093,6 @@ err_iounmap:
if (priv->sram != NULL)
pci_iounmap(pdev, priv->sram);
- pci_set_drvdata(pdev, NULL);
ieee80211_free_hw(hw);
err_free_reg:
@@ -6147,7 +6146,6 @@ static void mwl8k_remove(struct pci_dev *pdev)
unmap:
pci_iounmap(pdev, priv->regs);
pci_iounmap(pdev, priv->sram);
- pci_set_drvdata(pdev, NULL);
ieee80211_free_hw(hw);
pci_release_regions(pdev);
pci_disable_device(pdev);
diff --git a/drivers/net/wireless/orinoco/orinoco_nortel.c b/drivers/net/wireless/orinoco/orinoco_nortel.c
index d73fdf6..ffb2469 100644
--- a/drivers/net/wireless/orinoco/orinoco_nortel.c
+++ b/drivers/net/wireless/orinoco/orinoco_nortel.c
@@ -234,7 +234,6 @@ static int orinoco_nortel_init_one(struct pci_dev *pdev,
free_irq(pdev->irq, priv);
fail_irq:
- pci_set_drvdata(pdev, NULL);
free_orinocodev(priv);
fail_alloc:
@@ -265,7 +264,6 @@ static void orinoco_nortel_remove_one(struct pci_dev *pdev)
orinoco_if_del(priv);
free_irq(pdev->irq, priv);
- pci_set_drvdata(pdev, NULL);
free_orinocodev(priv);
pci_iounmap(pdev, priv->hw.iobase);
pci_iounmap(pdev, card->attr_io);
diff --git a/drivers/net/wireless/orinoco/orinoco_pci.c b/drivers/net/wireless/orinoco/orinoco_pci.c
index 677bf14..5ae1191 100644
--- a/drivers/net/wireless/orinoco/orinoco_pci.c
+++ b/drivers/net/wireless/orinoco/orinoco_pci.c
@@ -184,7 +184,6 @@ static int orinoco_pci_init_one(struct pci_dev *pdev,
free_irq(pdev->irq, priv);
fail_irq:
- pci_set_drvdata(pdev, NULL);
free_orinocodev(priv);
fail_alloc:
@@ -205,7 +204,6 @@ static void orinoco_pci_remove_one(struct pci_dev *pdev)
orinoco_if_del(priv);
free_irq(pdev->irq, priv);
- pci_set_drvdata(pdev, NULL);
free_orinocodev(priv);
pci_iounmap(pdev, priv->hw.iobase);
pci_release_regions(pdev);
diff --git a/drivers/net/wireless/orinoco/orinoco_plx.c b/drivers/net/wireless/orinoco/orinoco_plx.c
index 2559dbd..bbd36d1 100644
--- a/drivers/net/wireless/orinoco/orinoco_plx.c
+++ b/drivers/net/wireless/orinoco/orinoco_plx.c
@@ -273,7 +273,6 @@ static int orinoco_plx_init_one(struct pci_dev *pdev,
free_irq(pdev->irq, priv);
fail_irq:
- pci_set_drvdata(pdev, NULL);
free_orinocodev(priv);
fail_alloc:
@@ -301,7 +300,6 @@ static void orinoco_plx_remove_one(struct pci_dev *pdev)
orinoco_if_del(priv);
free_irq(pdev->irq, priv);
- pci_set_drvdata(pdev, NULL);
free_orinocodev(priv);
pci_iounmap(pdev, priv->hw.iobase);
pci_iounmap(pdev, card->attr_io);
diff --git a/drivers/net/wireless/orinoco/orinoco_tmd.c b/drivers/net/wireless/orinoco/orinoco_tmd.c
index 42afeee..04b08de 100644
--- a/drivers/net/wireless/orinoco/orinoco_tmd.c
+++ b/drivers/net/wireless/orinoco/orinoco_tmd.c
@@ -170,7 +170,6 @@ static int orinoco_tmd_init_one(struct pci_dev *pdev,
free_irq(pdev->irq, priv);
fail_irq:
- pci_set_drvdata(pdev, NULL);
free_orinocodev(priv);
fail_alloc:
@@ -195,7 +194,6 @@ static void orinoco_tmd_remove_one(struct pci_dev *pdev)
orinoco_if_del(priv);
free_irq(pdev->irq, priv);
- pci_set_drvdata(pdev, NULL);
free_orinocodev(priv);
pci_iounmap(pdev, priv->hw.iobase);
pci_iounmap(pdev, card->bridge_io);
diff --git a/drivers/net/wireless/p54/Kconfig b/drivers/net/wireless/p54/Kconfig
index 15ea36b..cdafb8c 100644
--- a/drivers/net/wireless/p54/Kconfig
+++ b/drivers/net/wireless/p54/Kconfig
@@ -41,7 +41,7 @@ config P54_PCI
config P54_SPI
tristate "Prism54 SPI (stlc45xx) support"
- depends on P54_COMMON && SPI_MASTER && GENERIC_HARDIRQS
+ depends on P54_COMMON && SPI_MASTER
---help---
This driver is for stlc4550 or stlc4560 based wireless chips
such as Nokia's N800/N810 Portable Internet Tablet.
diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c
index 57e3af8..f9a07b0 100644
--- a/drivers/net/wireless/p54/p54pci.c
+++ b/drivers/net/wireless/p54/p54pci.c
@@ -631,7 +631,6 @@ static int p54p_probe(struct pci_dev *pdev,
iounmap(priv->map);
err_free_dev:
- pci_set_drvdata(pdev, NULL);
p54_free_common(dev);
err_free_reg:
diff --git a/drivers/net/wireless/p54/p54spi.c b/drivers/net/wireless/p54/p54spi.c
index 7fc46f2..de15171 100644
--- a/drivers/net/wireless/p54/p54spi.c
+++ b/drivers/net/wireless/p54/p54spi.c
@@ -636,7 +636,7 @@ static int p54spi_probe(struct spi_device *spi)
gpio_direction_input(p54spi_gpio_irq);
ret = request_irq(gpio_to_irq(p54spi_gpio_irq),
- p54spi_interrupt, IRQF_DISABLED, "p54spi",
+ p54spi_interrupt, 0, "p54spi",
priv->spi);
if (ret < 0) {
dev_err(&priv->spi->dev, "request_irq() failed");
diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig
index 68dbbb9..a18b005 100644
--- a/drivers/net/wireless/rt2x00/Kconfig
+++ b/drivers/net/wireless/rt2x00/Kconfig
@@ -219,6 +219,7 @@ config RT2X00_LIB_USB
config RT2X00_LIB
tristate
+ select AVERAGE
config RT2X00_LIB_FIRMWARE
boolean
diff --git a/drivers/net/wireless/rt2x00/rt2800.h b/drivers/net/wireless/rt2x00/rt2800.h
index fa33b5e..aab6b5e 100644
--- a/drivers/net/wireless/rt2x00/rt2800.h
+++ b/drivers/net/wireless/rt2x00/rt2800.h
@@ -52,6 +52,7 @@
* RF3322 2.4G 2T2R(RT3352/RT3371/RT3372/RT3391/RT3392)
* RF3053 2.4G/5G 3T3R(RT3883/RT3563/RT3573/RT3593/RT3662)
* RF5592 2.4G/5G 2T2R
+ * RF3070 2.4G 1T1R
* RF5360 2.4G 1T1R
* RF5370 2.4G 1T1R
* RF5390 2.4G 1T1R
@@ -70,6 +71,7 @@
#define RF3322 0x000c
#define RF3053 0x000d
#define RF5592 0x000f
+#define RF3070 0x3070
#define RF3290 0x3290
#define RF5360 0x5360
#define RF5370 0x5370
@@ -122,7 +124,7 @@
/*
* MAC_CSR0_3290: MAC_CSR0 for RT3290 to identity MAC version number.
*/
-#define MAC_CSR0_3290 0x0000
+#define MAC_CSR0_3290 0x0000
/*
* E2PROM_CSR: PCI EEPROM control register.
@@ -211,17 +213,17 @@
/*
* COEX_CFG_0
*/
-#define COEX_CFG0 0x0040
+#define COEX_CFG0 0x0040
#define COEX_CFG_ANT FIELD32(0xff000000)
/*
* COEX_CFG_1
*/
-#define COEX_CFG1 0x0044
+#define COEX_CFG1 0x0044
/*
* COEX_CFG_2
*/
-#define COEX_CFG2 0x0048
+#define COEX_CFG2 0x0048
#define BT_COEX_CFG1 FIELD32(0xff000000)
#define BT_COEX_CFG0 FIELD32(0x00ff0000)
#define WL_COEX_CFG1 FIELD32(0x0000ff00)
@@ -235,8 +237,8 @@
#define PLL_RESERVED_INPUT2 FIELD32(0x0000ff00)
#define PLL_CONTROL FIELD32(0x00070000)
#define PLL_LPF_R1 FIELD32(0x00080000)
-#define PLL_LPF_C1_CTRL FIELD32(0x00300000)
-#define PLL_LPF_C2_CTRL FIELD32(0x00c00000)
+#define PLL_LPF_C1_CTRL FIELD32(0x00300000)
+#define PLL_LPF_C2_CTRL FIELD32(0x00c00000)
#define PLL_CP_CURRENT_CTRL FIELD32(0x03000000)
#define PLL_PFD_DELAY_CTRL FIELD32(0x0c000000)
#define PLL_LOCK_CTRL FIELD32(0x70000000)
@@ -2164,7 +2166,7 @@ struct mac_iveiv_entry {
*/
#define RFCSR6_R1 FIELD8(0x03)
#define RFCSR6_R2 FIELD8(0x40)
-#define RFCSR6_TXDIV FIELD8(0x0c)
+#define RFCSR6_TXDIV FIELD8(0x0c)
/* bits for RF3053 */
#define RFCSR6_VCO_IC FIELD8(0xc0)
@@ -2202,13 +2204,13 @@ struct mac_iveiv_entry {
* RFCSR 12:
*/
#define RFCSR12_TX_POWER FIELD8(0x1f)
-#define RFCSR12_DR0 FIELD8(0xe0)
+#define RFCSR12_DR0 FIELD8(0xe0)
/*
* RFCSR 13:
*/
#define RFCSR13_TX_POWER FIELD8(0x1f)
-#define RFCSR13_DR0 FIELD8(0xe0)
+#define RFCSR13_DR0 FIELD8(0xe0)
/*
* RFCSR 15:
@@ -2226,7 +2228,7 @@ struct mac_iveiv_entry {
#define RFCSR17_TXMIXER_GAIN FIELD8(0x07)
#define RFCSR17_TX_LO1_EN FIELD8(0x08)
#define RFCSR17_R FIELD8(0x20)
-#define RFCSR17_CODE FIELD8(0x7f)
+#define RFCSR17_CODE FIELD8(0x7f)
/* RFCSR 18 */
#define RFCSR18_XO_TUNE_BYPASS FIELD8(0x40)
@@ -2449,7 +2451,7 @@ enum rt2800_eeprom_word {
*/
#define EEPROM_NIC_CONF0_RXPATH FIELD16(0x000f)
#define EEPROM_NIC_CONF0_TXPATH FIELD16(0x00f0)
-#define EEPROM_NIC_CONF0_RF_TYPE FIELD16(0x0f00)
+#define EEPROM_NIC_CONF0_RF_TYPE FIELD16(0x0f00)
/*
* EEPROM NIC Configuration 1
@@ -2471,18 +2473,18 @@ enum rt2800_eeprom_word {
* DAC_TEST: 0: disable, 1: enable
*/
#define EEPROM_NIC_CONF1_HW_RADIO FIELD16(0x0001)
-#define EEPROM_NIC_CONF1_EXTERNAL_TX_ALC FIELD16(0x0002)
-#define EEPROM_NIC_CONF1_EXTERNAL_LNA_2G FIELD16(0x0004)
-#define EEPROM_NIC_CONF1_EXTERNAL_LNA_5G FIELD16(0x0008)
+#define EEPROM_NIC_CONF1_EXTERNAL_TX_ALC FIELD16(0x0002)
+#define EEPROM_NIC_CONF1_EXTERNAL_LNA_2G FIELD16(0x0004)
+#define EEPROM_NIC_CONF1_EXTERNAL_LNA_5G FIELD16(0x0008)
#define EEPROM_NIC_CONF1_CARDBUS_ACCEL FIELD16(0x0010)
#define EEPROM_NIC_CONF1_BW40M_SB_2G FIELD16(0x0020)
#define EEPROM_NIC_CONF1_BW40M_SB_5G FIELD16(0x0040)
#define EEPROM_NIC_CONF1_WPS_PBC FIELD16(0x0080)
#define EEPROM_NIC_CONF1_BW40M_2G FIELD16(0x0100)
#define EEPROM_NIC_CONF1_BW40M_5G FIELD16(0x0200)
-#define EEPROM_NIC_CONF1_BROADBAND_EXT_LNA FIELD16(0x400)
+#define EEPROM_NIC_CONF1_BROADBAND_EXT_LNA FIELD16(0x400)
#define EEPROM_NIC_CONF1_ANT_DIVERSITY FIELD16(0x1800)
-#define EEPROM_NIC_CONF1_INTERNAL_TX_ALC FIELD16(0x2000)
+#define EEPROM_NIC_CONF1_INTERNAL_TX_ALC FIELD16(0x2000)
#define EEPROM_NIC_CONF1_BT_COEXIST FIELD16(0x4000)
#define EEPROM_NIC_CONF1_DAC_TEST FIELD16(0x8000)
@@ -2521,9 +2523,9 @@ enum rt2800_eeprom_word {
* TX_STREAM: 0: Reserved, 1: 1 Stream, 2: 2 Stream
* CRYSTAL: 00: Reserved, 01: One crystal, 10: Two crystal, 11: Reserved
*/
-#define EEPROM_NIC_CONF2_RX_STREAM FIELD16(0x000f)
-#define EEPROM_NIC_CONF2_TX_STREAM FIELD16(0x00f0)
-#define EEPROM_NIC_CONF2_CRYSTAL FIELD16(0x0600)
+#define EEPROM_NIC_CONF2_RX_STREAM FIELD16(0x000f)
+#define EEPROM_NIC_CONF2_TX_STREAM FIELD16(0x00f0)
+#define EEPROM_NIC_CONF2_CRYSTAL FIELD16(0x0600)
/*
* EEPROM LNA
@@ -2790,7 +2792,7 @@ enum rt2800_eeprom_word {
#define MCU_CURRENT 0x36
#define MCU_LED 0x50
#define MCU_LED_STRENGTH 0x51
-#define MCU_LED_AG_CONF 0x52
+#define MCU_LED_AG_CONF 0x52
#define MCU_LED_ACT_CONF 0x53
#define MCU_LED_LED_POLARITY 0x54
#define MCU_RADAR 0x60
@@ -2799,7 +2801,7 @@ enum rt2800_eeprom_word {
#define MCU_FREQ_OFFSET 0x74
#define MCU_BBP_SIGNAL 0x80
#define MCU_POWER_SAVE 0x83
-#define MCU_BAND_SELECT 0x91
+#define MCU_BAND_SELECT 0x91
/*
* MCU mailbox tokens
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index 88ce656..aa87894 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -278,12 +278,9 @@ static const unsigned int rt2800_eeprom_map_ext[EEPROM_WORD_COUNT] = {
[EEPROM_LNA] = 0x0026,
[EEPROM_EXT_LNA2] = 0x0027,
[EEPROM_RSSI_BG] = 0x0028,
- [EEPROM_TXPOWER_DELTA] = 0x0028, /* Overlaps with RSSI_BG */
[EEPROM_RSSI_BG2] = 0x0029,
- [EEPROM_TXMIXER_GAIN_BG] = 0x0029, /* Overlaps with RSSI_BG2 */
[EEPROM_RSSI_A] = 0x002a,
[EEPROM_RSSI_A2] = 0x002b,
- [EEPROM_TXMIXER_GAIN_A] = 0x002b, /* Overlaps with RSSI_A2 */
[EEPROM_TXPOWER_BG1] = 0x0030,
[EEPROM_TXPOWER_BG2] = 0x0037,
[EEPROM_EXT_TXPOWER_BG3] = 0x003e,
@@ -1783,7 +1780,7 @@ void rt2800_config_ant(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant)
rt2800_bbp_read(rt2x00dev, 3, &r3);
if (rt2x00_rt(rt2x00dev, RT3572) &&
- test_bit(CAPABILITY_BT_COEXIST, &rt2x00dev->cap_flags))
+ rt2x00_has_cap_bt_coexist(rt2x00dev))
rt2800_config_3572bt_ant(rt2x00dev);
/*
@@ -1795,7 +1792,7 @@ void rt2800_config_ant(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant)
break;
case 2:
if (rt2x00_rt(rt2x00dev, RT3572) &&
- test_bit(CAPABILITY_BT_COEXIST, &rt2x00dev->cap_flags))
+ rt2x00_has_cap_bt_coexist(rt2x00dev))
rt2x00_set_field8(&r1, BBP1_TX_ANTENNA, 1);
else
rt2x00_set_field8(&r1, BBP1_TX_ANTENNA, 2);
@@ -1825,7 +1822,7 @@ void rt2800_config_ant(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant)
break;
case 2:
if (rt2x00_rt(rt2x00dev, RT3572) &&
- test_bit(CAPABILITY_BT_COEXIST, &rt2x00dev->cap_flags)) {
+ rt2x00_has_cap_bt_coexist(rt2x00dev)) {
rt2x00_set_field8(&r3, BBP3_RX_ADC, 1);
rt2x00_set_field8(&r3, BBP3_RX_ANTENNA,
rt2x00dev->curr_band == IEEE80211_BAND_5GHZ);
@@ -2029,13 +2026,6 @@ static void rt2800_config_channel_rf3xxx(struct rt2x00_dev *rt2x00dev,
rt2x00dev->default_ant.tx_chain_num <= 2);
rt2800_rfcsr_write(rt2x00dev, 1, rfcsr);
- rt2800_rfcsr_read(rt2x00dev, 30, &rfcsr);
- rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 1);
- rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
- msleep(1);
- rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 0);
- rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
-
rt2800_rfcsr_read(rt2x00dev, 23, &rfcsr);
rt2x00_set_field8(&rfcsr, RFCSR23_FREQ_OFFSET, rt2x00dev->freq_offset);
rt2800_rfcsr_write(rt2x00dev, 23, rfcsr);
@@ -2141,7 +2131,7 @@ static void rt2800_config_channel_rf3052(struct rt2x00_dev *rt2x00dev,
rt2x00_set_field8(&rfcsr, RFCSR1_TX1_PD, 0);
rt2x00_set_field8(&rfcsr, RFCSR1_RX2_PD, 0);
rt2x00_set_field8(&rfcsr, RFCSR1_TX2_PD, 0);
- if (test_bit(CAPABILITY_BT_COEXIST, &rt2x00dev->cap_flags)) {
+ if (rt2x00_has_cap_bt_coexist(rt2x00dev)) {
if (rf->channel <= 14) {
rt2x00_set_field8(&rfcsr, RFCSR1_RX0_PD, 1);
rt2x00_set_field8(&rfcsr, RFCSR1_TX0_PD, 1);
@@ -2674,7 +2664,7 @@ static void rt2800_config_channel_rf53xx(struct rt2x00_dev *rt2x00dev,
if (rf->channel <= 14) {
int idx = rf->channel-1;
- if (test_bit(CAPABILITY_BT_COEXIST, &rt2x00dev->cap_flags)) {
+ if (rt2x00_has_cap_bt_coexist(rt2x00dev)) {
if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F)) {
/* r55/r59 value array of channel 1~14 */
static const char r55_bt_rev[] = {0x83, 0x83,
@@ -3152,6 +3142,7 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
case RF3322:
rt2800_config_channel_rf3322(rt2x00dev, conf, rf, info);
break;
+ case RF3070:
case RF5360:
case RF5370:
case RF5372:
@@ -3166,7 +3157,8 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
rt2800_config_channel_rf2xxx(rt2x00dev, conf, rf, info);
}
- if (rt2x00_rf(rt2x00dev, RF3290) ||
+ if (rt2x00_rf(rt2x00dev, RF3070) ||
+ rt2x00_rf(rt2x00dev, RF3290) ||
rt2x00_rf(rt2x00dev, RF3322) ||
rt2x00_rf(rt2x00dev, RF5360) ||
rt2x00_rf(rt2x00dev, RF5370) ||
@@ -3218,8 +3210,7 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
if (rf->channel <= 14) {
if (!rt2x00_rt(rt2x00dev, RT5390) &&
!rt2x00_rt(rt2x00dev, RT5392)) {
- if (test_bit(CAPABILITY_EXTERNAL_LNA_BG,
- &rt2x00dev->cap_flags)) {
+ if (rt2x00_has_cap_external_lna_bg(rt2x00dev)) {
rt2800_bbp_write(rt2x00dev, 82, 0x62);
rt2800_bbp_write(rt2x00dev, 75, 0x46);
} else {
@@ -3244,7 +3235,7 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
if (rt2x00_rt(rt2x00dev, RT3593))
rt2800_bbp_write(rt2x00dev, 83, 0x9a);
- if (test_bit(CAPABILITY_EXTERNAL_LNA_A, &rt2x00dev->cap_flags))
+ if (rt2x00_has_cap_external_lna_a(rt2x00dev))
rt2800_bbp_write(rt2x00dev, 75, 0x46);
else
rt2800_bbp_write(rt2x00dev, 75, 0x50);
@@ -3280,7 +3271,7 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
/* Turn on primary PAs */
rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_A0_EN,
rf->channel > 14);
- if (test_bit(CAPABILITY_BT_COEXIST, &rt2x00dev->cap_flags))
+ if (rt2x00_has_cap_bt_coexist(rt2x00dev))
rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_G0_EN, 1);
else
rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_G0_EN,
@@ -3311,33 +3302,50 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
rt2800_register_write(rt2x00dev, TX_PIN_CFG, tx_pin);
- if (rt2x00_rt(rt2x00dev, RT3572))
+ if (rt2x00_rt(rt2x00dev, RT3572)) {
rt2800_rfcsr_write(rt2x00dev, 8, 0x80);
+ /* AGC init */
+ if (rf->channel <= 14)
+ reg = 0x1c + (2 * rt2x00dev->lna_gain);
+ else
+ reg = 0x22 + ((rt2x00dev->lna_gain * 5) / 3);
+
+ rt2800_bbp_write_with_rx_chain(rt2x00dev, 66, reg);
+ }
+
if (rt2x00_rt(rt2x00dev, RT3593)) {
- if (rt2x00_is_usb(rt2x00dev)) {
- rt2800_register_read(rt2x00dev, GPIO_CTRL, &reg);
+ rt2800_register_read(rt2x00dev, GPIO_CTRL, &reg);
- /* Band selection. GPIO #8 controls all paths */
+ /* Band selection */
+ if (rt2x00_is_usb(rt2x00dev) ||
+ rt2x00_is_pcie(rt2x00dev)) {
+ /* GPIO #8 controls all paths */
rt2x00_set_field32(&reg, GPIO_CTRL_DIR8, 0);
if (rf->channel <= 14)
rt2x00_set_field32(&reg, GPIO_CTRL_VAL8, 1);
else
rt2x00_set_field32(&reg, GPIO_CTRL_VAL8, 0);
+ }
+ /* LNA PE control. */
+ if (rt2x00_is_usb(rt2x00dev)) {
+ /* GPIO #4 controls PE0 and PE1,
+ * GPIO #7 controls PE2
+ */
rt2x00_set_field32(&reg, GPIO_CTRL_DIR4, 0);
rt2x00_set_field32(&reg, GPIO_CTRL_DIR7, 0);
- /* LNA PE control.
- * GPIO #4 controls PE0 and PE1,
- * GPIO #7 controls PE2
- */
rt2x00_set_field32(&reg, GPIO_CTRL_VAL4, 1);
rt2x00_set_field32(&reg, GPIO_CTRL_VAL7, 1);
-
- rt2800_register_write(rt2x00dev, GPIO_CTRL, reg);
+ } else if (rt2x00_is_pcie(rt2x00dev)) {
+ /* GPIO #4 controls PE0, PE1 and PE2 */
+ rt2x00_set_field32(&reg, GPIO_CTRL_DIR4, 0);
+ rt2x00_set_field32(&reg, GPIO_CTRL_VAL4, 1);
}
+ rt2800_register_write(rt2x00dev, GPIO_CTRL, reg);
+
/* AGC init */
if (rf->channel <= 14)
reg = 0x1c + 2 * rt2x00dev->lna_gain;
@@ -3565,7 +3573,7 @@ static int rt2800_get_txpower_reg_delta(struct rt2x00_dev *rt2x00dev,
{
int delta;
- if (test_bit(CAPABILITY_POWER_LIMIT, &rt2x00dev->cap_flags))
+ if (rt2x00_has_cap_power_limit(rt2x00dev))
return 0;
/*
@@ -3594,7 +3602,7 @@ static u8 rt2800_compensate_txpower(struct rt2x00_dev *rt2x00dev, int is_rate_b,
if (rt2x00_rt(rt2x00dev, RT3593))
return min_t(u8, txpower, 0xc);
- if (test_bit(CAPABILITY_POWER_LIMIT, &rt2x00dev->cap_flags)) {
+ if (rt2x00_has_cap_power_limit(rt2x00dev)) {
/*
* Check if eirp txpower exceed txpower_limit.
* We use OFDM 6M as criterion and its eirp txpower
@@ -4264,6 +4272,7 @@ void rt2800_vco_calibration(struct rt2x00_dev *rt2x00dev)
rt2800_rfcsr_write(rt2x00dev, 7, rfcsr);
break;
case RF3053:
+ case RF3070:
case RF3290:
case RF5360:
case RF5370:
@@ -4405,6 +4414,7 @@ static u8 rt2800_get_default_vgc(struct rt2x00_dev *rt2x00dev)
rt2x00_rt(rt2x00dev, RT3290) ||
rt2x00_rt(rt2x00dev, RT3390) ||
rt2x00_rt(rt2x00dev, RT3572) ||
+ rt2x00_rt(rt2x00dev, RT3593) ||
rt2x00_rt(rt2x00dev, RT5390) ||
rt2x00_rt(rt2x00dev, RT5392) ||
rt2x00_rt(rt2x00dev, RT5592))
@@ -4412,8 +4422,8 @@ static u8 rt2800_get_default_vgc(struct rt2x00_dev *rt2x00dev)
else
vgc = 0x2e + rt2x00dev->lna_gain;
} else { /* 5GHZ band */
- if (rt2x00_rt(rt2x00dev, RT3572))
- vgc = 0x22 + (rt2x00dev->lna_gain * 5) / 3;
+ if (rt2x00_rt(rt2x00dev, RT3593))
+ vgc = 0x20 + (rt2x00dev->lna_gain * 5) / 3;
else if (rt2x00_rt(rt2x00dev, RT5592))
vgc = 0x24 + (2 * rt2x00dev->lna_gain);
else {
@@ -4431,11 +4441,17 @@ static inline void rt2800_set_vgc(struct rt2x00_dev *rt2x00dev,
struct link_qual *qual, u8 vgc_level)
{
if (qual->vgc_level != vgc_level) {
- if (rt2x00_rt(rt2x00dev, RT5592)) {
+ if (rt2x00_rt(rt2x00dev, RT3572) ||
+ rt2x00_rt(rt2x00dev, RT3593)) {
+ rt2800_bbp_write_with_rx_chain(rt2x00dev, 66,
+ vgc_level);
+ } else if (rt2x00_rt(rt2x00dev, RT5592)) {
rt2800_bbp_write(rt2x00dev, 83, qual->rssi > -65 ? 0x4a : 0x7a);
rt2800_bbp_write_with_rx_chain(rt2x00dev, 66, vgc_level);
- } else
+ } else {
rt2800_bbp_write(rt2x00dev, 66, vgc_level);
+ }
+
qual->vgc_level = vgc_level;
qual->vgc_level_reg = vgc_level;
}
@@ -4454,17 +4470,35 @@ void rt2800_link_tuner(struct rt2x00_dev *rt2x00dev, struct link_qual *qual,
if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860C))
return;
- /*
- * When RSSI is better then -80 increase VGC level with 0x10, except
- * for rt5592 chip.
+
+ /* When RSSI is better than a certain threshold, increase VGC
+	 * with a chip-specific value in order to improve the balance
+	 * between sensitivity and noise isolation.
*/
vgc = rt2800_get_default_vgc(rt2x00dev);
- if (rt2x00_rt(rt2x00dev, RT5592) && qual->rssi > -65)
- vgc += 0x20;
- else if (qual->rssi > -80)
- vgc += 0x10;
+ switch (rt2x00dev->chip.rt) {
+ case RT3572:
+ case RT3593:
+ if (qual->rssi > -65) {
+ if (rt2x00dev->curr_band == IEEE80211_BAND_2GHZ)
+ vgc += 0x20;
+ else
+ vgc += 0x10;
+ }
+ break;
+
+ case RT5592:
+ if (qual->rssi > -65)
+ vgc += 0x20;
+ break;
+
+ default:
+ if (qual->rssi > -80)
+ vgc += 0x10;
+ break;
+ }
rt2800_set_vgc(rt2x00dev, qual, vgc);
}
@@ -5489,7 +5523,7 @@ static void rt2800_init_bbp_53xx(struct rt2x00_dev *rt2x00dev)
ant = (div_mode == 3) ? 1 : 0;
/* check if this is a Bluetooth combo card */
- if (test_bit(CAPABILITY_BT_COEXIST, &rt2x00dev->cap_flags)) {
+ if (rt2x00_has_cap_bt_coexist(rt2x00dev)) {
u32 reg;
rt2800_register_read(rt2x00dev, GPIO_CTRL, &reg);
@@ -5798,7 +5832,7 @@ static void rt2800_normal_mode_setup_3xxx(struct rt2x00_dev *rt2x00dev)
rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) ||
rt2x00_rt_rev_lt(rt2x00dev, RT3090, REV_RT3090E) ||
rt2x00_rt_rev_lt(rt2x00dev, RT3390, REV_RT3390E)) {
- if (!test_bit(CAPABILITY_EXTERNAL_LNA_BG, &rt2x00dev->cap_flags))
+ if (!rt2x00_has_cap_external_lna_bg(rt2x00dev))
rt2x00_set_field8(&rfcsr, RFCSR17_R, 1);
}
@@ -5985,7 +6019,7 @@ static void rt2800_init_rfcsr_30xx(struct rt2x00_dev *rt2x00dev)
rt2800_rfcsr_write(rt2x00dev, 20, 0xba);
rt2800_rfcsr_write(rt2x00dev, 21, 0xdb);
rt2800_rfcsr_write(rt2x00dev, 24, 0x16);
- rt2800_rfcsr_write(rt2x00dev, 25, 0x01);
+ rt2800_rfcsr_write(rt2x00dev, 25, 0x03);
rt2800_rfcsr_write(rt2x00dev, 29, 0x1f);
if (rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070F)) {
@@ -6441,7 +6475,7 @@ static void rt2800_init_rfcsr_5390(struct rt2x00_dev *rt2x00dev)
rt2800_rfcsr_write(rt2x00dev, 28, 0x00);
rt2800_rfcsr_write(rt2x00dev, 29, 0x10);
- rt2800_rfcsr_write(rt2x00dev, 30, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 30, 0x10);
rt2800_rfcsr_write(rt2x00dev, 31, 0x80);
rt2800_rfcsr_write(rt2x00dev, 32, 0x80);
rt2800_rfcsr_write(rt2x00dev, 33, 0x00);
@@ -6479,7 +6513,7 @@ static void rt2800_init_rfcsr_5390(struct rt2x00_dev *rt2x00dev)
rt2800_rfcsr_write(rt2x00dev, 56, 0x22);
rt2800_rfcsr_write(rt2x00dev, 57, 0x80);
rt2800_rfcsr_write(rt2x00dev, 58, 0x7f);
- rt2800_rfcsr_write(rt2x00dev, 59, 0x63);
+ rt2800_rfcsr_write(rt2x00dev, 59, 0x8f);
rt2800_rfcsr_write(rt2x00dev, 60, 0x45);
if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F))
@@ -6499,7 +6533,6 @@ static void rt2800_init_rfcsr_5392(struct rt2x00_dev *rt2x00dev)
rt2800_rf_init_calibration(rt2x00dev, 2);
rt2800_rfcsr_write(rt2x00dev, 1, 0x17);
- rt2800_rfcsr_write(rt2x00dev, 2, 0x80);
rt2800_rfcsr_write(rt2x00dev, 3, 0x88);
rt2800_rfcsr_write(rt2x00dev, 5, 0x10);
rt2800_rfcsr_write(rt2x00dev, 6, 0xe0);
@@ -6653,17 +6686,20 @@ int rt2800_enable_radio(struct rt2x00_dev *rt2x00dev)
u16 word;
/*
- * Initialize all registers.
+ * Initialize MAC registers.
*/
if (unlikely(rt2800_wait_wpdma_ready(rt2x00dev) ||
rt2800_init_registers(rt2x00dev)))
return -EIO;
+ /*
+	 * Wait for the BBP/RF to wake up.
+ */
if (unlikely(rt2800_wait_bbp_rf_ready(rt2x00dev)))
return -EIO;
/*
- * Send signal to firmware during boot time.
+	 * Send a signal at boot time to initialize the firmware.
*/
rt2800_register_write(rt2x00dev, H2M_BBP_AGENT, 0);
rt2800_register_write(rt2x00dev, H2M_MAILBOX_CSR, 0);
@@ -6672,9 +6708,15 @@ int rt2800_enable_radio(struct rt2x00_dev *rt2x00dev)
rt2800_mcu_request(rt2x00dev, MCU_BOOT_SIGNAL, 0, 0, 0);
msleep(1);
+ /*
+ * Make sure BBP is up and running.
+ */
if (unlikely(rt2800_wait_bbp_ready(rt2x00dev)))
return -EIO;
+ /*
+ * Initialize BBP/RF registers.
+ */
rt2800_init_bbp(rt2x00dev);
rt2800_init_rfcsr(rt2x00dev);
@@ -7021,6 +7063,7 @@ static int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
case RF3022:
case RF3052:
case RF3053:
+ case RF3070:
case RF3290:
case RF3320:
case RF3322:
@@ -7203,7 +7246,7 @@ static const struct rf_channel rf_vals[] = {
/*
* RF value list for rt3xxx
- * Supports: 2.4 GHz (all) & 5.2 GHz (RF3052)
+ * Supports: 2.4 GHz (all) & 5.2 GHz (RF3052 & RF3053)
*/
static const struct rf_channel rf_vals_3x[] = {
{1, 241, 2, 2 },
@@ -7399,72 +7442,6 @@ static const struct rf_channel rf_vals_5592_xtal40[] = {
{196, 83, 0, 12, 1},
};
-static const struct rf_channel rf_vals_3053[] = {
- /* Channel, N, R, K */
- {1, 241, 2, 2},
- {2, 241, 2, 7},
- {3, 242, 2, 2},
- {4, 242, 2, 7},
- {5, 243, 2, 2},
- {6, 243, 2, 7},
- {7, 244, 2, 2},
- {8, 244, 2, 7},
- {9, 245, 2, 2},
- {10, 245, 2, 7},
- {11, 246, 2, 2},
- {12, 246, 2, 7},
- {13, 247, 2, 2},
- {14, 248, 2, 4},
-
- {36, 0x56, 0, 4},
- {38, 0x56, 0, 6},
- {40, 0x56, 0, 8},
- {44, 0x57, 0, 0},
- {46, 0x57, 0, 2},
- {48, 0x57, 0, 4},
- {52, 0x57, 0, 8},
- {54, 0x57, 0, 10},
- {56, 0x58, 0, 0},
- {60, 0x58, 0, 4},
- {62, 0x58, 0, 6},
- {64, 0x58, 0, 8},
-
- {100, 0x5B, 0, 8},
- {102, 0x5B, 0, 10},
- {104, 0x5C, 0, 0},
- {108, 0x5C, 0, 4},
- {110, 0x5C, 0, 6},
- {112, 0x5C, 0, 8},
-
- /* NOTE: Channel 114 has been removed intentionally.
- * The EEPROM contains no TX power values for that,
- * and it is disabled in the vendor driver as well.
- */
-
- {116, 0x5D, 0, 0},
- {118, 0x5D, 0, 2},
- {120, 0x5D, 0, 4},
- {124, 0x5D, 0, 8},
- {126, 0x5D, 0, 10},
- {128, 0x5E, 0, 0},
- {132, 0x5E, 0, 4},
- {134, 0x5E, 0, 6},
- {136, 0x5E, 0, 8},
- {140, 0x5F, 0, 0},
-
- {149, 0x5F, 0, 9},
- {151, 0x5F, 0, 11},
- {153, 0x60, 0, 1},
- {157, 0x60, 0, 5},
- {159, 0x60, 0, 7},
- {161, 0x60, 0, 9},
- {165, 0x61, 0, 1},
- {167, 0x61, 0, 3},
- {169, 0x61, 0, 5},
- {171, 0x61, 0, 7},
- {173, 0x61, 0, 9},
-};
-
static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
{
struct hw_mode_spec *spec = &rt2x00dev->spec;
@@ -7543,6 +7520,7 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
rt2x00_rf(rt2x00dev, RF2020) ||
rt2x00_rf(rt2x00dev, RF3021) ||
rt2x00_rf(rt2x00dev, RF3022) ||
+ rt2x00_rf(rt2x00dev, RF3070) ||
rt2x00_rf(rt2x00dev, RF3290) ||
rt2x00_rf(rt2x00dev, RF3320) ||
rt2x00_rf(rt2x00dev, RF3322) ||
@@ -7553,14 +7531,11 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
rt2x00_rf(rt2x00dev, RF5392)) {
spec->num_channels = 14;
spec->channels = rf_vals_3x;
- } else if (rt2x00_rf(rt2x00dev, RF3052)) {
+ } else if (rt2x00_rf(rt2x00dev, RF3052) ||
+ rt2x00_rf(rt2x00dev, RF3053)) {
spec->supported_bands |= SUPPORT_BAND_5GHZ;
spec->num_channels = ARRAY_SIZE(rf_vals_3x);
spec->channels = rf_vals_3x;
- } else if (rt2x00_rf(rt2x00dev, RF3053)) {
- spec->supported_bands |= SUPPORT_BAND_5GHZ;
- spec->num_channels = ARRAY_SIZE(rf_vals_3053);
- spec->channels = rf_vals_3053;
} else if (rt2x00_rf(rt2x00dev, RF5592)) {
spec->supported_bands |= SUPPORT_BAND_5GHZ;
@@ -7671,6 +7646,7 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
case RF3320:
case RF3052:
case RF3053:
+ case RF3070:
case RF3290:
case RF5360:
case RF5370:
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index 96961b9..96677ce5 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -1176,6 +1176,8 @@ static struct usb_device_id rt2800usb_device_table[] = {
/* Linksys */
{ USB_DEVICE(0x13b1, 0x002f) },
{ USB_DEVICE(0x1737, 0x0079) },
+ /* Logitec */
+ { USB_DEVICE(0x0789, 0x0170) },
/* Ralink */
{ USB_DEVICE(0x148f, 0x3572) },
/* Sitecom */
@@ -1199,6 +1201,8 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x050d, 0x1103) },
/* Cameo */
{ USB_DEVICE(0x148f, 0xf301) },
+ /* D-Link */
+ { USB_DEVICE(0x2001, 0x3c1f) },
/* Edimax */
{ USB_DEVICE(0x7392, 0x7733) },
/* Hawking */
@@ -1212,6 +1216,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x0789, 0x016b) },
/* NETGEAR */
{ USB_DEVICE(0x0846, 0x9012) },
+ { USB_DEVICE(0x0846, 0x9013) },
{ USB_DEVICE(0x0846, 0x9019) },
/* Planex */
{ USB_DEVICE(0x2019, 0xed19) },
@@ -1220,6 +1225,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
/* Sitecom */
{ USB_DEVICE(0x0df6, 0x0067) },
{ USB_DEVICE(0x0df6, 0x006a) },
+ { USB_DEVICE(0x0df6, 0x006e) },
/* ZyXEL */
{ USB_DEVICE(0x0586, 0x3421) },
#endif
@@ -1236,6 +1242,9 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x2001, 0x3c1c) },
{ USB_DEVICE(0x2001, 0x3c1d) },
{ USB_DEVICE(0x2001, 0x3c1e) },
+ { USB_DEVICE(0x2001, 0x3c20) },
+ { USB_DEVICE(0x2001, 0x3c22) },
+ { USB_DEVICE(0x2001, 0x3c23) },
/* LG innotek */
{ USB_DEVICE(0x043e, 0x7a22) },
{ USB_DEVICE(0x043e, 0x7a42) },
@@ -1258,12 +1267,17 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x043e, 0x7a32) },
/* AVM GmbH */
{ USB_DEVICE(0x057c, 0x8501) },
- /* D-Link DWA-160-B2 */
+ /* Buffalo */
+ { USB_DEVICE(0x0411, 0x0241) },
+ /* D-Link */
{ USB_DEVICE(0x2001, 0x3c1a) },
+ { USB_DEVICE(0x2001, 0x3c21) },
/* Proware */
{ USB_DEVICE(0x043e, 0x7a13) },
/* Ralink */
{ USB_DEVICE(0x148f, 0x5572) },
+ /* TRENDnet */
+ { USB_DEVICE(0x20f4, 0x724a) },
#endif
#ifdef CONFIG_RT2800USB_UNKNOWN
/*
@@ -1333,6 +1347,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x1d4d, 0x0010) },
/* Planex */
{ USB_DEVICE(0x2019, 0xab24) },
+ { USB_DEVICE(0x2019, 0xab29) },
/* Qcom */
{ USB_DEVICE(0x18e8, 0x6259) },
/* RadioShack */
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index fe4c572..e4ba2ce 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -39,6 +39,7 @@
#include <linux/input-polldev.h>
#include <linux/kfifo.h>
#include <linux/hrtimer.h>
+#include <linux/average.h>
#include <net/mac80211.h>
@@ -138,17 +139,6 @@
#define SHORT_EIFS ( SIFS + SHORT_DIFS + \
GET_DURATION(IEEE80211_HEADER + ACK_SIZE, 10) )
-/*
- * Structure for average calculation
- * The avg field contains the actual average value,
- * but avg_weight is internally used during calculations
- * to prevent rounding errors.
- */
-struct avg_val {
- int avg;
- int avg_weight;
-};
-
enum rt2x00_chip_intf {
RT2X00_CHIP_INTF_PCI,
RT2X00_CHIP_INTF_PCIE,
@@ -297,7 +287,7 @@ struct link_ant {
* Similar to the avg_rssi in the link_qual structure
* this value is updated by using the walking average.
*/
- struct avg_val rssi_ant;
+ struct ewma rssi_ant;
};
/*
@@ -326,7 +316,7 @@ struct link {
/*
* Currently active average RSSI value
*/
- struct avg_val avg_rssi;
+ struct ewma avg_rssi;
/*
* Work structure for scheduling periodic link tuning.
@@ -1179,6 +1169,93 @@ static inline bool rt2x00_is_soc(struct rt2x00_dev *rt2x00dev)
return rt2x00_intf(rt2x00dev, RT2X00_CHIP_INTF_SOC);
}
+/* Helpers for capability flags */
+
+static inline bool
+rt2x00_has_cap_flag(struct rt2x00_dev *rt2x00dev,
+ enum rt2x00_capability_flags cap_flag)
+{
+ return test_bit(cap_flag, &rt2x00dev->cap_flags);
+}
+
+static inline bool
+rt2x00_has_cap_hw_crypto(struct rt2x00_dev *rt2x00dev)
+{
+ return rt2x00_has_cap_flag(rt2x00dev, CAPABILITY_HW_CRYPTO);
+}
+
+static inline bool
+rt2x00_has_cap_power_limit(struct rt2x00_dev *rt2x00dev)
+{
+ return rt2x00_has_cap_flag(rt2x00dev, CAPABILITY_POWER_LIMIT);
+}
+
+static inline bool
+rt2x00_has_cap_control_filters(struct rt2x00_dev *rt2x00dev)
+{
+ return rt2x00_has_cap_flag(rt2x00dev, CAPABILITY_CONTROL_FILTERS);
+}
+
+static inline bool
+rt2x00_has_cap_control_filter_pspoll(struct rt2x00_dev *rt2x00dev)
+{
+ return rt2x00_has_cap_flag(rt2x00dev, CAPABILITY_CONTROL_FILTER_PSPOLL);
+}
+
+static inline bool
+rt2x00_has_cap_pre_tbtt_interrupt(struct rt2x00_dev *rt2x00dev)
+{
+ return rt2x00_has_cap_flag(rt2x00dev, CAPABILITY_PRE_TBTT_INTERRUPT);
+}
+
+static inline bool
+rt2x00_has_cap_link_tuning(struct rt2x00_dev *rt2x00dev)
+{
+ return rt2x00_has_cap_flag(rt2x00dev, CAPABILITY_LINK_TUNING);
+}
+
+static inline bool
+rt2x00_has_cap_frame_type(struct rt2x00_dev *rt2x00dev)
+{
+ return rt2x00_has_cap_flag(rt2x00dev, CAPABILITY_FRAME_TYPE);
+}
+
+static inline bool
+rt2x00_has_cap_rf_sequence(struct rt2x00_dev *rt2x00dev)
+{
+ return rt2x00_has_cap_flag(rt2x00dev, CAPABILITY_RF_SEQUENCE);
+}
+
+static inline bool
+rt2x00_has_cap_external_lna_a(struct rt2x00_dev *rt2x00dev)
+{
+ return rt2x00_has_cap_flag(rt2x00dev, CAPABILITY_EXTERNAL_LNA_A);
+}
+
+static inline bool
+rt2x00_has_cap_external_lna_bg(struct rt2x00_dev *rt2x00dev)
+{
+ return rt2x00_has_cap_flag(rt2x00dev, CAPABILITY_EXTERNAL_LNA_BG);
+}
+
+static inline bool
+rt2x00_has_cap_double_antenna(struct rt2x00_dev *rt2x00dev)
+{
+ return rt2x00_has_cap_flag(rt2x00dev, CAPABILITY_DOUBLE_ANTENNA);
+}
+
+static inline bool
+rt2x00_has_cap_bt_coexist(struct rt2x00_dev *rt2x00dev)
+{
+ return rt2x00_has_cap_flag(rt2x00dev, CAPABILITY_BT_COEXIST);
+}
+
+static inline bool
+rt2x00_has_cap_vco_recalibration(struct rt2x00_dev *rt2x00dev)
+{
+ return rt2x00_has_cap_flag(rt2x00dev, CAPABILITY_VCO_RECALIBRATION);
+}
+
/**
* rt2x00queue_map_txskb - Map a skb into DMA for TX purposes.
* @entry: Pointer to &struct queue_entry
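The block of inline helpers added above exists purely to name each capability test once; every open-coded test_bit() on cap_flags in the hunks that follow collapses to a single call. A minimal sketch of the pattern (illustrative only; the struct and enum are as declared earlier in this header):

	/* before: the bit test is spelled out at every call site */
	if (test_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags))
		head_size += 8;

	/* after: the capability is named, the storage detail hidden */
	if (rt2x00_has_cap_hw_crypto(rt2x00dev))
		head_size += 8;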
diff --git a/drivers/net/wireless/rt2x00/rt2x00crypto.c b/drivers/net/wireless/rt2x00/rt2x00crypto.c
index 1ca4c7f..3db0d99 100644
--- a/drivers/net/wireless/rt2x00/rt2x00crypto.c
+++ b/drivers/net/wireless/rt2x00/rt2x00crypto.c
@@ -52,7 +52,7 @@ void rt2x00crypto_create_tx_descriptor(struct rt2x00_dev *rt2x00dev,
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
- if (!test_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags) || !hw_key)
+ if (!rt2x00_has_cap_hw_crypto(rt2x00dev) || !hw_key)
return;
__set_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags);
@@ -80,7 +80,7 @@ unsigned int rt2x00crypto_tx_overhead(struct rt2x00_dev *rt2x00dev,
struct ieee80211_key_conf *key = tx_info->control.hw_key;
unsigned int overhead = 0;
- if (!test_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags) || !key)
+ if (!rt2x00_has_cap_hw_crypto(rt2x00dev) || !key)
return overhead;
/*
diff --git a/drivers/net/wireless/rt2x00/rt2x00debug.c b/drivers/net/wireless/rt2x00/rt2x00debug.c
index fe7a7f6..7f7baae 100644
--- a/drivers/net/wireless/rt2x00/rt2x00debug.c
+++ b/drivers/net/wireless/rt2x00/rt2x00debug.c
@@ -750,7 +750,7 @@ void rt2x00debug_register(struct rt2x00_dev *rt2x00dev)
intf, &rt2x00debug_fop_queue_stats);
#ifdef CONFIG_RT2X00_LIB_CRYPTO
- if (test_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags))
+ if (rt2x00_has_cap_hw_crypto(rt2x00dev))
intf->crypto_stats_entry =
debugfs_create_file("crypto", S_IRUGO, intf->queue_folder,
intf, &rt2x00debug_fop_crypto_stats);
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index 712eea9..080b1fc 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -88,7 +88,7 @@ int rt2x00lib_enable_radio(struct rt2x00_dev *rt2x00dev)
rt2x00queue_start_queues(rt2x00dev);
rt2x00link_start_tuner(rt2x00dev);
rt2x00link_start_agc(rt2x00dev);
- if (test_bit(CAPABILITY_VCO_RECALIBRATION, &rt2x00dev->cap_flags))
+ if (rt2x00_has_cap_vco_recalibration(rt2x00dev))
rt2x00link_start_vcocal(rt2x00dev);
/*
@@ -113,7 +113,7 @@ void rt2x00lib_disable_radio(struct rt2x00_dev *rt2x00dev)
* Stop all queues
*/
rt2x00link_stop_agc(rt2x00dev);
- if (test_bit(CAPABILITY_VCO_RECALIBRATION, &rt2x00dev->cap_flags))
+ if (rt2x00_has_cap_vco_recalibration(rt2x00dev))
rt2x00link_stop_vcocal(rt2x00dev);
rt2x00link_stop_tuner(rt2x00dev);
rt2x00queue_stop_queues(rt2x00dev);
@@ -234,7 +234,7 @@ void rt2x00lib_beacondone(struct rt2x00_dev *rt2x00dev)
* here as they will fetch the next beacon directly prior to
* transmission.
*/
- if (test_bit(CAPABILITY_PRE_TBTT_INTERRUPT, &rt2x00dev->cap_flags))
+ if (rt2x00_has_cap_pre_tbtt_interrupt(rt2x00dev))
return;
/* fetch next beacon */
@@ -358,7 +358,7 @@ void rt2x00lib_txdone(struct queue_entry *entry,
	 * mac80211 will expect the same data to be present in the
	 * frame as it was passed to us.
*/
- if (test_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags))
+ if (rt2x00_has_cap_hw_crypto(rt2x00dev))
rt2x00crypto_tx_insert_iv(entry->skb, header_length);
/*
diff --git a/drivers/net/wireless/rt2x00/rt2x00link.c b/drivers/net/wireless/rt2x00/rt2x00link.c
index 8368aab..c2b3b66 100644
--- a/drivers/net/wireless/rt2x00/rt2x00link.c
+++ b/drivers/net/wireless/rt2x00/rt2x00link.c
@@ -35,50 +35,28 @@
*/
#define DEFAULT_RSSI -128
-/*
- * Helper struct and macro to work with moving/walking averages.
- * When adding a value to the average value the following calculation
- * is needed:
- *
- * avg_rssi = ((avg_rssi * 7) + rssi) / 8;
- *
- * The advantage of this approach is that we only need 1 variable
- * to store the average in (No need for a count and a total).
- * But more importantly, normal average values will over time
- * move less and less towards newly added values this results
- * that with link tuning, the device can have a very good RSSI
- * for a few minutes but when the device is moved away from the AP
- * the average will not decrease fast enough to compensate.
- * The walking average compensates this and will move towards
- * the new values correctly allowing a effective link tuning,
- * the speed of the average moving towards other values depends
- * on the value for the number of samples. The higher the number
- * of samples, the slower the average will move.
- * We use two variables to keep track of the average value to
- * compensate for the rounding errors. This can be a significant
- * error (>5dBm) if the factor is too low.
- */
-#define AVG_SAMPLES 8
-#define AVG_FACTOR 1000
-#define MOVING_AVERAGE(__avg, __val) \
-({ \
- struct avg_val __new; \
- __new.avg_weight = \
- (__avg).avg_weight ? \
- ((((__avg).avg_weight * ((AVG_SAMPLES) - 1)) + \
- ((__val) * (AVG_FACTOR))) / \
- (AVG_SAMPLES)) : \
- ((__val) * (AVG_FACTOR)); \
- __new.avg = __new.avg_weight / (AVG_FACTOR); \
- __new; \
-})
+/* Constants for EWMA calculations. */
+#define RT2X00_EWMA_FACTOR 1024
+#define RT2X00_EWMA_WEIGHT 8
+
+static inline int rt2x00link_get_avg_rssi(struct ewma *ewma)
+{
+ unsigned long avg;
+
+ avg = ewma_read(ewma);
+ if (avg)
+ return -avg;
+
+ return DEFAULT_RSSI;
+}
static int rt2x00link_antenna_get_link_rssi(struct rt2x00_dev *rt2x00dev)
{
struct link_ant *ant = &rt2x00dev->link.ant;
- if (ant->rssi_ant.avg && rt2x00dev->link.qual.rx_success)
- return ant->rssi_ant.avg;
+ if (rt2x00dev->link.qual.rx_success)
+ return rt2x00link_get_avg_rssi(&ant->rssi_ant);
+
return DEFAULT_RSSI;
}
@@ -100,8 +78,8 @@ static void rt2x00link_antenna_update_rssi_history(struct rt2x00_dev *rt2x00dev,
static void rt2x00link_antenna_reset(struct rt2x00_dev *rt2x00dev)
{
- rt2x00dev->link.ant.rssi_ant.avg = 0;
- rt2x00dev->link.ant.rssi_ant.avg_weight = 0;
+ ewma_init(&rt2x00dev->link.ant.rssi_ant, RT2X00_EWMA_FACTOR,
+ RT2X00_EWMA_WEIGHT);
}
static void rt2x00lib_antenna_diversity_sample(struct rt2x00_dev *rt2x00dev)
@@ -249,12 +227,12 @@ void rt2x00link_update_stats(struct rt2x00_dev *rt2x00dev,
/*
* Update global RSSI
*/
- link->avg_rssi = MOVING_AVERAGE(link->avg_rssi, rxdesc->rssi);
+ ewma_add(&link->avg_rssi, -rxdesc->rssi);
/*
* Update antenna RSSI
*/
- ant->rssi_ant = MOVING_AVERAGE(ant->rssi_ant, rxdesc->rssi);
+ ewma_add(&ant->rssi_ant, -rxdesc->rssi);
}
void rt2x00link_start_tuner(struct rt2x00_dev *rt2x00dev)
@@ -309,6 +287,8 @@ void rt2x00link_reset_tuner(struct rt2x00_dev *rt2x00dev, bool antenna)
*/
rt2x00dev->link.count = 0;
memset(qual, 0, sizeof(*qual));
+ ewma_init(&rt2x00dev->link.avg_rssi, RT2X00_EWMA_FACTOR,
+ RT2X00_EWMA_WEIGHT);
/*
* Restore the VGC level as stored in the registers,
@@ -363,17 +343,17 @@ static void rt2x00link_tuner(struct work_struct *work)
* collect the RSSI data we could use this. Otherwise we
* must fallback to the default RSSI value.
*/
- if (!link->avg_rssi.avg || !qual->rx_success)
+ if (!qual->rx_success)
qual->rssi = DEFAULT_RSSI;
else
- qual->rssi = link->avg_rssi.avg;
+ qual->rssi = rt2x00link_get_avg_rssi(&link->avg_rssi);
/*
* Check if link tuning is supported by the hardware, some hardware
* do not support link tuning at all, while other devices can disable
* the feature from the EEPROM.
*/
- if (test_bit(CAPABILITY_LINK_TUNING, &rt2x00dev->cap_flags))
+ if (rt2x00_has_cap_link_tuning(rt2x00dev))
rt2x00dev->ops->lib->link_tuner(rt2x00dev, qual, link->count);
/*
@@ -513,7 +493,7 @@ static void rt2x00link_vcocal(struct work_struct *work)
void rt2x00link_register(struct rt2x00_dev *rt2x00dev)
{
INIT_DELAYED_WORK(&rt2x00dev->link.agc_work, rt2x00link_agc);
- if (test_bit(CAPABILITY_VCO_RECALIBRATION, &rt2x00dev->cap_flags))
+ if (rt2x00_has_cap_vco_recalibration(rt2x00dev))
INIT_DELAYED_WORK(&rt2x00dev->link.vco_work, rt2x00link_vcocal);
INIT_DELAYED_WORK(&rt2x00dev->link.watchdog_work, rt2x00link_watchdog);
INIT_DELAYED_WORK(&rt2x00dev->link.work, rt2x00link_tuner);
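The conversion above trades the driver-private avg_val/MOVING_AVERAGE pair for the kernel's generic EWMA. Two details carry the weight: struct ewma stores only unsigned values, so the (negative dBm) RSSI is negated on insert and negated back on read, and an ewma_read() of 0 doubles as the "no samples yet" case that falls back to DEFAULT_RSSI. A self-contained sketch of the same usage, assuming the <linux/average.h> API of this era (ewma_init/ewma_add/ewma_read):

	#include <linux/average.h>

	static struct ewma rssi_avg;

	static void rssi_reset(void)
	{
		/* factor 1024 keeps ~10 fractional bits of precision;
		 * weight 8 matches the old (avg * 7 + val) / 8 decay. */
		ewma_init(&rssi_avg, 1024, 8);
	}

	static void rssi_sample(int rssi)	/* e.g. rssi = -67 dBm */
	{
		ewma_add(&rssi_avg, -rssi);	/* stored as 67 */
	}

	static int rssi_current(void)
	{
		unsigned long avg = ewma_read(&rssi_avg);

		return avg ? -(int)avg : -128;	/* DEFAULT_RSSI fallback */
	}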
diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
index f883802..51f17cf 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
@@ -382,11 +382,11 @@ void rt2x00mac_configure_filter(struct ieee80211_hw *hw,
 * of different types, but has no separate filter for PS Poll frames,
* FIF_CONTROL flag implies FIF_PSPOLL.
*/
- if (!test_bit(CAPABILITY_CONTROL_FILTERS, &rt2x00dev->cap_flags)) {
+ if (!rt2x00_has_cap_control_filters(rt2x00dev)) {
if (*total_flags & FIF_CONTROL || *total_flags & FIF_PSPOLL)
*total_flags |= FIF_CONTROL | FIF_PSPOLL;
}
- if (!test_bit(CAPABILITY_CONTROL_FILTER_PSPOLL, &rt2x00dev->cap_flags)) {
+ if (!rt2x00_has_cap_control_filter_pspoll(rt2x00dev)) {
if (*total_flags & FIF_CONTROL)
*total_flags |= FIF_PSPOLL;
}
@@ -469,7 +469,7 @@ int rt2x00mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
return 0;
- if (!test_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags))
+ if (!rt2x00_has_cap_hw_crypto(rt2x00dev))
return -EOPNOTSUPP;
/*
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.c b/drivers/net/wireless/rt2x00/rt2x00pci.c
index 76d95de..6c5d667 100644
--- a/drivers/net/wireless/rt2x00/rt2x00pci.c
+++ b/drivers/net/wireless/rt2x00/rt2x00pci.c
@@ -121,7 +121,7 @@ int rt2x00pci_probe(struct pci_dev *pci_dev, const struct rt2x00_ops *ops)
rt2x00dev->ops = ops;
rt2x00dev->hw = hw;
rt2x00dev->irq = pci_dev->irq;
- rt2x00dev->name = pci_name(pci_dev);
+ rt2x00dev->name = ops->name;
if (pci_is_pcie(pci_dev))
rt2x00_set_chip_intf(rt2x00dev, RT2X00_CHIP_INTF_PCIE);
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index 6c8a33b..50590b1 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -61,7 +61,7 @@ struct sk_buff *rt2x00queue_alloc_rxskb(struct queue_entry *entry, gfp_t gfp)
	 * at least 8 bytes available in headroom for IV/EIV
	 * and 8 bytes for ICV data as tailroom.
*/
- if (test_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags)) {
+ if (rt2x00_has_cap_hw_crypto(rt2x00dev)) {
head_size += 8;
tail_size += 8;
}
@@ -1033,38 +1033,21 @@ EXPORT_SYMBOL_GPL(rt2x00queue_stop_queue);
void rt2x00queue_flush_queue(struct data_queue *queue, bool drop)
{
- bool started;
bool tx_queue =
(queue->qid == QID_AC_VO) ||
(queue->qid == QID_AC_VI) ||
(queue->qid == QID_AC_BE) ||
(queue->qid == QID_AC_BK);
- mutex_lock(&queue->status_lock);
/*
- * If the queue has been started, we must stop it temporarily
- * to prevent any new frames to be queued on the device. If
- * we are not dropping the pending frames, the queue must
- * only be stopped in the software and not the hardware,
- * otherwise the queue will never become empty on its own.
+ * If we are not supposed to drop any pending
+ * frames, this means we must force a start (=kick)
+ * to the queue to make sure the hardware will
+ * start transmitting.
*/
- started = test_bit(QUEUE_STARTED, &queue->flags);
- if (started) {
- /*
- * Pause the queue
- */
- rt2x00queue_pause_queue(queue);
-
- /*
- * If we are not supposed to drop any pending
- * frames, this means we must force a start (=kick)
- * to the queue to make sure the hardware will
- * start transmitting.
- */
- if (!drop && tx_queue)
- queue->rt2x00dev->ops->lib->kick_queue(queue);
- }
+ if (!drop && tx_queue)
+ queue->rt2x00dev->ops->lib->kick_queue(queue);
/*
* Check if driver supports flushing, if that is the case we can
@@ -1080,14 +1063,6 @@ void rt2x00queue_flush_queue(struct data_queue *queue, bool drop)
if (unlikely(!rt2x00queue_empty(queue)))
rt2x00_warn(queue->rt2x00dev, "Queue %d failed to flush\n",
queue->qid);
-
- /*
- * Restore the queue to the previous status
- */
- if (started)
- rt2x00queue_unpause_queue(queue);
-
- mutex_unlock(&queue->status_lock);
}
EXPORT_SYMBOL_GPL(rt2x00queue_flush_queue);
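With the pause/unpause bracketing stripped out above, rt2x00queue_flush_queue() no longer stops anything itself; a caller that wants a hard reset now writes the sequence explicitly, which is exactly what the USB DMA watchdog hunk below does. Sketch of the resulting caller contract (the wrapper name is hypothetical):

	static void force_queue_reset(struct data_queue *queue)
	{
		rt2x00queue_stop_queue(queue);		/* block new frames */
		rt2x00queue_flush_queue(queue, true);	/* drop pending ones */
		rt2x00queue_start_queue(queue);		/* resume TX */
	}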
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c
index 8828987..4e12162 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.c
@@ -523,7 +523,9 @@ static void rt2x00usb_watchdog_tx_dma(struct data_queue *queue)
rt2x00_warn(queue->rt2x00dev, "TX queue %d DMA timed out, invoke forced reset\n",
queue->qid);
+ rt2x00queue_stop_queue(queue);
rt2x00queue_flush_queue(queue, true);
+ rt2x00queue_start_queue(queue);
}
static int rt2x00usb_dma_timeout(struct data_queue *queue)
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
index 54d3ddf..a5b69cb 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/rt2x00/rt61pci.c
@@ -685,7 +685,7 @@ static void rt61pci_config_antenna_2x(struct rt2x00_dev *rt2x00dev,
rt2x00_set_field8(&r3, BBP_R3_SMART_MODE, rt2x00_rf(rt2x00dev, RF2529));
rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END,
- !test_bit(CAPABILITY_FRAME_TYPE, &rt2x00dev->cap_flags));
+ !rt2x00_has_cap_frame_type(rt2x00dev));
/*
* Configure the RX antenna.
@@ -813,10 +813,10 @@ static void rt61pci_config_ant(struct rt2x00_dev *rt2x00dev,
if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) {
sel = antenna_sel_a;
- lna = test_bit(CAPABILITY_EXTERNAL_LNA_A, &rt2x00dev->cap_flags);
+ lna = rt2x00_has_cap_external_lna_a(rt2x00dev);
} else {
sel = antenna_sel_bg;
- lna = test_bit(CAPABILITY_EXTERNAL_LNA_BG, &rt2x00dev->cap_flags);
+ lna = rt2x00_has_cap_external_lna_bg(rt2x00dev);
}
for (i = 0; i < ARRAY_SIZE(antenna_sel_a); i++)
@@ -836,7 +836,7 @@ static void rt61pci_config_ant(struct rt2x00_dev *rt2x00dev,
else if (rt2x00_rf(rt2x00dev, RF2527))
rt61pci_config_antenna_2x(rt2x00dev, ant);
else if (rt2x00_rf(rt2x00dev, RF2529)) {
- if (test_bit(CAPABILITY_DOUBLE_ANTENNA, &rt2x00dev->cap_flags))
+ if (rt2x00_has_cap_double_antenna(rt2x00dev))
rt61pci_config_antenna_2x(rt2x00dev, ant);
else
rt61pci_config_antenna_2529(rt2x00dev, ant);
@@ -850,13 +850,13 @@ static void rt61pci_config_lna_gain(struct rt2x00_dev *rt2x00dev,
short lna_gain = 0;
if (libconf->conf->chandef.chan->band == IEEE80211_BAND_2GHZ) {
- if (test_bit(CAPABILITY_EXTERNAL_LNA_BG, &rt2x00dev->cap_flags))
+ if (rt2x00_has_cap_external_lna_bg(rt2x00dev))
lna_gain += 14;
rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_BG, &eeprom);
lna_gain -= rt2x00_get_field16(eeprom, EEPROM_RSSI_OFFSET_BG_1);
} else {
- if (test_bit(CAPABILITY_EXTERNAL_LNA_A, &rt2x00dev->cap_flags))
+ if (rt2x00_has_cap_external_lna_a(rt2x00dev))
lna_gain += 14;
rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_A, &eeprom);
@@ -1054,14 +1054,14 @@ static void rt61pci_link_tuner(struct rt2x00_dev *rt2x00dev,
if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) {
low_bound = 0x28;
up_bound = 0x48;
- if (test_bit(CAPABILITY_EXTERNAL_LNA_A, &rt2x00dev->cap_flags)) {
+ if (rt2x00_has_cap_external_lna_a(rt2x00dev)) {
low_bound += 0x10;
up_bound += 0x10;
}
} else {
low_bound = 0x20;
up_bound = 0x40;
- if (test_bit(CAPABILITY_EXTERNAL_LNA_BG, &rt2x00dev->cap_flags)) {
+ if (rt2x00_has_cap_external_lna_bg(rt2x00dev)) {
low_bound += 0x10;
up_bound += 0x10;
}
@@ -2578,7 +2578,7 @@ static int rt61pci_init_eeprom(struct rt2x00_dev *rt2x00dev)
* eeprom word.
*/
if (rt2x00_rf(rt2x00dev, RF2529) &&
- !test_bit(CAPABILITY_DOUBLE_ANTENNA, &rt2x00dev->cap_flags)) {
+ !rt2x00_has_cap_double_antenna(rt2x00dev)) {
rt2x00dev->default_ant.rx =
ANTENNA_A + rt2x00_get_field16(eeprom, EEPROM_NIC_RX_FIXED);
rt2x00dev->default_ant.tx =
@@ -2793,7 +2793,7 @@ static int rt61pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
spec->supported_bands = SUPPORT_BAND_2GHZ;
spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM;
- if (!test_bit(CAPABILITY_RF_SEQUENCE, &rt2x00dev->cap_flags)) {
+ if (!rt2x00_has_cap_rf_sequence(rt2x00dev)) {
spec->num_channels = 14;
spec->channels = rf_vals_noseq;
} else {
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index 1d3880e..1baf9c8 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -595,8 +595,8 @@ static void rt73usb_config_antenna_5x(struct rt2x00_dev *rt2x00dev,
switch (ant->rx) {
case ANTENNA_HW_DIVERSITY:
rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 2);
- temp = !test_bit(CAPABILITY_FRAME_TYPE, &rt2x00dev->cap_flags)
- && (rt2x00dev->curr_band != IEEE80211_BAND_5GHZ);
+ temp = !rt2x00_has_cap_frame_type(rt2x00dev) &&
+ (rt2x00dev->curr_band != IEEE80211_BAND_5GHZ);
rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END, temp);
break;
case ANTENNA_A:
@@ -636,7 +636,7 @@ static void rt73usb_config_antenna_2x(struct rt2x00_dev *rt2x00dev,
rt2x00_set_field8(&r3, BBP_R3_SMART_MODE, 0);
rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END,
- !test_bit(CAPABILITY_FRAME_TYPE, &rt2x00dev->cap_flags));
+ !rt2x00_has_cap_frame_type(rt2x00dev));
/*
* Configure the RX antenna.
@@ -709,10 +709,10 @@ static void rt73usb_config_ant(struct rt2x00_dev *rt2x00dev,
if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) {
sel = antenna_sel_a;
- lna = test_bit(CAPABILITY_EXTERNAL_LNA_A, &rt2x00dev->cap_flags);
+ lna = rt2x00_has_cap_external_lna_a(rt2x00dev);
} else {
sel = antenna_sel_bg;
- lna = test_bit(CAPABILITY_EXTERNAL_LNA_BG, &rt2x00dev->cap_flags);
+ lna = rt2x00_has_cap_external_lna_bg(rt2x00dev);
}
for (i = 0; i < ARRAY_SIZE(antenna_sel_a); i++)
@@ -740,7 +740,7 @@ static void rt73usb_config_lna_gain(struct rt2x00_dev *rt2x00dev,
short lna_gain = 0;
if (libconf->conf->chandef.chan->band == IEEE80211_BAND_2GHZ) {
- if (test_bit(CAPABILITY_EXTERNAL_LNA_BG, &rt2x00dev->cap_flags))
+ if (rt2x00_has_cap_external_lna_bg(rt2x00dev))
lna_gain += 14;
rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_BG, &eeprom);
@@ -930,7 +930,7 @@ static void rt73usb_link_tuner(struct rt2x00_dev *rt2x00dev,
low_bound = 0x28;
up_bound = 0x48;
- if (test_bit(CAPABILITY_EXTERNAL_LNA_A, &rt2x00dev->cap_flags)) {
+ if (rt2x00_has_cap_external_lna_a(rt2x00dev)) {
low_bound += 0x10;
up_bound += 0x10;
}
@@ -946,7 +946,7 @@ static void rt73usb_link_tuner(struct rt2x00_dev *rt2x00dev,
up_bound = 0x1c;
}
- if (test_bit(CAPABILITY_EXTERNAL_LNA_BG, &rt2x00dev->cap_flags)) {
+ if (rt2x00_has_cap_external_lna_bg(rt2x00dev)) {
low_bound += 0x14;
up_bound += 0x10;
}
@@ -1661,7 +1661,7 @@ static int rt73usb_agc_to_rssi(struct rt2x00_dev *rt2x00dev, int rxd_w1)
}
if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) {
- if (test_bit(CAPABILITY_EXTERNAL_LNA_A, &rt2x00dev->cap_flags)) {
+ if (rt2x00_has_cap_external_lna_a(rt2x00dev)) {
if (lna == 3 || lna == 2)
offset += 10;
} else {
diff --git a/drivers/net/wireless/rtl818x/rtl8180/dev.c b/drivers/net/wireless/rtl818x/rtl8180/dev.c
index fc207b2..a91506b 100644
--- a/drivers/net/wireless/rtl818x/rtl8180/dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8180/dev.c
@@ -1122,7 +1122,6 @@ static int rtl8180_probe(struct pci_dev *pdev,
iounmap(priv->map);
err_free_dev:
- pci_set_drvdata(pdev, NULL);
ieee80211_free_hw(dev);
err_free_reg:
diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c
index 8bb4a9a..9a78e3d 100644
--- a/drivers/net/wireless/rtlwifi/base.c
+++ b/drivers/net/wireless/rtlwifi/base.c
@@ -1613,6 +1613,35 @@ err_free:
}
EXPORT_SYMBOL(rtl_send_smps_action);
+void rtl_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ enum io_type iotype;
+
+ if (!is_hal_stop(rtlhal)) {
+ switch (operation) {
+ case SCAN_OPT_BACKUP:
+ iotype = IO_CMD_PAUSE_DM_BY_SCAN;
+ rtlpriv->cfg->ops->set_hw_reg(hw,
+ HW_VAR_IO_CMD,
+ (u8 *)&iotype);
+ break;
+ case SCAN_OPT_RESTORE:
+ iotype = IO_CMD_RESUME_DM_BY_SCAN;
+ rtlpriv->cfg->ops->set_hw_reg(hw,
+ HW_VAR_IO_CMD,
+ (u8 *)&iotype);
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ "Unknown Scan Backup operation.\n");
+ break;
+ }
+ }
+}
+EXPORT_SYMBOL(rtl_phy_scan_operation_backup);
+
/* There seem to be issues in mac80211 regarding when del ba frames can be
 * received. As a workaround, we make a fake del_ba if we receive a ba_req;
* however, rx_agg was opened to let mac80211 release some ba related
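rtl_phy_scan_operation_backup() added above consolidates four near-identical per-chip copies deleted later in this patch (rtl8188ee, the rtl8192c common code, rtl8192de, rtl8723ae); only the rtl8192de variant also snapshotted current_bandtypebackup, a line the shared helper does not carry. Each driver's hal ops table is repointed at the shared export. Sketch of the wiring, mirroring the sw.c hunks (the ops variable name is hypothetical):

	static struct rtl_hal_ops example_hal_ops = {
		.scan_operation_backup = rtl_phy_scan_operation_backup,
		/* remaining callbacks unchanged from the per-chip table */
	};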
diff --git a/drivers/net/wireless/rtlwifi/base.h b/drivers/net/wireless/rtlwifi/base.h
index 0e5fe09..0cd0742 100644
--- a/drivers/net/wireless/rtlwifi/base.h
+++ b/drivers/net/wireless/rtlwifi/base.h
@@ -114,7 +114,6 @@ void rtl_init_rfkill(struct ieee80211_hw *hw);
void rtl_deinit_rfkill(struct ieee80211_hw *hw);
void rtl_beacon_statistic(struct ieee80211_hw *hw, struct sk_buff *skb);
-void rtl_watch_dog_timer_callback(unsigned long data);
void rtl_deinit_deferred_work(struct ieee80211_hw *hw);
bool rtl_action_proc(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx);
@@ -153,5 +152,6 @@ int rtlwifi_rate_mapping(struct ieee80211_hw *hw,
bool rtl_tx_mgmt_proc(struct ieee80211_hw *hw, struct sk_buff *skb);
struct sk_buff *rtl_make_del_ba(struct ieee80211_hw *hw,
u8 *sa, u8 *bssid, u16 tid);
+void rtl_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation);
#endif
diff --git a/drivers/net/wireless/rtlwifi/efuse.c b/drivers/net/wireless/rtlwifi/efuse.c
index 838a1ed..ae13fb9 100644
--- a/drivers/net/wireless/rtlwifi/efuse.c
+++ b/drivers/net/wireless/rtlwifi/efuse.c
@@ -1203,20 +1203,18 @@ static void efuse_power_switch(struct ieee80211_hw *hw, u8 write, u8 pwrstate)
static u16 efuse_get_current_size(struct ieee80211_hw *hw)
{
- int continual = true;
u16 efuse_addr = 0;
u8 hworden;
u8 efuse_data, word_cnts;
- while (continual && efuse_one_byte_read(hw, efuse_addr, &efuse_data)
- && (efuse_addr < EFUSE_MAX_SIZE)) {
- if (efuse_data != 0xFF) {
- hworden = efuse_data & 0x0F;
- word_cnts = efuse_calculate_word_cnts(hworden);
- efuse_addr = efuse_addr + (word_cnts * 2) + 1;
- } else {
- continual = false;
- }
+ while (efuse_one_byte_read(hw, efuse_addr, &efuse_data) &&
+ efuse_addr < EFUSE_MAX_SIZE) {
+ if (efuse_data == 0xFF)
+ break;
+
+ hworden = efuse_data & 0x0F;
+ word_cnts = efuse_calculate_word_cnts(hworden);
+ efuse_addr = efuse_addr + (word_cnts * 2) + 1;
}
return efuse_addr;
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
index 703f839..0f49444 100644
--- a/drivers/net/wireless/rtlwifi/pci.c
+++ b/drivers/net/wireless/rtlwifi/pci.c
@@ -736,7 +736,6 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
struct rtl_stats stats = {
.signal = 0,
- .noise = -98,
.rate = 0,
};
int index = rtlpci->rx_ring[rx_queue_idx].idx;
@@ -2009,7 +2008,6 @@ fail2:
fail1:
if (hw)
ieee80211_free_hw(hw);
- pci_set_drvdata(pdev, NULL);
pci_disable_device(pdev);
return err;
@@ -2064,8 +2062,6 @@ void rtl_pci_disconnect(struct pci_dev *pdev)
rtl_pci_disable_aspm(hw);
- pci_set_drvdata(pdev, NULL);
-
ieee80211_free_hw(hw);
}
EXPORT_SYMBOL(rtl_pci_disconnect);
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c b/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c
index b68cae3..e06971b 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c
@@ -143,6 +143,7 @@ static void _rtl88ee_set_fw_clock_on(struct ieee80211_hw *hw,
} else {
rtlhal->fw_clk_change_in_progress = false;
spin_unlock_bh(&rtlpriv->locks.fw_ps_lock);
+ break;
}
}
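The one-line break above is a control-flow fix: once fw_clk_change_in_progress is cleared and the lock dropped, the enclosing wait loop has nothing left to do, and without the break it would iterate again. A hedged sketch of the shape, assuming the surrounding construct is a retry loop (the real loop sleeps between retries, elided here):

	while (1) {
		spin_lock_bh(&rtlpriv->locks.fw_ps_lock);
		if (rtlhal->fw_clk_change_in_progress) {
			spin_unlock_bh(&rtlpriv->locks.fw_ps_lock);
			continue;	/* another path still changing the clock */
		}
		rtlhal->fw_clk_change_in_progress = false;
		spin_unlock_bh(&rtlpriv->locks.fw_ps_lock);
		break;			/* the fix: leave the wait loop */
	}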
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/phy.c b/drivers/net/wireless/rtlwifi/rtl8188ee/phy.c
index e655c04..d67f9c7 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/phy.c
@@ -1136,34 +1136,6 @@ void rtl88e_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel)
&bw40_pwr[0], channel);
}
-void rtl88e_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- enum io_type iotype;
-
- if (!is_hal_stop(rtlhal)) {
- switch (operation) {
- case SCAN_OPT_BACKUP:
- iotype = IO_CMD_PAUSE_DM_BY_SCAN;
- rtlpriv->cfg->ops->set_hw_reg(hw,
- HW_VAR_IO_CMD,
- (u8 *)&iotype);
- break;
- case SCAN_OPT_RESTORE:
- iotype = IO_CMD_RESUME_DM_BY_SCAN;
- rtlpriv->cfg->ops->set_hw_reg(hw,
- HW_VAR_IO_CMD,
- (u8 *)&iotype);
- break;
- default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Unknown Scan Backup operation.\n");
- break;
- }
- }
-}
-
void rtl88e_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/phy.h b/drivers/net/wireless/rtlwifi/rtl8188ee/phy.h
index f1acd6d..d4545f0 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/phy.h
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/phy.h
@@ -217,8 +217,6 @@ extern void rtl88e_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw);
extern void rtl88e_phy_get_txpower_level(struct ieee80211_hw *hw,
long *powerlevel);
extern void rtl88e_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel);
-extern void rtl88e_phy_scan_operation_backup(struct ieee80211_hw *hw,
- u8 operation);
extern void rtl88e_phy_set_bw_mode_callback(struct ieee80211_hw *hw);
extern void rtl88e_phy_set_bw_mode(struct ieee80211_hw *hw,
enum nl80211_channel_type ch_type);
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/sw.c b/drivers/net/wireless/rtlwifi/rtl8188ee/sw.c
index c254693..347af1e 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/sw.c
@@ -30,6 +30,7 @@
#include "../wifi.h"
#include "../core.h"
#include "../pci.h"
+#include "../base.h"
#include "reg.h"
#include "def.h"
#include "phy.h"
@@ -244,7 +245,7 @@ static struct rtl_hal_ops rtl8188ee_hal_ops = {
.set_bw_mode = rtl88e_phy_set_bw_mode,
.switch_channel = rtl88e_phy_sw_chnl,
.dm_watchdog = rtl88e_dm_watchdog,
- .scan_operation_backup = rtl88e_phy_scan_operation_backup,
+ .scan_operation_backup = rtl_phy_scan_operation_backup,
.set_rf_power_state = rtl88e_phy_set_rf_power_state,
.led_control = rtl88ee_led_control,
.set_desc = rtl88ee_set_desc,
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c b/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c
index 68685a8..aece6c9 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c
@@ -478,7 +478,6 @@ bool rtl88ee_rx_query_desc(struct ieee80211_hw *hw,
/*rx_status->qual = status->signal; */
rx_status->signal = status->recvsignalpower + 10;
- /*rx_status->noise = -status->noise; */
if (status->packet_report_type == TX_REPORT2) {
status->macid_valid_entry[0] =
GET_RX_RPT2_DESC_MACID_VALID_1(pdesc);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
index d2d57a2..e9caa5d 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
@@ -541,29 +541,6 @@ EXPORT_SYMBOL(rtl92c_dm_write_dig);
static void rtl92c_dm_pwdb_monitor(struct ieee80211_hw *hw)
{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- long tmpentry_max_pwdb = 0, tmpentry_min_pwdb = 0xff;
-
- u8 h2c_parameter[3] = { 0 };
-
- return;
-
- if (tmpentry_max_pwdb != 0) {
- rtlpriv->dm.entry_max_undec_sm_pwdb = tmpentry_max_pwdb;
- } else {
- rtlpriv->dm.entry_max_undec_sm_pwdb = 0;
- }
-
- if (tmpentry_min_pwdb != 0xff) {
- rtlpriv->dm.entry_min_undec_sm_pwdb = tmpentry_min_pwdb;
- } else {
- rtlpriv->dm.entry_min_undec_sm_pwdb = 0;
- }
-
- h2c_parameter[2] = (u8) (rtlpriv->dm.undec_sm_pwdb & 0xFF);
- h2c_parameter[0] = 0;
-
- rtl92c_fill_h2c_cmd(hw, H2C_RSSI_REPORT, 3, h2c_parameter);
}
void rtl92c_dm_init_edca_turbo(struct ieee80211_hw *hw)
@@ -673,7 +650,7 @@ static void rtl92c_dm_txpower_tracking_callback_thermalmeter(struct ieee80211_hw
s8 cck_index = 0;
int i;
bool is2t = IS_92C_SERIAL(rtlhal->version);
- s8 txpwr_level[2] = {0, 0};
+ s8 txpwr_level[3] = {0, 0, 0};
u8 ofdm_min_index = 6, rf;
rtlpriv->dm.txpower_trackinginit = true;
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
index 246e535..0c0e782 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
@@ -592,36 +592,6 @@ long _rtl92c_phy_txpwr_idx_to_dbm(struct ieee80211_hw *hw,
}
EXPORT_SYMBOL(_rtl92c_phy_txpwr_idx_to_dbm);
-void rtl92c_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- enum io_type iotype;
-
- if (!is_hal_stop(rtlhal)) {
- switch (operation) {
- case SCAN_OPT_BACKUP:
- iotype = IO_CMD_PAUSE_DM_BY_SCAN;
- rtlpriv->cfg->ops->set_hw_reg(hw,
- HW_VAR_IO_CMD,
- (u8 *)&iotype);
-
- break;
- case SCAN_OPT_RESTORE:
- iotype = IO_CMD_RESUME_DM_BY_SCAN;
- rtlpriv->cfg->ops->set_hw_reg(hw,
- HW_VAR_IO_CMD,
- (u8 *)&iotype);
- break;
- default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Unknown Scan Backup operation\n");
- break;
- }
- }
-}
-EXPORT_SYMBOL(rtl92c_phy_scan_operation_backup);
-
void rtl92c_phy_set_bw_mode(struct ieee80211_hw *hw,
enum nl80211_channel_type ch_type)
{
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.h b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.h
index cec10d6..e79dabe 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.h
@@ -39,9 +39,7 @@
#define RT_CANNOT_IO(hw) false
#define HIGHPOWER_RADIOA_ARRAYLEN 22
-#define IQK_ADDA_REG_NUM 16
#define MAX_TOLERANCE 5
-#define IQK_DELAY_TIME 1
#define APK_BB_REG_NUM 5
#define APK_AFE_REG_NUM 16
@@ -205,8 +203,6 @@ void rtl92c_phy_get_txpower_level(struct ieee80211_hw *hw,
void rtl92c_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel);
bool rtl92c_phy_update_txpower_dbm(struct ieee80211_hw *hw,
long power_indbm);
-void rtl92c_phy_scan_operation_backup(struct ieee80211_hw *hw,
- u8 operation);
void rtl92c_phy_set_bw_mode(struct ieee80211_hw *hw,
enum nl80211_channel_type ch_type);
void rtl92c_phy_sw_chnl_callback(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/def.h b/drivers/net/wireless/rtlwifi/rtl8192ce/def.h
index 3cfa1bb..fa24de4 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/def.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/def.h
@@ -152,8 +152,6 @@ enum version_8192c {
#define IS_VENDOR_UMC_A_CUT(version) ((IS_CHIP_VENDOR_UMC(version)) ? \
((GET_CVID_CUT_VERSION(version)) ? false : true) : false)
#define IS_CHIP_VER_B(version) ((version & CHIP_VER_B) ? true : false)
-#define IS_VENDOR_UMC_A_CUT(version) ((IS_CHIP_VENDOR_UMC(version)) ? \
- ((GET_CVID_CUT_VERSION(version)) ? false : true) : false)
#define IS_92C_SERIAL(version) ((version & CHIP_92C_BITMASK) ? true : false)
#define IS_CHIP_VENDOR_UMC(version) \
((version & CHIP_VENDOR_UMC) ? true : false)
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.h b/drivers/net/wireless/rtlwifi/rtl8192ce/phy.h
index d5e3b70..aeb268b 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/phy.h
@@ -39,9 +39,7 @@
#define RT_CANNOT_IO(hw) false
#define HIGHPOWER_RADIOA_ARRAYLEN 22
-#define IQK_ADDA_REG_NUM 16
#define MAX_TOLERANCE 5
-#define IQK_DELAY_TIME 1
#define APK_BB_REG_NUM 5
#define APK_AFE_REG_NUM 16
@@ -209,8 +207,6 @@ void rtl92c_phy_get_txpower_level(struct ieee80211_hw *hw,
void rtl92c_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel);
bool rtl92c_phy_update_txpower_dbm(struct ieee80211_hw *hw,
long power_indbm);
-void rtl92c_phy_scan_operation_backup(struct ieee80211_hw *hw,
- u8 operation);
void rtl92c_phy_set_bw_mode(struct ieee80211_hw *hw,
enum nl80211_channel_type ch_type);
void rtl92c_phy_sw_chnl_callback(struct ieee80211_hw *hw);
@@ -226,7 +222,6 @@ bool rtl92c_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
enum radio_path rfpath);
bool rtl8192_phy_check_is_legal_rfpath(struct ieee80211_hw *hw,
u32 rfpath);
-bool rtl92c_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype);
bool rtl92ce_phy_set_rf_power_state(struct ieee80211_hw *hw,
enum rf_pwrstate rfpwr_state);
void rtl92ce_phy_set_rf_on(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/reg.h b/drivers/net/wireless/rtlwifi/rtl8192ce/reg.h
index bd4aef7..8922ecb 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/reg.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/reg.h
@@ -560,7 +560,6 @@
#define EEPROM_DEFAULT_TXPOWERLEVEL 0x22
#define EEPROM_DEFAULT_HT40_2SDIFF 0x0
#define EEPROM_DEFAULT_HT20_DIFF 2
-#define EEPROM_DEFAULT_LEGACYHTTXPOWERDIFF 0x3
#define EEPROM_DEFAULT_HT40_PWRMAXOFFSET 0
#define EEPROM_DEFAULT_HT20_PWRMAXOFFSET 0
@@ -639,17 +638,8 @@
#define EEPROM_TXPWR_GROUP 0x6F
-#define EEPROM_TSSI_A 0x76
-#define EEPROM_TSSI_B 0x77
-#define EEPROM_THERMAL_METER 0x78
-
#define EEPROM_CHANNELPLAN 0x75
-#define RF_OPTION1 0x79
-#define RF_OPTION2 0x7A
-#define RF_OPTION3 0x7B
-#define RF_OPTION4 0x7C
-
#define STOPBECON BIT(6)
#define STOPHIGHT BIT(5)
#define STOPMGT BIT(4)
@@ -689,13 +679,6 @@
#define RSV_CTRL 0x001C
#define RD_CTRL 0x0524
-#define REG_USB_INFO 0xFE17
-#define REG_USB_SPECIAL_OPTION 0xFE55
-
-#define REG_USB_DMA_AGG_TO 0xFE5B
-#define REG_USB_AGG_TO 0xFE5C
-#define REG_USB_AGG_TH 0xFE5D
-
#define REG_USB_VID 0xFE60
#define REG_USB_PID 0xFE62
#define REG_USB_OPTIONAL 0xFE64
@@ -1196,9 +1179,6 @@
#define POLLING_LLT_THRESHOLD 20
#define POLLING_READY_TIMEOUT_COUNT 1000
-#define MAX_MSS_DENSITY_2T 0x13
-#define MAX_MSS_DENSITY_1T 0x0A
-
#define EPROM_CMD_OPERATING_MODE_MASK ((1<<7)|(1<<6))
#define EPROM_CMD_CONFIG 0x3
#define EPROM_CMD_LOAD 1
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
index 1420356..b790320 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
@@ -30,6 +30,7 @@
#include "../wifi.h"
#include "../core.h"
#include "../pci.h"
+#include "../base.h"
#include "reg.h"
#include "def.h"
#include "phy.h"
@@ -219,7 +220,7 @@ static struct rtl_hal_ops rtl8192ce_hal_ops = {
.set_bw_mode = rtl92c_phy_set_bw_mode,
.switch_channel = rtl92c_phy_sw_chnl,
.dm_watchdog = rtl92c_dm_watchdog,
- .scan_operation_backup = rtl92c_phy_scan_operation_backup,
+ .scan_operation_backup = rtl_phy_scan_operation_backup,
.set_rf_power_state = rtl92c_phy_set_rf_power_state,
.led_control = rtl92ce_led_control,
.set_desc = rtl92ce_set_desc,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
index 6ad23b4..52abf0a 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
@@ -420,7 +420,6 @@ bool rtl92ce_rx_query_desc(struct ieee80211_hw *hw,
/*rx_status->qual = stats->signal; */
rx_status->signal = stats->recvsignalpower + 10;
- /*rx_status->noise = -stats->noise; */
return true;
}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
index da4f587..3936853 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
@@ -32,6 +32,7 @@
#include "../usb.h"
#include "../ps.h"
#include "../cam.h"
+#include "../stats.h"
#include "reg.h"
#include "def.h"
#include "phy.h"
@@ -738,16 +739,6 @@ static u8 _rtl92c_evm_db_to_percentage(char value)
return ret_val;
}
-static long _rtl92c_translate_todbm(struct ieee80211_hw *hw,
- u8 signal_strength_index)
-{
- long signal_power;
-
- signal_power = (long)((signal_strength_index + 1) >> 1);
- signal_power -= 95;
- return signal_power;
-}
-
static long _rtl92c_signal_scale_mapping(struct ieee80211_hw *hw,
long currsig)
{
@@ -913,180 +904,6 @@ static void _rtl92c_query_rxphystatus(struct ieee80211_hw *hw,
(hw, total_rssi /= rf_rx_num));
}
-static void _rtl92c_process_ui_rssi(struct ieee80211_hw *hw,
- struct rtl_stats *pstats)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_phy *rtlphy = &(rtlpriv->phy);
- u8 rfpath;
- u32 last_rssi, tmpval;
-
- if (pstats->packet_toself || pstats->packet_beacon) {
- rtlpriv->stats.rssi_calculate_cnt++;
- if (rtlpriv->stats.ui_rssi.total_num++ >=
- PHY_RSSI_SLID_WIN_MAX) {
- rtlpriv->stats.ui_rssi.total_num =
- PHY_RSSI_SLID_WIN_MAX;
- last_rssi =
- rtlpriv->stats.ui_rssi.elements[rtlpriv->
- stats.ui_rssi.index];
- rtlpriv->stats.ui_rssi.total_val -= last_rssi;
- }
- rtlpriv->stats.ui_rssi.total_val += pstats->signalstrength;
- rtlpriv->stats.ui_rssi.elements[rtlpriv->stats.ui_rssi.
- index++] = pstats->signalstrength;
- if (rtlpriv->stats.ui_rssi.index >= PHY_RSSI_SLID_WIN_MAX)
- rtlpriv->stats.ui_rssi.index = 0;
- tmpval = rtlpriv->stats.ui_rssi.total_val /
- rtlpriv->stats.ui_rssi.total_num;
- rtlpriv->stats.signal_strength =
- _rtl92c_translate_todbm(hw, (u8) tmpval);
- pstats->rssi = rtlpriv->stats.signal_strength;
- }
- if (!pstats->is_cck && pstats->packet_toself) {
- for (rfpath = RF90_PATH_A; rfpath < rtlphy->num_total_rfpath;
- rfpath++) {
- if (!rtl8192_phy_check_is_legal_rfpath(hw, rfpath))
- continue;
- if (rtlpriv->stats.rx_rssi_percentage[rfpath] == 0) {
- rtlpriv->stats.rx_rssi_percentage[rfpath] =
- pstats->rx_mimo_signalstrength[rfpath];
- }
- if (pstats->rx_mimo_signalstrength[rfpath] >
- rtlpriv->stats.rx_rssi_percentage[rfpath]) {
- rtlpriv->stats.rx_rssi_percentage[rfpath] =
- ((rtlpriv->stats.
- rx_rssi_percentage[rfpath] *
- (RX_SMOOTH_FACTOR - 1)) +
- (pstats->rx_mimo_signalstrength[rfpath])) /
- (RX_SMOOTH_FACTOR);
-
- rtlpriv->stats.rx_rssi_percentage[rfpath] =
- rtlpriv->stats.rx_rssi_percentage[rfpath] +
- 1;
- } else {
- rtlpriv->stats.rx_rssi_percentage[rfpath] =
- ((rtlpriv->stats.
- rx_rssi_percentage[rfpath] *
- (RX_SMOOTH_FACTOR - 1)) +
- (pstats->rx_mimo_signalstrength[rfpath])) /
- (RX_SMOOTH_FACTOR);
- }
- }
- }
-}
-
-static void _rtl92c_update_rxsignalstatistics(struct ieee80211_hw *hw,
- struct rtl_stats *pstats)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- int weighting = 0;
-
- if (rtlpriv->stats.recv_signal_power == 0)
- rtlpriv->stats.recv_signal_power = pstats->recvsignalpower;
- if (pstats->recvsignalpower > rtlpriv->stats.recv_signal_power)
- weighting = 5;
- else if (pstats->recvsignalpower < rtlpriv->stats.recv_signal_power)
- weighting = (-5);
- rtlpriv->stats.recv_signal_power =
- (rtlpriv->stats.recv_signal_power * 5 +
- pstats->recvsignalpower + weighting) / 6;
-}
-
-static void _rtl92c_process_pwdb(struct ieee80211_hw *hw,
- struct rtl_stats *pstats)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
- long undec_sm_pwdb = 0;
-
- if (mac->opmode == NL80211_IFTYPE_ADHOC) {
- return;
- } else {
- undec_sm_pwdb = rtlpriv->dm.undec_sm_pwdb;
- }
- if (pstats->packet_toself || pstats->packet_beacon) {
- if (undec_sm_pwdb < 0)
- undec_sm_pwdb = pstats->rx_pwdb_all;
- if (pstats->rx_pwdb_all > (u32) undec_sm_pwdb) {
- undec_sm_pwdb = (((undec_sm_pwdb) *
- (RX_SMOOTH_FACTOR - 1)) +
- (pstats->rx_pwdb_all)) / (RX_SMOOTH_FACTOR);
- undec_sm_pwdb += 1;
- } else {
- undec_sm_pwdb = (((undec_sm_pwdb) *
- (RX_SMOOTH_FACTOR - 1)) +
- (pstats->rx_pwdb_all)) / (RX_SMOOTH_FACTOR);
- }
- rtlpriv->dm.undec_sm_pwdb = undec_sm_pwdb;
- _rtl92c_update_rxsignalstatistics(hw, pstats);
- }
-}
-
-static void _rtl92c_process_LINK_Q(struct ieee80211_hw *hw,
- struct rtl_stats *pstats)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u32 last_evm = 0, n_stream, tmpval;
-
- if (pstats->signalquality != 0) {
- if (pstats->packet_toself || pstats->packet_beacon) {
- if (rtlpriv->stats.LINK_Q.total_num++ >=
- PHY_LINKQUALITY_SLID_WIN_MAX) {
- rtlpriv->stats.LINK_Q.total_num =
- PHY_LINKQUALITY_SLID_WIN_MAX;
- last_evm =
- rtlpriv->stats.LINK_Q.elements
- [rtlpriv->stats.LINK_Q.index];
- rtlpriv->stats.LINK_Q.total_val -=
- last_evm;
- }
- rtlpriv->stats.LINK_Q.total_val +=
- pstats->signalquality;
- rtlpriv->stats.LINK_Q.elements
- [rtlpriv->stats.LINK_Q.index++] =
- pstats->signalquality;
- if (rtlpriv->stats.LINK_Q.index >=
- PHY_LINKQUALITY_SLID_WIN_MAX)
- rtlpriv->stats.LINK_Q.index = 0;
- tmpval = rtlpriv->stats.LINK_Q.total_val /
- rtlpriv->stats.LINK_Q.total_num;
- rtlpriv->stats.signal_quality = tmpval;
- rtlpriv->stats.last_sigstrength_inpercent = tmpval;
- for (n_stream = 0; n_stream < 2;
- n_stream++) {
- if (pstats->RX_SIGQ[n_stream] != -1) {
- if (!rtlpriv->stats.RX_EVM[n_stream]) {
- rtlpriv->stats.RX_EVM[n_stream]
- = pstats->RX_SIGQ[n_stream];
- }
- rtlpriv->stats.RX_EVM[n_stream] =
- ((rtlpriv->stats.RX_EVM
- [n_stream] *
- (RX_SMOOTH_FACTOR - 1)) +
- (pstats->RX_SIGQ
- [n_stream] * 1)) /
- (RX_SMOOTH_FACTOR);
- }
- }
- }
- } else {
- ;
- }
-}
-
-static void _rtl92c_process_phyinfo(struct ieee80211_hw *hw,
- u8 *buffer,
- struct rtl_stats *pcurrent_stats)
-{
- if (!pcurrent_stats->packet_matchbssid &&
- !pcurrent_stats->packet_beacon)
- return;
- _rtl92c_process_ui_rssi(hw, pcurrent_stats);
- _rtl92c_process_pwdb(hw, pcurrent_stats);
- _rtl92c_process_LINK_Q(hw, pcurrent_stats);
-}
-
void rtl92c_translate_rx_signal_stuff(struct ieee80211_hw *hw,
struct sk_buff *skb,
struct rtl_stats *pstats,
@@ -1123,5 +940,5 @@ void rtl92c_translate_rx_signal_stuff(struct ieee80211_hw *hw,
_rtl92c_query_rxphystatus(hw, pstats, pdesc, p_drvinfo,
packet_matchbssid, packet_toself,
packet_beacon);
- _rtl92c_process_phyinfo(hw, tmp_buf, pstats);
+ rtl_process_phyinfo(hw, tmp_buf, pstats);
}
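Everything deleted above is generic RX-statistics bookkeeping that now lives in the shared rtl_process_phyinfo() (hence the new ../stats.h include). The core of what it keeps doing is the same integer IIR smoothing visible in the removed helpers:

	/* avg <- (avg * (N - 1) + sample) / N, with N = RX_SMOOTH_FACTOR */
	avg = (avg * (RX_SMOOTH_FACTOR - 1) + sample) / RX_SMOOTH_FACTOR;

	/* worked example, assuming RX_SMOOTH_FACTOR = 20:
	 * avg = 50, sample = 70  ->  (50 * 19 + 70) / 20 = 51 */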
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
index 2bd5985..9936de7 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
@@ -31,6 +31,7 @@
#include "../core.h"
#include "../usb.h"
#include "../efuse.h"
+#include "../base.h"
#include "reg.h"
#include "def.h"
#include "phy.h"
@@ -117,7 +118,7 @@ static struct rtl_hal_ops rtl8192cu_hal_ops = {
.set_bw_mode = rtl92c_phy_set_bw_mode,
.switch_channel = rtl92c_phy_sw_chnl,
.dm_watchdog = rtl92c_dm_watchdog,
- .scan_operation_backup = rtl92c_phy_scan_operation_backup,
+ .scan_operation_backup = rtl_phy_scan_operation_backup,
.set_rf_power_state = rtl92cu_phy_set_rf_power_state,
.led_control = rtl92cu_led_control,
.enable_hw_sec = rtl92cu_enable_hw_security_config,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
index 763cf1d..04c7e57 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
@@ -349,7 +349,6 @@ bool rtl92cu_rx_query_desc(struct ieee80211_hw *hw,
}
/*rx_status->qual = stats->signal; */
rx_status->signal = stats->rssi + 10;
- /*rx_status->noise = -stats->noise; */
return true;
}
@@ -364,7 +363,6 @@ static void _rtl_rx_process(struct ieee80211_hw *hw, struct sk_buff *skb)
u8 *rxdesc;
struct rtl_stats stats = {
.signal = 0,
- .noise = -98,
.rate = 0,
};
struct rx_fwinfo_92c *p_drvinfo;
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/dm.c b/drivers/net/wireless/rtlwifi/rtl8192de/dm.c
index f700f7a..7908e1c 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/dm.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/dm.c
@@ -840,9 +840,9 @@ static void rtl92d_dm_txpower_tracking_callback_thermalmeter(
bool internal_pa = false;
long ele_a = 0, ele_d, temp_cck, val_x, value32;
long val_y, ele_c = 0;
- u8 ofdm_index[2];
+ u8 ofdm_index[3];
s8 cck_index = 0;
- u8 ofdm_index_old[2] = {0, 0};
+ u8 ofdm_index_old[3] = {0, 0, 0};
s8 cck_index_old = 0;
u8 index;
int i;
@@ -1118,6 +1118,10 @@ static void rtl92d_dm_txpower_tracking_callback_thermalmeter(
val_x, val_y, ele_a, ele_c, ele_d,
val_x, val_y);
+ if (cck_index >= CCK_TABLE_SIZE)
+ cck_index = CCK_TABLE_SIZE - 1;
+ if (cck_index < 0)
+ cck_index = 0;
if (rtlhal->current_bandtype == BAND_ON_2_4G) {
/* Adjust CCK according to IQK result */
if (!rtlpriv->dm.cck_inch14) {
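The two fixes above are of the same family: the ofdm_index arrays gain a third element so an RF-path loop cannot index past the end, and cck_index is clamped into [0, CCK_TABLE_SIZE - 1] before it is used as a table index. The clamp could equally be written with the kernel's clamp_t() helper (an alternative sketch, not what the patch uses):

	cck_index = clamp_t(s8, cck_index, 0, CCK_TABLE_SIZE - 1);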
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/hw.c b/drivers/net/wireless/rtlwifi/rtl8192de/hw.c
index 7dd8f6d..c4a7db9 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/hw.c
@@ -1194,25 +1194,7 @@ void rtl92d_linked_set_reg(struct ieee80211_hw *hw)
* mac80211 will send pkt when scan */
void rtl92de_set_qos(struct ieee80211_hw *hw, int aci)
{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
rtl92d_dm_init_edca_turbo(hw);
- return;
- switch (aci) {
- case AC1_BK:
- rtl_write_dword(rtlpriv, REG_EDCA_BK_PARAM, 0xa44f);
- break;
- case AC0_BE:
- break;
- case AC2_VI:
- rtl_write_dword(rtlpriv, REG_EDCA_VI_PARAM, 0x5e4322);
- break;
- case AC3_VO:
- rtl_write_dword(rtlpriv, REG_EDCA_VO_PARAM, 0x2f3222);
- break;
- default:
- RT_ASSERT(false, "invalid aci: %d !\n", aci);
- break;
- }
}
void rtl92de_enable_interrupt(struct ieee80211_hw *hw)
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/phy.c b/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
index 840bac5..13196cc 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
@@ -1022,34 +1022,6 @@ void rtl92d_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel)
rtl92d_phy_rf6052_set_ofdm_txpower(hw, &ofdmpowerlevel[0], channel);
}
-void rtl92d_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- enum io_type iotype;
-
- if (!is_hal_stop(rtlhal)) {
- switch (operation) {
- case SCAN_OPT_BACKUP:
- rtlhal->current_bandtypebackup =
- rtlhal->current_bandtype;
- iotype = IO_CMD_PAUSE_DM_BY_SCAN;
- rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_IO_CMD,
- (u8 *)&iotype);
- break;
- case SCAN_OPT_RESTORE:
- iotype = IO_CMD_RESUME_DM_BY_SCAN;
- rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_IO_CMD,
- (u8 *)&iotype);
- break;
- default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Unknown Scan Backup operation\n");
- break;
- }
- }
-}
-
void rtl92d_phy_set_bw_mode(struct ieee80211_hw *hw,
enum nl80211_channel_type ch_type)
{
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/phy.h b/drivers/net/wireless/rtlwifi/rtl8192de/phy.h
index f074952..bef3040 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/phy.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/phy.h
@@ -39,9 +39,7 @@
#define RT_CANNOT_IO(hw) false
#define HIGHPOWER_RADIOA_ARRAYLEN 22
-#define IQK_ADDA_REG_NUM 16
#define MAX_TOLERANCE 5
-#define IQK_DELAY_TIME 1
#define APK_BB_REG_NUM 5
#define APK_AFE_REG_NUM 16
@@ -144,8 +142,6 @@ extern bool rtl92c_phy_config_rf_with_feaderfile(struct ieee80211_hw *hw,
enum radio_path rfpath);
extern void rtl92d_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw);
extern void rtl92d_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel);
-extern void rtl92d_phy_scan_operation_backup(struct ieee80211_hw *hw,
- u8 operation);
extern void rtl92d_phy_set_bw_mode(struct ieee80211_hw *hw,
enum nl80211_channel_type ch_type);
extern u8 rtl92d_phy_sw_chnl(struct ieee80211_hw *hw);
@@ -173,6 +169,5 @@ void rtl92d_acquire_cckandrw_pagea_ctl(struct ieee80211_hw *hw,
unsigned long *flag);
u8 rtl92d_get_rightchnlplace_for_iqk(u8 chnl);
void rtl92d_phy_reload_iqk_setting(struct ieee80211_hw *hw, u8 channel);
-void rtl92d_phy_iq_calibrate(struct ieee80211_hw *hw);
#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/sw.c b/drivers/net/wireless/rtlwifi/rtl8192de/sw.c
index c18c04b..edab5a5 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/sw.c
@@ -30,6 +30,7 @@
#include "../wifi.h"
#include "../core.h"
#include "../pci.h"
+#include "../base.h"
#include "reg.h"
#include "def.h"
#include "phy.h"
@@ -236,7 +237,7 @@ static struct rtl_hal_ops rtl8192de_hal_ops = {
.set_bw_mode = rtl92d_phy_set_bw_mode,
.switch_channel = rtl92d_phy_sw_chnl,
.dm_watchdog = rtl92d_dm_watchdog,
- .scan_operation_backup = rtl92d_phy_scan_operation_backup,
+ .scan_operation_backup = rtl_phy_scan_operation_backup,
.set_rf_power_state = rtl92d_phy_set_rf_power_state,
.led_control = rtl92de_led_control,
.set_desc = rtl92de_set_desc,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/trx.c b/drivers/net/wireless/rtlwifi/rtl8192de/trx.c
index b8ec718..945ddec 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/trx.c
@@ -526,7 +526,6 @@ bool rtl92de_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats,
}
/*rx_status->qual = stats->signal; */
rx_status->signal = stats->rssi + 10;
- /*rx_status->noise = -stats->noise; */
return true;
}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/reg.h b/drivers/net/wireless/rtlwifi/rtl8192se/reg.h
index 84d1181..c81c835 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/reg.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/reg.h
@@ -425,14 +425,9 @@
#define EXT_IMEM_CODE_DONE BIT(2)
#define IMEM_CHK_RPT BIT(1)
#define IMEM_CODE_DONE BIT(0)
-#define IMEM_CODE_DONE BIT(0)
-#define IMEM_CHK_RPT BIT(1)
#define EMEM_CODE_DONE BIT(2)
#define EMEM_CHK_RPT BIT(3)
-#define DMEM_CODE_DONE BIT(4)
#define IMEM_RDY BIT(5)
-#define BASECHG BIT(6)
-#define FWRDY BIT(7)
#define LOAD_FW_READY (IMEM_CODE_DONE | \
IMEM_CHK_RPT | \
EMEM_CODE_DONE | \
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
index c709511..222d2e7 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
@@ -330,7 +330,6 @@ bool rtl92se_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats,
/*rx_status->qual = stats->signal; */
rx_status->signal = stats->rssi + 10;
- /*rx_status->noise = -stats->noise; */
return true;
}
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/phy.c b/drivers/net/wireless/rtlwifi/rtl8723ae/phy.c
index eafbb18..5d318a8 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/phy.c
@@ -934,35 +934,6 @@ static long _phy_txpwr_idx_to_dbm(struct ieee80211_hw *hw,
return pwrout_dbm;
}
-void rtl8723ae_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- enum io_type iotype;
-
- if (!is_hal_stop(rtlhal)) {
- switch (operation) {
- case SCAN_OPT_BACKUP:
- iotype = IO_CMD_PAUSE_DM_BY_SCAN;
- rtlpriv->cfg->ops->set_hw_reg(hw,
- HW_VAR_IO_CMD,
- (u8 *)&iotype);
-
- break;
- case SCAN_OPT_RESTORE:
- iotype = IO_CMD_RESUME_DM_BY_SCAN;
- rtlpriv->cfg->ops->set_hw_reg(hw,
- HW_VAR_IO_CMD,
- (u8 *)&iotype);
- break;
- default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Unknown Scan Backup operation.\n");
- break;
- }
- }
-}
-
void rtl8723ae_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/phy.h b/drivers/net/wireless/rtlwifi/rtl8723ae/phy.h
index e7a59eb..3d8f9e3 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/phy.h
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/phy.h
@@ -205,8 +205,6 @@ extern void rtl8723ae_phy_set_txpower_level(struct ieee80211_hw *hw,
u8 channel);
extern bool rtl8723ae_phy_update_txpower_dbm(struct ieee80211_hw *hw,
long power_indbm);
-extern void rtl8723ae_phy_scan_operation_backup(struct ieee80211_hw *hw,
- u8 operation);
extern void rtl8723ae_phy_set_bw_mode_callback(struct ieee80211_hw *hw);
extern void rtl8723ae_phy_set_bw_mode(struct ieee80211_hw *hw,
enum nl80211_channel_type ch_type);
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c b/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c
index d9ee2ef..62b204f 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c
@@ -33,6 +33,7 @@
#include "../core.h"
#include "../pci.h"
+#include "../base.h"
#include "reg.h"
#include "def.h"
#include "phy.h"
@@ -220,7 +221,7 @@ static struct rtl_hal_ops rtl8723ae_hal_ops = {
.set_bw_mode = rtl8723ae_phy_set_bw_mode,
.switch_channel = rtl8723ae_phy_sw_chnl,
.dm_watchdog = rtl8723ae_dm_watchdog,
- .scan_operation_backup = rtl8723ae_phy_scan_operation_backup,
+ .scan_operation_backup = rtl_phy_scan_operation_backup,
.set_rf_power_state = rtl8723ae_phy_set_rf_power_state,
.led_control = rtl8723ae_led_control,
.set_desc = rtl8723ae_set_desc,
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/trx.c b/drivers/net/wireless/rtlwifi/rtl8723ae/trx.c
index bcd82a1..50b7be3 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/trx.c
@@ -359,7 +359,6 @@ bool rtl8723ae_rx_query_desc(struct ieee80211_hw *hw,
/*rx_status->qual = status->signal; */
rx_status->signal = status->recvsignalpower + 10;
- /*rx_status->noise = -status->noise; */
return true;
}
diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
index e56778c..6e2b5c5 100644
--- a/drivers/net/wireless/rtlwifi/usb.c
+++ b/drivers/net/wireless/rtlwifi/usb.c
@@ -455,7 +455,6 @@ static void _rtl_usb_rx_process_agg(struct ieee80211_hw *hw,
struct ieee80211_rx_status rx_status = {0};
struct rtl_stats stats = {
.signal = 0,
- .noise = -98,
.rate = 0,
};
@@ -498,7 +497,6 @@ static void _rtl_usb_rx_process_noagg(struct ieee80211_hw *hw,
struct ieee80211_rx_status rx_status = {0};
struct rtl_stats stats = {
.signal = 0,
- .noise = -98,
.rate = 0,
};
@@ -582,12 +580,15 @@ static void _rtl_rx_work(unsigned long param)
static unsigned int _rtl_rx_get_padding(struct ieee80211_hdr *hdr,
unsigned int len)
{
+#if NET_IP_ALIGN != 0
unsigned int padding = 0;
+#endif
/* make function no-op when possible */
if (NET_IP_ALIGN == 0 || len < sizeof(*hdr))
return 0;
+#if NET_IP_ALIGN != 0
/* alignment calculation as in lbtf_rx() / carl9170_rx_copy_data() */
/* TODO: deduplicate common code, define helper function instead? */
@@ -608,6 +609,7 @@ static unsigned int _rtl_rx_get_padding(struct ieee80211_hdr *hdr,
padding ^= NET_IP_ALIGN;
return padding;
+#endif
}
#define __RADIO_TAP_SIZE_RSV 32
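The preprocessor guards added above cover NET_IP_ALIGN == 0 configurations: there the early return makes the rest of the function dead code and leaves padding set but unused, presumably tripping "set but not used" compiler warnings; compiling both the local and the tail out removes that without changing behaviour where NET_IP_ALIGN is nonzero. The shape, reduced to a skeleton (a sketch, with the alignment arithmetic elided):

	#include <linux/skbuff.h>	/* NET_IP_ALIGN */

	static unsigned int rx_padding(unsigned int len, unsigned int hdrlen)
	{
	#if NET_IP_ALIGN != 0
		unsigned int padding = 0;
	#endif
		if (NET_IP_ALIGN == 0 || len < hdrlen)
			return 0;
	#if NET_IP_ALIGN != 0
		/* ...header-length based alignment math... */
		padding ^= NET_IP_ALIGN;
		return padding;
	#endif
	}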
diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h
index cc03e7c..96763dc 100644
--- a/drivers/net/wireless/rtlwifi/wifi.h
+++ b/drivers/net/wireless/rtlwifi/wifi.h
@@ -192,8 +192,6 @@ enum hardware_type {
(IS_HARDWARE_TYPE_8192DE(rtlhal) || IS_HARDWARE_TYPE_8192DU(rtlhal))
#define IS_HARDWARE_TYPE_8723(rtlhal) \
(IS_HARDWARE_TYPE_8723E(rtlhal) || IS_HARDWARE_TYPE_8723U(rtlhal))
-#define IS_HARDWARE_TYPE_8723U(rtlhal) \
- (rtlhal->hw_type == HARDWARE_TYPE_RTL8723U)
#define RX_HAL_IS_CCK_RATE(_pdesc)\
(_pdesc->rxmcs == DESC92_RATE1M || \
diff --git a/drivers/net/wireless/ti/wl1251/Kconfig b/drivers/net/wireless/ti/wl1251/Kconfig
index 8fec4ed..477a206 100644
--- a/drivers/net/wireless/ti/wl1251/Kconfig
+++ b/drivers/net/wireless/ti/wl1251/Kconfig
@@ -1,6 +1,6 @@
menuconfig WL1251
tristate "TI wl1251 driver support"
- depends on MAC80211 && GENERIC_HARDIRQS
+ depends on MAC80211
select FW_LOADER
select CRC7
---help---
diff --git a/drivers/net/wireless/ti/wl1251/spi.c b/drivers/net/wireless/ti/wl1251/spi.c
index c7dc6fe..1342f81 100644
--- a/drivers/net/wireless/ti/wl1251/spi.c
+++ b/drivers/net/wireless/ti/wl1251/spi.c
@@ -243,7 +243,7 @@ static int wl1251_spi_probe(struct spi_device *spi)
struct wl1251 *wl;
int ret;
- pdata = spi->dev.platform_data;
+ pdata = dev_get_platdata(&spi->dev);
if (!pdata) {
wl1251_error("no platform data");
return -ENODEV;
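
The conversions from open-coded spi->dev.platform_data to dev_get_platdata() throughout this series are behavior-neutral; the helper (paraphrasing include/linux/device.h) is just an accessor:

	static inline void *dev_get_platdata(const struct device *dev)
	{
		return dev->platform_data;
	}

Using the accessor keeps drivers from reaching into struct device internals directly.
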
diff --git a/drivers/net/wireless/ti/wl1251/wl1251.h b/drivers/net/wireless/ti/wl1251/wl1251.h
index fd02060..2c3bd1b 100644
--- a/drivers/net/wireless/ti/wl1251/wl1251.h
+++ b/drivers/net/wireless/ti/wl1251/wl1251.h
@@ -424,8 +424,8 @@ void wl1251_disable_interrupts(struct wl1251 *wl);
#define CHIP_ID_1271_PG10 (0x4030101)
#define CHIP_ID_1271_PG20 (0x4030111)
-#define WL1251_FW_NAME "wl1251-fw.bin"
-#define WL1251_NVS_NAME "wl1251-nvs.bin"
+#define WL1251_FW_NAME "ti-connectivity/wl1251-fw.bin"
+#define WL1251_NVS_NAME "ti-connectivity/wl1251-nvs.bin"
#define WL1251_POWER_ON_SLEEP 10 /* in milliseconds */
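
Firmware names are resolved by the firmware loader relative to its search path (commonly /lib/firmware), so the rename above only moves the expected install location under the ti-connectivity/ subdirectory. A hedged sketch of how such a name is typically consumed, where dev is an assumed struct device pointer:

	const struct firmware *fw;
	int ret;

	/* Resolves to e.g. /lib/firmware/ti-connectivity/wl1251-fw.bin */
	ret = request_firmware(&fw, WL1251_FW_NAME, dev);
	if (ret < 0)
		return ret;
	/* ... consume fw->data / fw->size ... */
	release_firmware(fw);
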
diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c
index 1c627da..591526b 100644
--- a/drivers/net/wireless/ti/wl12xx/main.c
+++ b/drivers/net/wireless/ti/wl12xx/main.c
@@ -1704,7 +1704,7 @@ static struct ieee80211_sta_ht_cap wl12xx_ht_cap = {
static int wl12xx_setup(struct wl1271 *wl)
{
struct wl12xx_priv *priv = wl->priv;
- struct wlcore_platdev_data *pdev_data = wl->pdev->dev.platform_data;
+ struct wlcore_platdev_data *pdev_data = dev_get_platdata(&wl->pdev->dev);
struct wl12xx_platform_data *pdata = pdev_data->pdata;
wl->rtable = wl12xx_rtable;
diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c
index 7aa0eb8..d0daca1 100644
--- a/drivers/net/wireless/ti/wl18xx/main.c
+++ b/drivers/net/wireless/ti/wl18xx/main.c
@@ -623,6 +623,18 @@ static const int wl18xx_rtable[REG_TABLE_LEN] = {
[REG_RAW_FW_STATUS_ADDR] = WL18XX_FW_STATUS_ADDR,
};
+static const struct wl18xx_clk_cfg wl18xx_clk_table_coex[NUM_CLOCK_CONFIGS] = {
+ [CLOCK_CONFIG_16_2_M] = { 8, 121, 0, 0, false },
+ [CLOCK_CONFIG_16_368_M] = { 8, 120, 0, 0, false },
+ [CLOCK_CONFIG_16_8_M] = { 8, 117, 0, 0, false },
+ [CLOCK_CONFIG_19_2_M] = { 10, 128, 0, 0, false },
+ [CLOCK_CONFIG_26_M] = { 11, 104, 0, 0, false },
+ [CLOCK_CONFIG_32_736_M] = { 8, 120, 0, 0, false },
+ [CLOCK_CONFIG_33_6_M] = { 8, 117, 0, 0, false },
+ [CLOCK_CONFIG_38_468_M] = { 10, 128, 0, 0, false },
+ [CLOCK_CONFIG_52_M] = { 11, 104, 0, 0, false },
+};
+
static const struct wl18xx_clk_cfg wl18xx_clk_table[NUM_CLOCK_CONFIGS] = {
[CLOCK_CONFIG_16_2_M] = { 7, 104, 801, 4, true },
[CLOCK_CONFIG_16_368_M] = { 9, 132, 3751, 4, true },
@@ -704,6 +716,23 @@ static int wl18xx_set_clk(struct wl1271 *wl)
wl18xx_clk_table[clk_freq].p, wl18xx_clk_table[clk_freq].q,
wl18xx_clk_table[clk_freq].swallow ? "swallow" : "spit");
+ /* coex PLL configuration */
+ ret = wl18xx_top_reg_write(wl, PLLSH_COEX_PLL_N,
+ wl18xx_clk_table_coex[clk_freq].n);
+ if (ret < 0)
+ goto out;
+
+ ret = wl18xx_top_reg_write(wl, PLLSH_COEX_PLL_M,
+ wl18xx_clk_table_coex[clk_freq].m);
+ if (ret < 0)
+ goto out;
+
+ /* bypass the swallowing logic */
+ ret = wl18xx_top_reg_write(wl, PLLSH_COEX_PLL_SWALLOW_EN,
+ PLLSH_COEX_PLL_SWALLOW_EN_VAL1);
+ if (ret < 0)
+ goto out;
+
ret = wl18xx_top_reg_write(wl, PLLSH_WCS_PLL_N,
wl18xx_clk_table[clk_freq].n);
if (ret < 0)
@@ -745,6 +774,30 @@ static int wl18xx_set_clk(struct wl1271 *wl)
PLLSH_WCS_PLL_SWALLOW_EN_VAL2);
}
+ /* choose WCS PLL */
+ ret = wl18xx_top_reg_write(wl, PLLSH_WL_PLL_SEL,
+ PLLSH_WL_PLL_SEL_WCS_PLL);
+ if (ret < 0)
+ goto out;
+
+ /* enable both PLLs */
+ ret = wl18xx_top_reg_write(wl, PLLSH_WL_PLL_EN, PLLSH_WL_PLL_EN_VAL1);
+ if (ret < 0)
+ goto out;
+
+ udelay(1000);
+
+ /* disable coex PLL */
+ ret = wl18xx_top_reg_write(wl, PLLSH_WL_PLL_EN, PLLSH_WL_PLL_EN_VAL2);
+ if (ret < 0)
+ goto out;
+
+ /* reset the swallowing logic */
+ ret = wl18xx_top_reg_write(wl, PLLSH_COEX_PLL_SWALLOW_EN,
+ PLLSH_COEX_PLL_SWALLOW_EN_VAL2);
+ if (ret < 0)
+ goto out;
+
out:
return ret;
}
@@ -1175,16 +1228,48 @@ static u32 wl18xx_ap_get_mimo_wide_rate_mask(struct wl1271 *wl,
}
}
+static const char *wl18xx_rdl_name(enum wl18xx_rdl_num rdl_num)
+{
+ switch (rdl_num) {
+ case RDL_1_HP:
+ return "183xH";
+ case RDL_2_SP:
+ return "183x or 180x";
+ case RDL_3_HP:
+ return "187xH";
+ case RDL_4_SP:
+ return "187x";
+ case RDL_5_SP:
+ return "RDL11 - Not Supported";
+ case RDL_6_SP:
+ return "180xD";
+ case RDL_7_SP:
+ return "RDL13 - Not Supported (1893Q)";
+ case RDL_8_SP:
+ return "18xxQ";
+ case RDL_NONE:
+ return "UNTRIMMED";
+ default:
+ return "UNKNOWN";
+ }
+}
+
static int wl18xx_get_pg_ver(struct wl1271 *wl, s8 *ver)
{
u32 fuse;
- s8 rom = 0, metal = 0, pg_ver = 0, rdl_ver = 0;
+ s8 rom = 0, metal = 0, pg_ver = 0, rdl_ver = 0, package_type = 0;
int ret;
ret = wlcore_set_partition(wl, &wl->ptable[PART_TOP_PRCM_ELP_SOC]);
if (ret < 0)
goto out;
+ ret = wlcore_read32(wl, WL18XX_REG_FUSE_DATA_2_3, &fuse);
+ if (ret < 0)
+ goto out;
+
+ package_type = (fuse >> WL18XX_PACKAGE_TYPE_OFFSET) & 1;
+
ret = wlcore_read32(wl, WL18XX_REG_FUSE_DATA_1_3, &fuse);
if (ret < 0)
goto out;
@@ -1192,7 +1277,7 @@ static int wl18xx_get_pg_ver(struct wl1271 *wl, s8 *ver)
pg_ver = (fuse & WL18XX_PG_VER_MASK) >> WL18XX_PG_VER_OFFSET;
rom = (fuse & WL18XX_ROM_VER_MASK) >> WL18XX_ROM_VER_OFFSET;
- if (rom <= 0xE)
+ if ((rom <= 0xE) && (package_type == WL18XX_PACKAGE_TYPE_WSP))
metal = (fuse & WL18XX_METAL_VER_MASK) >>
WL18XX_METAL_VER_OFFSET;
else
@@ -1204,11 +1289,9 @@ static int wl18xx_get_pg_ver(struct wl1271 *wl, s8 *ver)
goto out;
rdl_ver = (fuse & WL18XX_RDL_VER_MASK) >> WL18XX_RDL_VER_OFFSET;
- if (rdl_ver > RDL_MAX)
- rdl_ver = RDL_NONE;
- wl1271_info("wl18xx HW: RDL %d, %s, PG %x.%x (ROM %x)",
- rdl_ver, rdl_names[rdl_ver], pg_ver, metal, rom);
+ wl1271_info("wl18xx HW: %s, PG %d.%d (ROM 0x%x)",
+ wl18xx_rdl_name(rdl_ver), pg_ver, metal, rom);
if (ver)
*ver = pg_ver;
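
As a worked example of the mask/shift decoding in wl18xx_get_pg_ver(), take a hypothetical WL18XX_REG_FUSE_DATA_1_3 value of 0x2635 (invented for illustration; the masks are defined in reg.h below):

	u32 fuse = 0x2635;
	u8 pg_ver = (fuse & WL18XX_PG_VER_MASK) >> WL18XX_PG_VER_OFFSET;
						/* (0x2635 & 0x0070) >> 4 = 0x3  */
	u8 rom    = (fuse & WL18XX_ROM_VER_MASK) >> WL18XX_ROM_VER_OFFSET;
						/* (0x2635 & 0x3e00) >> 9 = 0x13 */

The package type comes from a different fuse word, WL18XX_REG_FUSE_DATA_2_3, read as bit WL18XX_PACKAGE_TYPE_OFFSET (13) of that register.
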
diff --git a/drivers/net/wireless/ti/wl18xx/reg.h b/drivers/net/wireless/ti/wl18xx/reg.h
index 05dd8ba..a433a75 100644
--- a/drivers/net/wireless/ti/wl18xx/reg.h
+++ b/drivers/net/wireless/ti/wl18xx/reg.h
@@ -114,6 +114,11 @@
#define PLATFORM_DETECTION 0xA0E3E0
#define OCS_EN 0xA02080
#define PRIMARY_CLK_DETECT 0xA020A6
+#define PLLSH_COEX_PLL_N 0xA02384
+#define PLLSH_COEX_PLL_M 0xA02382
+#define PLLSH_COEX_PLL_SWALLOW_EN 0xA0238E
+#define PLLSH_WL_PLL_SEL 0xA02398
+
#define PLLSH_WCS_PLL_N 0xA02362
#define PLLSH_WCS_PLL_M 0xA02360
#define PLLSH_WCS_PLL_Q_FACTOR_CFG_1 0xA02364
@@ -128,19 +133,30 @@
#define PLLSH_WCS_PLL_P_FACTOR_CFG_1_MASK 0xFFFF
#define PLLSH_WCS_PLL_P_FACTOR_CFG_2_MASK 0x000F
+#define PLLSH_WL_PLL_EN_VAL1 0x7
+#define PLLSH_WL_PLL_EN_VAL2 0x2
+#define PLLSH_COEX_PLL_SWALLOW_EN_VAL1 0x2
+#define PLLSH_COEX_PLL_SWALLOW_EN_VAL2 0x11
+
#define PLLSH_WCS_PLL_SWALLOW_EN_VAL1 0x1
#define PLLSH_WCS_PLL_SWALLOW_EN_VAL2 0x12
+#define PLLSH_WL_PLL_SEL_WCS_PLL 0x0
+#define PLLSH_WL_PLL_SEL_COEX_PLL 0x1
+
#define WL18XX_REG_FUSE_DATA_1_3 0xA0260C
#define WL18XX_PG_VER_MASK 0x70
#define WL18XX_PG_VER_OFFSET 4
-#define WL18XX_ROM_VER_MASK 0x3
-#define WL18XX_ROM_VER_OFFSET 0
+#define WL18XX_ROM_VER_MASK 0x3e00
+#define WL18XX_ROM_VER_OFFSET 9
#define WL18XX_METAL_VER_MASK 0xC
#define WL18XX_METAL_VER_OFFSET 2
#define WL18XX_NEW_METAL_VER_MASK 0x180
#define WL18XX_NEW_METAL_VER_OFFSET 7
+#define WL18XX_PACKAGE_TYPE_OFFSET 13
+#define WL18XX_PACKAGE_TYPE_WSP 0
+
#define WL18XX_REG_FUSE_DATA_2_3 0xA02614
#define WL18XX_RDL_VER_MASK 0x1f00
#define WL18XX_RDL_VER_OFFSET 8
@@ -201,24 +217,21 @@ enum {
NUM_BOARD_TYPES,
};
-enum {
+enum wl18xx_rdl_num {
RDL_NONE = 0,
RDL_1_HP = 1,
RDL_2_SP = 2,
RDL_3_HP = 3,
RDL_4_SP = 4,
+ RDL_5_SP = 0x11,
+ RDL_6_SP = 0x12,
+ RDL_7_SP = 0x13,
+ RDL_8_SP = 0x14,
_RDL_LAST,
RDL_MAX = _RDL_LAST - 1,
};
-static const char * const rdl_names[] = {
- [RDL_NONE] = "",
- [RDL_1_HP] = "1853 SISO",
- [RDL_2_SP] = "1857 MIMO",
- [RDL_3_HP] = "1893 SISO",
- [RDL_4_SP] = "1897 MIMO",
-};
/* FPGA_SPARE_1 register - used to change the PHY ATPG clock at boot time */
#define WL18XX_PHY_FPGA_SPARE_1 0x8093CA40
diff --git a/drivers/net/wireless/ti/wlcore/Kconfig b/drivers/net/wireless/ti/wlcore/Kconfig
index 2b83282..7c09954 100644
--- a/drivers/net/wireless/ti/wlcore/Kconfig
+++ b/drivers/net/wireless/ti/wlcore/Kconfig
@@ -1,6 +1,6 @@
config WLCORE
tristate "TI wlcore support"
- depends on WL_TI && GENERIC_HARDIRQS && MAC80211
+ depends on WL_TI && MAC80211
select FW_LOADER
---help---
This module contains the main code for TI WLAN chips. It abstracts
diff --git a/drivers/net/wireless/ti/wlcore/cmd.c b/drivers/net/wireless/ti/wlcore/cmd.c
index c9e0607..9e5416f8 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.c
+++ b/drivers/net/wireless/ti/wlcore/cmd.c
@@ -1126,6 +1126,8 @@ int wl12xx_cmd_build_probe_req(struct wl1271 *wl, struct wl12xx_vif *wlvif,
u16 template_id_2_4 = wl->scan_templ_id_2_4;
u16 template_id_5 = wl->scan_templ_id_5;
+ wl1271_debug(DEBUG_SCAN, "build probe request band %d", band);
+
skb = ieee80211_probereq_get(wl->hw, vif, ssid, ssid_len,
ie_len);
if (!skb) {
@@ -1135,8 +1137,6 @@ int wl12xx_cmd_build_probe_req(struct wl1271 *wl, struct wl12xx_vif *wlvif,
if (ie_len)
memcpy(skb_put(skb, ie_len), ie, ie_len);
- wl1271_dump(DEBUG_SCAN, "PROBE REQ: ", skb->data, skb->len);
-
if (sched_scan &&
(wl->quirks & WLCORE_QUIRK_DUAL_PROBE_TMPL)) {
template_id_2_4 = wl->sched_scan_templ_id_2_4;
@@ -1172,7 +1172,7 @@ struct sk_buff *wl1271_cmd_build_ap_probe_req(struct wl1271 *wl,
if (!skb)
goto out;
- wl1271_dump(DEBUG_SCAN, "AP PROBE REQ: ", skb->data, skb->len);
+ wl1271_debug(DEBUG_SCAN, "set ap probe request template");
rate = wl1271_tx_min_rate_get(wl, wlvif->bitrate_masks[wlvif->band]);
if (wlvif->band == IEEE80211_BAND_2GHZ)
@@ -1607,33 +1607,43 @@ out:
static int wlcore_get_reg_conf_ch_idx(enum ieee80211_band band, u16 ch)
{
- int idx = -1;
-
+ /*
+ * map the given band/channel to the respective predefined
+ * bit expected by the fw
+ */
switch (band) {
- case IEEE80211_BAND_5GHZ:
- if (ch >= 8 && ch <= 16)
- idx = ((ch-8)/4 + 18);
- else if (ch >= 34 && ch <= 64)
- idx = ((ch-34)/2 + 3 + 18);
- else if (ch >= 100 && ch <= 140)
- idx = ((ch-100)/4 + 15 + 18);
- else if (ch >= 149 && ch <= 165)
- idx = ((ch-149)/4 + 26 + 18);
- else
- idx = -1;
- break;
case IEEE80211_BAND_2GHZ:
+ /* channels 1..14 are mapped to 0..13 */
if (ch >= 1 && ch <= 14)
- idx = ch - 1;
- else
- idx = -1;
+ return ch - 1;
+ break;
+ case IEEE80211_BAND_5GHZ:
+ switch (ch) {
+ case 8 ... 16:
+ /* channels 8,12,16 are mapped to 18,19,20 */
+ return 18 + (ch-8)/4;
+ case 34 ... 48:
+ /* channels 34,36..48 are mapped to 21..28 */
+ return 21 + (ch-34)/2;
+ case 52 ... 64:
+ /* channels 52,56..64 are mapped to 29..32 */
+ return 29 + (ch-52)/4;
+ case 100 ... 140:
+ /* channels 100,104..140 are mapped to 33..43 */
+ return 33 + (ch-100)/4;
+ case 149 ... 165:
+ /* channels 149,153..165 are mapped to 44..48 */
+ return 44 + (ch-149)/4;
+ default:
+ break;
+ }
break;
default:
- wl1271_error("get reg conf ch idx - unknown band: %d",
- (int)band);
+ break;
}
- return idx;
+ wl1271_error("%s: unknown band/channel: %d/%d", __func__, band, ch);
+ return -1;
}
void wlcore_set_pending_regdomain_ch(struct wl1271 *wl, u16 channel,
@@ -1646,7 +1656,7 @@ void wlcore_set_pending_regdomain_ch(struct wl1271 *wl, u16 channel,
ch_bit_idx = wlcore_get_reg_conf_ch_idx(band, channel);
- if (ch_bit_idx > 0 && ch_bit_idx <= WL1271_MAX_CHANNELS)
+ if (ch_bit_idx >= 0 && ch_bit_idx <= WL1271_MAX_CHANNELS)
set_bit(ch_bit_idx, (long *)wl->reg_ch_conf_pending);
}
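
Spot checks of the new mapping against the inline comments (hypothetical assertions, assuming the static helper and band constants were exposed to a userspace test harness):

	assert(wlcore_get_reg_conf_ch_idx(IEEE80211_BAND_2GHZ, 11) == 10);
	assert(wlcore_get_reg_conf_ch_idx(IEEE80211_BAND_5GHZ, 36) == 22);   /* 21 + (36-34)/2   */
	assert(wlcore_get_reg_conf_ch_idx(IEEE80211_BAND_5GHZ, 104) == 34);  /* 33 + (104-100)/4 */
	assert(wlcore_get_reg_conf_ch_idx(IEEE80211_BAND_5GHZ, 165) == 48);  /* 44 + (165-149)/4 */
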
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index 38995f9..bbdd106 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -1062,7 +1062,8 @@ int wl1271_plt_start(struct wl1271 *wl, const enum plt_mode plt_mode)
static const char* const PLT_MODE[] = {
"PLT_OFF",
"PLT_ON",
- "PLT_FEM_DETECT"
+ "PLT_FEM_DETECT",
+ "PLT_CHIP_AWAKE"
};
int ret;
@@ -1088,9 +1089,11 @@ int wl1271_plt_start(struct wl1271 *wl, const enum plt_mode plt_mode)
if (ret < 0)
goto power_off;
- ret = wl->ops->plt_init(wl);
- if (ret < 0)
- goto power_off;
+ if (plt_mode != PLT_CHIP_AWAKE) {
+ ret = wl->ops->plt_init(wl);
+ if (ret < 0)
+ goto power_off;
+ }
wl->state = WLCORE_STATE_ON;
wl1271_notice("firmware booted in PLT mode %s (%s)",
@@ -2008,6 +2011,47 @@ out:
mutex_unlock(&wl->mutex);
}
+static void wlcore_pending_auth_complete_work(struct work_struct *work)
+{
+ struct delayed_work *dwork;
+ struct wl1271 *wl;
+ struct wl12xx_vif *wlvif;
+ unsigned long time_spare;
+ int ret;
+
+ dwork = container_of(work, struct delayed_work, work);
+ wlvif = container_of(dwork, struct wl12xx_vif,
+ pending_auth_complete_work);
+ wl = wlvif->wl;
+
+ mutex_lock(&wl->mutex);
+
+ if (unlikely(wl->state != WLCORE_STATE_ON))
+ goto out;
+
+ /*
+ * Make sure a full second has really passed since the last auth
+ * reply; another auth reply may have arrived while we were stuck
+ * on the mutex. Check against slightly less than the timeout to
+ * protect against scheduler irregularities.
+ */
+ time_spare = jiffies +
+ msecs_to_jiffies(WLCORE_PEND_AUTH_ROC_TIMEOUT - 50);
+ if (!time_after(time_spare, wlvif->pending_auth_reply_time))
+ goto out;
+
+ ret = wl1271_ps_elp_wakeup(wl);
+ if (ret < 0)
+ goto out;
+
+ /* cancel the ROC if active */
+ wlcore_update_inconn_sta(wl, wlvif, NULL, false);
+
+ wl1271_ps_elp_sleep(wl);
+out:
+ mutex_unlock(&wl->mutex);
+}
+
static int wl12xx_allocate_rate_policy(struct wl1271 *wl, u8 *idx)
{
u8 policy = find_first_zero_bit(wl->rate_policies_map,
@@ -2159,6 +2203,8 @@ static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
wlcore_channel_switch_work);
INIT_DELAYED_WORK(&wlvif->connection_loss_work,
wlcore_connection_loss_work);
+ INIT_DELAYED_WORK(&wlvif->pending_auth_complete_work,
+ wlcore_pending_auth_complete_work);
INIT_LIST_HEAD(&wlvif->list);
setup_timer(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer,
@@ -2376,6 +2422,11 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
int ret = 0;
u8 role_type;
+ if (wl->plt) {
+ wl1271_error("Adding Interface not allowed while in PLT mode");
+ return -EBUSY;
+ }
+
vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
IEEE80211_VIF_SUPPORTS_CQM_RSSI;
@@ -2590,6 +2641,7 @@ unlock:
cancel_work_sync(&wlvif->rx_streaming_disable_work);
cancel_delayed_work_sync(&wlvif->connection_loss_work);
cancel_delayed_work_sync(&wlvif->channel_switch_work);
+ cancel_delayed_work_sync(&wlvif->pending_auth_complete_work);
mutex_lock(&wl->mutex);
}
@@ -2875,6 +2927,25 @@ static void wl1271_set_band_rate(struct wl1271 *wl, struct wl12xx_vif *wlvif)
wlvif->rate_set = wlvif->basic_rate_set;
}
+static void wl1271_sta_handle_idle(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ bool idle)
+{
+ bool cur_idle = !test_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
+
+ if (idle == cur_idle)
+ return;
+
+ if (idle) {
+ clear_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
+ } else {
+ /* The current firmware only supports sched_scan in idle */
+ if (wl->sched_vif == wlvif)
+ wl->ops->sched_scan_stop(wl, wlvif);
+
+ set_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
+ }
+}
+
static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif,
struct ieee80211_conf *conf, u32 changed)
{
@@ -3969,6 +4040,13 @@ static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
}
} else {
if (test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
+ /*
+ * The AP might be in ROC because we have just
+ * sent an auth reply; handle it.
+ */
+ if (test_bit(wlvif->role_id, wl->roc_map))
+ wl12xx_croc(wl, wlvif->role_id);
+
ret = wl12xx_cmd_role_stop_ap(wl, wlvif);
if (ret < 0)
goto out;
@@ -4120,6 +4198,9 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
do_join = true;
}
+ if (changed & BSS_CHANGED_IDLE && !is_ibss)
+ wl1271_sta_handle_idle(wl, wlvif, bss_conf->idle);
+
if (changed & BSS_CHANGED_CQM) {
bool enable = false;
if (bss_conf->cqm_rssi_thold)
@@ -4656,29 +4737,49 @@ static void wlcore_roc_if_possible(struct wl1271 *wl,
wl12xx_roc(wl, wlvif, wlvif->role_id, wlvif->band, wlvif->channel);
}
-static void wlcore_update_inconn_sta(struct wl1271 *wl,
- struct wl12xx_vif *wlvif,
- struct wl1271_station *wl_sta,
- bool in_connection)
+/*
+ * when wl_sta is NULL, we treat this call as if coming from a
+ * pending auth reply.
+ * wl->mutex must be taken and the FW must be awake when the call
+ * takes place.
+ */
+void wlcore_update_inconn_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ struct wl1271_station *wl_sta, bool in_conn)
{
- if (in_connection) {
- if (WARN_ON(wl_sta->in_connection))
+ if (in_conn) {
+ if (WARN_ON(wl_sta && wl_sta->in_connection))
return;
- wl_sta->in_connection = true;
- if (!wlvif->inconn_count++)
+
+ if (!wlvif->ap_pending_auth_reply &&
+ !wlvif->inconn_count)
wlcore_roc_if_possible(wl, wlvif);
+
+ if (wl_sta) {
+ wl_sta->in_connection = true;
+ wlvif->inconn_count++;
+ } else {
+ wlvif->ap_pending_auth_reply = true;
+ }
} else {
- if (!wl_sta->in_connection)
+ if (wl_sta && !wl_sta->in_connection)
+ return;
+
+ if (WARN_ON(!wl_sta && !wlvif->ap_pending_auth_reply))
return;
- wl_sta->in_connection = false;
- wlvif->inconn_count--;
- if (WARN_ON(wlvif->inconn_count < 0))
+ if (WARN_ON(wl_sta && !wlvif->inconn_count))
return;
- if (!wlvif->inconn_count)
- if (test_bit(wlvif->role_id, wl->roc_map))
- wl12xx_croc(wl, wlvif->role_id);
+ if (wl_sta) {
+ wl_sta->in_connection = false;
+ wlvif->inconn_count--;
+ } else {
+ wlvif->ap_pending_auth_reply = false;
+ }
+
+ if (!wlvif->inconn_count && !wlvif->ap_pending_auth_reply &&
+ test_bit(wlvif->role_id, wl->roc_map))
+ wl12xx_croc(wl, wlvif->role_id);
}
}
@@ -5313,10 +5414,7 @@ static struct ieee80211_rate wl1271_rates_5ghz[] = {
/* 5 GHz band channels for WL1273 */
static struct ieee80211_channel wl1271_channels_5ghz[] = {
- { .hw_value = 7, .center_freq = 5035, .max_power = WLCORE_MAX_TXPWR },
{ .hw_value = 8, .center_freq = 5040, .max_power = WLCORE_MAX_TXPWR },
- { .hw_value = 9, .center_freq = 5045, .max_power = WLCORE_MAX_TXPWR },
- { .hw_value = 11, .center_freq = 5055, .max_power = WLCORE_MAX_TXPWR },
{ .hw_value = 12, .center_freq = 5060, .max_power = WLCORE_MAX_TXPWR },
{ .hw_value = 16, .center_freq = 5080, .max_power = WLCORE_MAX_TXPWR },
{ .hw_value = 34, .center_freq = 5170, .max_power = WLCORE_MAX_TXPWR },
@@ -5896,14 +5994,20 @@ static const struct wiphy_wowlan_support wlcore_wowlan_support = {
};
#endif
+static irqreturn_t wlcore_hardirq(int irq, void *cookie)
+{
+ return IRQ_WAKE_THREAD;
+}
+
static void wlcore_nvs_cb(const struct firmware *fw, void *context)
{
struct wl1271 *wl = context;
struct platform_device *pdev = wl->pdev;
- struct wlcore_platdev_data *pdev_data = pdev->dev.platform_data;
+ struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
struct wl12xx_platform_data *pdata = pdev_data->pdata;
unsigned long irqflags;
int ret;
+ irq_handler_t hardirq_fn = NULL;
if (fw) {
wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
@@ -5932,12 +6036,14 @@ static void wlcore_nvs_cb(const struct firmware *fw, void *context)
wl->platform_quirks = pdata->platform_quirks;
wl->if_ops = pdev_data->if_ops;
- if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
+ if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ) {
irqflags = IRQF_TRIGGER_RISING;
- else
+ hardirq_fn = wlcore_hardirq;
+ } else {
irqflags = IRQF_TRIGGER_HIGH | IRQF_ONESHOT;
+ }
- ret = request_threaded_irq(wl->irq, NULL, wlcore_irq,
+ ret = request_threaded_irq(wl->irq, hardirq_fn, wlcore_irq,
irqflags, pdev->name, wl);
if (ret < 0) {
wl1271_error("request_irq() failed: %d", ret);
diff --git a/drivers/net/wireless/ti/wlcore/ps.c b/drivers/net/wireless/ti/wlcore/ps.c
index 98066d4..26bfc36 100644
--- a/drivers/net/wireless/ti/wlcore/ps.c
+++ b/drivers/net/wireless/ti/wlcore/ps.c
@@ -83,6 +83,10 @@ void wl1271_ps_elp_sleep(struct wl1271 *wl)
struct wl12xx_vif *wlvif;
u32 timeout;
+ /* We do not enter elp sleep in PLT mode */
+ if (wl->plt)
+ return;
+
if (wl->sleep_auth != WL1271_PSM_ELP)
return;
diff --git a/drivers/net/wireless/ti/wlcore/scan.c b/drivers/net/wireless/ti/wlcore/scan.c
index f407101..13e743d 100644
--- a/drivers/net/wireless/ti/wlcore/scan.c
+++ b/drivers/net/wireless/ti/wlcore/scan.c
@@ -174,17 +174,7 @@ wlcore_scan_get_channels(struct wl1271 *wl,
/* if radar is set, we ignore the passive flag */
(radar ||
!!(flags & IEEE80211_CHAN_PASSIVE_SCAN) == passive)) {
- wl1271_debug(DEBUG_SCAN, "band %d, center_freq %d ",
- req_channels[i]->band,
- req_channels[i]->center_freq);
- wl1271_debug(DEBUG_SCAN, "hw_value %d, flags %X",
- req_channels[i]->hw_value,
- req_channels[i]->flags);
- wl1271_debug(DEBUG_SCAN, "max_power %d",
- req_channels[i]->max_power);
- wl1271_debug(DEBUG_SCAN, "min_dwell_time %d max dwell time %d",
- min_dwell_time_active,
- max_dwell_time_active);
+
if (flags & IEEE80211_CHAN_RADAR) {
channels[j].flags |= SCAN_CHANNEL_FLAGS_DFS;
@@ -222,6 +212,17 @@ wlcore_scan_get_channels(struct wl1271 *wl,
*n_pactive_ch);
}
+ wl1271_debug(DEBUG_SCAN, "freq %d, ch. %d, flags 0x%x, power %d, min/max_dwell %d/%d%s%s",
+ req_channels[i]->center_freq,
+ req_channels[i]->hw_value,
+ req_channels[i]->flags,
+ req_channels[i]->max_power,
+ min_dwell_time_active,
+ max_dwell_time_active,
+ flags & IEEE80211_CHAN_RADAR ?
+ ", DFS" : "",
+ flags & IEEE80211_CHAN_PASSIVE_SCAN ?
+ ", PASSIVE" : "");
j++;
}
}
@@ -364,7 +365,7 @@ wlcore_scan_sched_scan_ssid_list(struct wl1271 *wl,
struct cfg80211_ssid *ssids = req->ssids;
int ret = 0, type, i, j, n_match_ssids = 0;
- wl1271_debug(DEBUG_CMD, "cmd sched scan ssid list");
+ wl1271_debug((DEBUG_CMD | DEBUG_SCAN), "cmd sched scan ssid list");
/* count the match sets that contain SSIDs */
for (i = 0; i < req->n_match_sets; i++)
@@ -442,8 +443,6 @@ wlcore_scan_sched_scan_ssid_list(struct wl1271 *wl,
}
}
- wl1271_dump(DEBUG_SCAN, "SSID_LIST: ", cmd, sizeof(*cmd));
-
ret = wl1271_cmd_send(wl, CMD_CONNECTION_SCAN_SSID_CFG, cmd,
sizeof(*cmd), 0);
if (ret < 0) {
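
The consolidated debug line above relies on a small C idiom: empty-string alternatives keep the format string fixed while flags contribute optional suffixes. A generic sketch (the FLAG_* names are placeholders):

	printk(KERN_DEBUG "ch %d%s%s\n", ch,
	       (flags & FLAG_DFS) ? ", DFS" : "",
	       (flags & FLAG_PASSIVE) ? ", PASSIVE" : "");
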
diff --git a/drivers/net/wireless/ti/wlcore/spi.c b/drivers/net/wireless/ti/wlcore/spi.c
index 1b0cd98..b2c018d 100644
--- a/drivers/net/wireless/ti/wlcore/spi.c
+++ b/drivers/net/wireless/ti/wlcore/spi.c
@@ -335,7 +335,7 @@ static int wl1271_probe(struct spi_device *spi)
if (!pdev_data)
goto out;
- pdev_data->pdata = spi->dev.platform_data;
+ pdev_data->pdata = dev_get_platdata(&spi->dev);
if (!pdev_data->pdata) {
dev_err(&spi->dev, "no platform data\n");
ret = -ENODEV;
diff --git a/drivers/net/wireless/ti/wlcore/testmode.c b/drivers/net/wireless/ti/wlcore/testmode.c
index 527590f..a3b7d95 100644
--- a/drivers/net/wireless/ti/wlcore/testmode.c
+++ b/drivers/net/wireless/ti/wlcore/testmode.c
@@ -297,7 +297,8 @@ static int wl1271_tm_cmd_set_plt_mode(struct wl1271 *wl, struct nlattr *tb[])
ret = wl1271_plt_stop(wl);
break;
case PLT_ON:
- ret = wl1271_plt_start(wl, PLT_ON);
+ case PLT_CHIP_AWAKE:
+ ret = wl1271_plt_start(wl, val);
break;
case PLT_FEM_DETECT:
ret = wl1271_tm_detect_fem(wl, tb);
@@ -361,6 +362,7 @@ int wl1271_tm_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
{
struct wl1271 *wl = hw->priv;
struct nlattr *tb[WL1271_TM_ATTR_MAX + 1];
+ u32 nla_cmd;
int err;
err = nla_parse(tb, WL1271_TM_ATTR_MAX, data, len, wl1271_tm_policy);
@@ -370,7 +372,14 @@ int wl1271_tm_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
if (!tb[WL1271_TM_ATTR_CMD_ID])
return -EINVAL;
- switch (nla_get_u32(tb[WL1271_TM_ATTR_CMD_ID])) {
+ nla_cmd = nla_get_u32(tb[WL1271_TM_ATTR_CMD_ID]);
+
+ /* Only SET_PLT_MODE is allowed while in PLT_CHIP_AWAKE mode */
+ if (wl->plt_mode == PLT_CHIP_AWAKE &&
+ nla_cmd != WL1271_TM_CMD_SET_PLT_MODE)
+ return -EOPNOTSUPP;
+
+ switch (nla_cmd) {
case WL1271_TM_CMD_TEST:
return wl1271_tm_cmd_test(wl, tb);
case WL1271_TM_CMD_INTERROGATE:
diff --git a/drivers/net/wireless/ti/wlcore/tx.c b/drivers/net/wireless/ti/wlcore/tx.c
index 7e93fe6..87cd707 100644
--- a/drivers/net/wireless/ti/wlcore/tx.c
+++ b/drivers/net/wireless/ti/wlcore/tx.c
@@ -86,19 +86,34 @@ void wl1271_free_tx_id(struct wl1271 *wl, int id)
EXPORT_SYMBOL(wl1271_free_tx_id);
static void wl1271_tx_ap_update_inconnection_sta(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif,
struct sk_buff *skb)
{
struct ieee80211_hdr *hdr;
+ hdr = (struct ieee80211_hdr *)(skb->data +
+ sizeof(struct wl1271_tx_hw_descr));
+ if (!ieee80211_is_auth(hdr->frame_control))
+ return;
+
/*
* add the station to the known list before transmitting the
* authentication response. this way it won't get de-authed by FW
* when transmitting too soon.
*/
- hdr = (struct ieee80211_hdr *)(skb->data +
- sizeof(struct wl1271_tx_hw_descr));
- if (ieee80211_is_auth(hdr->frame_control))
- wl1271_acx_set_inconnection_sta(wl, hdr->addr1);
+ wl1271_acx_set_inconnection_sta(wl, hdr->addr1);
+
+ /*
+ * ROC for 1 second on the AP channel for completing the connection.
+ * Note the ROC will be continued by the update_sta_state callbacks
+ * once the station reaches the associated state.
+ */
+ wlcore_update_inconn_sta(wl, wlvif, NULL, true);
+ wlvif->pending_auth_reply_time = jiffies;
+ cancel_delayed_work(&wlvif->pending_auth_complete_work);
+ ieee80211_queue_delayed_work(wl->hw,
+ &wlvif->pending_auth_complete_work,
+ msecs_to_jiffies(WLCORE_PEND_AUTH_ROC_TIMEOUT));
}
static void wl1271_tx_regulate_link(struct wl1271 *wl,
@@ -386,7 +401,7 @@ static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct wl12xx_vif *wlvif,
is_wep = (cipher == WLAN_CIPHER_SUITE_WEP40) ||
(cipher == WLAN_CIPHER_SUITE_WEP104);
- if (WARN_ON(is_wep && wlvif->default_key != idx)) {
+ if (WARN_ON(is_wep && wlvif && wlvif->default_key != idx)) {
ret = wl1271_set_default_wep_key(wl, wlvif, idx);
if (ret < 0)
return ret;
@@ -404,7 +419,7 @@ static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct wl12xx_vif *wlvif,
wl1271_tx_fill_hdr(wl, wlvif, skb, extra, info, hlid);
if (!is_dummy && wlvif && wlvif->bss_type == BSS_TYPE_AP_BSS) {
- wl1271_tx_ap_update_inconnection_sta(wl, skb);
+ wl1271_tx_ap_update_inconnection_sta(wl, wlvif, skb);
wl1271_tx_regulate_link(wl, wlvif, hlid);
}
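
The pending-auth timeout above uses the standard jiffies helpers; as a reminder, comparisons must go through time_after()/time_before() so that jiffies wraparound is handled correctly. A generic sketch:

	unsigned long deadline = jiffies + msecs_to_jiffies(1000);

	/* ... later ... */
	if (time_after(jiffies, deadline))
		pr_debug("at least one second has elapsed\n");
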
diff --git a/drivers/net/wireless/ti/wlcore/tx.h b/drivers/net/wireless/ti/wlcore/tx.h
index 55aa4ac..35489c3 100644
--- a/drivers/net/wireless/ti/wlcore/tx.h
+++ b/drivers/net/wireless/ti/wlcore/tx.h
@@ -56,6 +56,9 @@
/* Used for management frames and dummy packets */
#define WL1271_TID_MGMT 7
+/* stop a ROC for pending authentication reply after this time (ms) */
+#define WLCORE_PEND_AUTH_ROC_TIMEOUT 1000
+
struct wl127x_tx_mem {
/*
* Number of extra memory blocks to allocate for this packet
diff --git a/drivers/net/wireless/ti/wlcore/wlcore.h b/drivers/net/wireless/ti/wlcore/wlcore.h
index 0034979..54ce5d5 100644
--- a/drivers/net/wireless/ti/wlcore/wlcore.h
+++ b/drivers/net/wireless/ti/wlcore/wlcore.h
@@ -481,6 +481,8 @@ int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
struct ieee80211_sta *sta,
struct ieee80211_key_conf *key_conf);
void wlcore_regdomain_config(struct wl1271 *wl);
+void wlcore_update_inconn_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ struct wl1271_station *wl_sta, bool in_conn);
static inline void
wlcore_set_ht_cap(struct wl1271 *wl, enum ieee80211_band band,
diff --git a/drivers/net/wireless/ti/wlcore/wlcore_i.h b/drivers/net/wireless/ti/wlcore/wlcore_i.h
index e5e1464..2a50e08 100644
--- a/drivers/net/wireless/ti/wlcore/wlcore_i.h
+++ b/drivers/net/wireless/ti/wlcore/wlcore_i.h
@@ -255,6 +255,7 @@ enum wl12xx_vif_flags {
WLVIF_FLAG_CS_PROGRESS,
WLVIF_FLAG_AP_PROBE_RESP_SET,
WLVIF_FLAG_IN_USE,
+ WLVIF_FLAG_ACTIVE,
};
struct wl12xx_vif;
@@ -307,6 +308,7 @@ enum plt_mode {
PLT_OFF = 0,
PLT_ON = 1,
PLT_FEM_DETECT = 2,
+ PLT_CHIP_AWAKE = 3
};
struct wl12xx_rx_filter_field {
@@ -456,6 +458,15 @@ struct wl12xx_vif {
*/
int hw_queue_base;
+ /* do we have a pending auth reply? (and ROC) */
+ bool ap_pending_auth_reply;
+
+ /* time when we sent the pending auth reply */
+ unsigned long pending_auth_reply_time;
+
+ /* work for canceling ROC after pending auth reply */
+ struct delayed_work pending_auth_complete_work;
+
/*
* This struct must be last!
* data that has to be saved across reconfigs (e.g. recovery)
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index a197743..5715318 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -184,6 +184,7 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
unsigned long rx_ring_ref, unsigned int tx_evtchn,
unsigned int rx_evtchn);
void xenvif_disconnect(struct xenvif *vif);
+void xenvif_free(struct xenvif *vif);
int xenvif_xenbus_init(void);
void xenvif_xenbus_fini(void);
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 625c6f4..01bb854 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -353,6 +353,9 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
}
netdev_dbg(dev, "Successfully created xenvif\n");
+
+ __module_get(THIS_MODULE);
+
return vif;
}
@@ -366,8 +369,6 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
if (vif->tx_irq)
return 0;
- __module_get(THIS_MODULE);
-
err = xenvif_map_frontend_rings(vif, tx_ring_ref, rx_ring_ref);
if (err < 0)
goto err;
@@ -406,7 +407,7 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
init_waitqueue_head(&vif->wq);
vif->task = kthread_create(xenvif_kthread,
- (void *)vif, vif->dev->name);
+ (void *)vif, "%s", vif->dev->name);
if (IS_ERR(vif->task)) {
pr_warn("Could not allocate kthread for %s\n", vif->dev->name);
err = PTR_ERR(vif->task);
@@ -452,12 +453,6 @@ void xenvif_carrier_off(struct xenvif *vif)
void xenvif_disconnect(struct xenvif *vif)
{
- /* Disconnect funtion might get called by generic framework
- * even before vif connects, so we need to check if we really
- * need to do a module_put.
- */
- int need_module_put = 0;
-
if (netif_carrier_ok(vif->dev))
xenvif_carrier_off(vif);
@@ -468,23 +463,22 @@ void xenvif_disconnect(struct xenvif *vif)
unbind_from_irqhandler(vif->tx_irq, vif);
unbind_from_irqhandler(vif->rx_irq, vif);
}
- /* vif->irq is valid, we had a module_get in
- * xenvif_connect.
- */
- need_module_put = 1;
+ vif->tx_irq = 0;
}
if (vif->task)
kthread_stop(vif->task);
+ xenvif_unmap_frontend_rings(vif);
+}
+
+void xenvif_free(struct xenvif *vif)
+{
netif_napi_del(&vif->napi);
unregister_netdev(vif->dev);
- xenvif_unmap_frontend_rings(vif);
-
free_netdev(vif->dev);
- if (need_module_put)
- module_put(THIS_MODULE);
+ module_put(THIS_MODULE);
}
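
Moving __module_get() from connect time to xenvif_alloc(), with the matching module_put() in the new xenvif_free(), pins the module for the whole life of the vif object rather than only while it is connected. Schematically (a condensed sketch, not the literal code):

	vif = xenvif_alloc(...);	/* takes __module_get(THIS_MODULE)  */
	xenvif_connect(vif, ...);	/* no refcounting here any more     */
	xenvif_disconnect(vif);		/* safe even if never connected     */
	xenvif_free(vif);		/* drops module_put(THIS_MODULE)    */
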
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 956130c..f3e591c 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -212,6 +212,49 @@ static bool start_new_rx_buffer(int offset, unsigned long size, int head)
return false;
}
+struct xenvif_count_slot_state {
+ unsigned long copy_off;
+ bool head;
+};
+
+unsigned int xenvif_count_frag_slots(struct xenvif *vif,
+ unsigned long offset, unsigned long size,
+ struct xenvif_count_slot_state *state)
+{
+ unsigned count = 0;
+
+ offset &= ~PAGE_MASK;
+
+ while (size > 0) {
+ unsigned long bytes;
+
+ bytes = PAGE_SIZE - offset;
+
+ if (bytes > size)
+ bytes = size;
+
+ if (start_new_rx_buffer(state->copy_off, bytes, state->head)) {
+ count++;
+ state->copy_off = 0;
+ }
+
+ if (state->copy_off + bytes > MAX_BUFFER_OFFSET)
+ bytes = MAX_BUFFER_OFFSET - state->copy_off;
+
+ state->copy_off += bytes;
+
+ offset += bytes;
+ size -= bytes;
+
+ if (offset == PAGE_SIZE)
+ offset = 0;
+
+ state->head = false;
+ }
+
+ return count;
+}
+
/*
* Figure out how many ring slots we're going to need to send @skb to
* the guest. This function is essentially a dry run of
@@ -219,48 +262,39 @@ static bool start_new_rx_buffer(int offset, unsigned long size, int head)
*/
unsigned int xenvif_count_skb_slots(struct xenvif *vif, struct sk_buff *skb)
{
+ struct xenvif_count_slot_state state;
unsigned int count;
- int i, copy_off;
+ unsigned char *data;
+ unsigned i;
- count = DIV_ROUND_UP(skb_headlen(skb), PAGE_SIZE);
+ state.head = true;
+ state.copy_off = 0;
- copy_off = skb_headlen(skb) % PAGE_SIZE;
+ /* Slot for the first (partial) page of data. */
+ count = 1;
+ /* Need a slot for the GSO prefix for GSO extra data? */
if (skb_shinfo(skb)->gso_size)
count++;
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- unsigned long size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
- unsigned long offset = skb_shinfo(skb)->frags[i].page_offset;
- unsigned long bytes;
-
- offset &= ~PAGE_MASK;
-
- while (size > 0) {
- BUG_ON(offset >= PAGE_SIZE);
- BUG_ON(copy_off > MAX_BUFFER_OFFSET);
-
- bytes = PAGE_SIZE - offset;
-
- if (bytes > size)
- bytes = size;
+ data = skb->data;
+ while (data < skb_tail_pointer(skb)) {
+ unsigned long offset = offset_in_page(data);
+ unsigned long size = PAGE_SIZE - offset;
- if (start_new_rx_buffer(copy_off, bytes, 0)) {
- count++;
- copy_off = 0;
- }
+ if (data + size > skb_tail_pointer(skb))
+ size = skb_tail_pointer(skb) - data;
- if (copy_off + bytes > MAX_BUFFER_OFFSET)
- bytes = MAX_BUFFER_OFFSET - copy_off;
+ count += xenvif_count_frag_slots(vif, offset, size, &state);
- copy_off += bytes;
+ data += size;
+ }
- offset += bytes;
- size -= bytes;
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ unsigned long size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
+ unsigned long offset = skb_shinfo(skb)->frags[i].page_offset;
- if (offset == PAGE_SIZE)
- offset = 0;
- }
+ count += xenvif_count_frag_slots(vif, offset, size, &state);
}
return count;
}
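
The new head walk handles linear skb data that spans page boundaries by chopping it into per-page chunks and feeding each chunk through the same slot-counting state machine as the frags. The core stepping logic, condensed into a sketch:

	unsigned char *data = skb->data;

	while (data < skb_tail_pointer(skb)) {
		unsigned long offset = offset_in_page(data);
		unsigned long size = min_t(unsigned long, PAGE_SIZE - offset,
					   skb_tail_pointer(skb) - data);

		/* account 'size' bytes starting at page offset 'offset' */
		data += size;
	}
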
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index 1fe48fe3..a53782e 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -42,7 +42,7 @@ static int netback_remove(struct xenbus_device *dev)
if (be->vif) {
kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE);
xenbus_rm(XBT_NIL, dev->nodename, "hotplug-status");
- xenvif_disconnect(be->vif);
+ xenvif_free(be->vif);
be->vif = NULL;
}
kfree(be);
@@ -213,9 +213,18 @@ static void disconnect_backend(struct xenbus_device *dev)
{
struct backend_info *be = dev_get_drvdata(&dev->dev);
+ if (be->vif)
+ xenvif_disconnect(be->vif);
+}
+
+static void destroy_backend(struct xenbus_device *dev)
+{
+ struct backend_info *be = dev_get_drvdata(&dev->dev);
+
if (be->vif) {
+ kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE);
xenbus_rm(XBT_NIL, dev->nodename, "hotplug-status");
- xenvif_disconnect(be->vif);
+ xenvif_free(be->vif);
be->vif = NULL;
}
}
@@ -246,14 +255,11 @@ static void frontend_changed(struct xenbus_device *dev,
case XenbusStateConnected:
if (dev->state == XenbusStateConnected)
break;
- backend_create_xenvif(be);
if (be->vif)
connect(be);
break;
case XenbusStateClosing:
- if (be->vif)
- kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE);
disconnect_backend(dev);
xenbus_switch_state(dev, XenbusStateClosing);
break;
@@ -262,6 +268,7 @@ static void frontend_changed(struct xenbus_device *dev,
xenbus_switch_state(dev, XenbusStateClosed);
if (xenbus_dev_is_online(dev))
break;
+ destroy_backend(dev);
/* fall through if not online */
case XenbusStateUnknown:
device_unregister(&dev->dev);
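
Teardown is now split across two frontend states so the rings are quiesced early while the netdev survives until the device is truly gone, allowing a frontend to reconnect between Closing and Closed. Condensed from frontend_changed() above (a sketch, not the literal switch):

	case XenbusStateClosing:
		disconnect_backend(dev);	/* stop I/O, unbind IRQs, keep netdev */
		break;
	case XenbusStateClosed:
		destroy_backend(dev);		/* unregister netdev, free the vif   */
		break;
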
diff --git a/drivers/ntb/Kconfig b/drivers/ntb/Kconfig
index 37ee649..f69df793 100644
--- a/drivers/ntb/Kconfig
+++ b/drivers/ntb/Kconfig
@@ -1,7 +1,7 @@
config NTB
tristate "Intel Non-Transparent Bridge support"
depends on PCI
- depends on X86_64
+ depends on X86
help
The PCI-E Non-transparent bridge hardware is a point-to-point PCI-E bus
connecting 2 systems. When configured, writes to the device's PCI
diff --git a/drivers/ntb/ntb_hw.c b/drivers/ntb/ntb_hw.c
index 2dacd19..1cb6e51 100644
--- a/drivers/ntb/ntb_hw.c
+++ b/drivers/ntb/ntb_hw.c
@@ -46,24 +46,30 @@
* Jon Mason <jon.mason@intel.com>
*/
#include <linux/debugfs.h>
+#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/random.h>
#include <linux/slab.h>
#include "ntb_hw.h"
#include "ntb_regs.h"
#define NTB_NAME "Intel(R) PCI-E Non-Transparent Bridge Driver"
-#define NTB_VER "0.25"
+#define NTB_VER "1.0"
MODULE_DESCRIPTION(NTB_NAME);
MODULE_VERSION(NTB_VER);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel Corporation");
+static bool xeon_errata_workaround = true;
+module_param(xeon_errata_workaround, bool, 0644);
+MODULE_PARM_DESC(xeon_errata_workaround, "Workaround for the Xeon Errata");
+
enum {
- NTB_CONN_CLASSIC = 0,
+ NTB_CONN_TRANSPARENT = 0,
NTB_CONN_B2B,
NTB_CONN_RP,
};
@@ -78,17 +84,27 @@ enum {
BWD_HW,
};
+static struct dentry *debugfs_dir;
+
+#define BWD_LINK_RECOVERY_TIME 500
+
/* Translate memory window 0,1 to BAR 2,4 */
-#define MW_TO_BAR(mw) (mw * 2 + 2)
+#define MW_TO_BAR(mw) (mw * NTB_MAX_NUM_MW + 2)
static DEFINE_PCI_DEVICE_TABLE(ntb_pci_tbl) = {
{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_BWD)},
{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_JSF)},
- {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_CLASSIC_JSF)},
- {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_RP_JSF)},
- {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_RP_SNB)},
{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_SNB)},
- {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_CLASSIC_SNB)},
+ {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_IVT)},
+ {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_HSX)},
+ {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_JSF)},
+ {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_SNB)},
+ {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_IVT)},
+ {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_HSX)},
+ {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_JSF)},
+ {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_SNB)},
+ {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_IVT)},
+ {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_HSX)},
{0}
};
MODULE_DEVICE_TABLE(pci, ntb_pci_tbl);
@@ -129,6 +145,7 @@ void ntb_unregister_event_callback(struct ntb_device *ndev)
* ntb_register_db_callback() - register a callback for doorbell interrupt
* @ndev: pointer to ntb_device instance
* @idx: doorbell index to register callback, zero based
+ * @data: pointer to be returned to caller with every callback
* @func: callback function to register
*
* This function registers a callback function for the doorbell interrupt
@@ -151,9 +168,9 @@ int ntb_register_db_callback(struct ntb_device *ndev, unsigned int idx,
ndev->db_cb[idx].data = data;
/* unmask interrupt */
- mask = readw(ndev->reg_ofs.pdb_mask);
+ mask = readw(ndev->reg_ofs.ldb_mask);
clear_bit(idx * ndev->bits_per_vector, &mask);
- writew(mask, ndev->reg_ofs.pdb_mask);
+ writew(mask, ndev->reg_ofs.ldb_mask);
return 0;
}
@@ -173,9 +190,9 @@ void ntb_unregister_db_callback(struct ntb_device *ndev, unsigned int idx)
if (idx >= ndev->max_cbs || !ndev->db_cb[idx].callback)
return;
- mask = readw(ndev->reg_ofs.pdb_mask);
+ mask = readw(ndev->reg_ofs.ldb_mask);
set_bit(idx * ndev->bits_per_vector, &mask);
- writew(mask, ndev->reg_ofs.pdb_mask);
+ writew(mask, ndev->reg_ofs.ldb_mask);
ndev->db_cb[idx].callback = NULL;
}
@@ -334,6 +351,23 @@ int ntb_read_remote_spad(struct ntb_device *ndev, unsigned int idx, u32 *val)
}
/**
+ * ntb_get_mw_base() - get addr for the NTB memory window
+ * @ndev: pointer to ntb_device instance
+ * @mw: memory window number
+ *
+ * This function provides the base address of the memory window specified.
+ *
+ * RETURNS: the base address, or 0 on error.
+ */
+resource_size_t ntb_get_mw_base(struct ntb_device *ndev, unsigned int mw)
+{
+ if (mw >= ntb_max_mw(ndev))
+ return 0;
+
+ return pci_resource_start(ndev->pdev, MW_TO_BAR(mw));
+}
+
+/**
* ntb_get_mw_vbase() - get virtual addr for the NTB memory window
* @ndev: pointer to ntb_device instance
* @mw: memory window number
@@ -345,7 +379,7 @@ int ntb_read_remote_spad(struct ntb_device *ndev, unsigned int idx, u32 *val)
*/
void __iomem *ntb_get_mw_vbase(struct ntb_device *ndev, unsigned int mw)
{
- if (mw >= NTB_NUM_MW)
+ if (mw >= ntb_max_mw(ndev))
return NULL;
return ndev->mw[mw].vbase;
@@ -360,9 +394,9 @@ void __iomem *ntb_get_mw_vbase(struct ntb_device *ndev, unsigned int mw)
*
* RETURNS: the size of the memory window or zero on error
*/
-resource_size_t ntb_get_mw_size(struct ntb_device *ndev, unsigned int mw)
+u64 ntb_get_mw_size(struct ntb_device *ndev, unsigned int mw)
{
- if (mw >= NTB_NUM_MW)
+ if (mw >= ntb_max_mw(ndev))
return 0;
return ndev->mw[mw].bar_sz;
@@ -380,7 +414,7 @@ resource_size_t ntb_get_mw_size(struct ntb_device *ndev, unsigned int mw)
*/
void ntb_set_mw_addr(struct ntb_device *ndev, unsigned int mw, u64 addr)
{
- if (mw >= NTB_NUM_MW)
+ if (mw >= ntb_max_mw(ndev))
return;
dev_dbg(&ndev->pdev->dev, "Writing addr %Lx to BAR %d\n", addr,
@@ -390,16 +424,16 @@ void ntb_set_mw_addr(struct ntb_device *ndev, unsigned int mw, u64 addr)
switch (MW_TO_BAR(mw)) {
case NTB_BAR_23:
- writeq(addr, ndev->reg_ofs.sbar2_xlat);
+ writeq(addr, ndev->reg_ofs.bar2_xlat);
break;
case NTB_BAR_45:
- writeq(addr, ndev->reg_ofs.sbar4_xlat);
+ writeq(addr, ndev->reg_ofs.bar4_xlat);
break;
}
}
/**
- * ntb_ring_sdb() - Set the doorbell on the secondary/external side
+ * ntb_ring_doorbell() - Set the doorbell on the secondary/external side
* @ndev: pointer to ntb_device instance
* @db: doorbell to ring
*
@@ -408,15 +442,58 @@ void ntb_set_mw_addr(struct ntb_device *ndev, unsigned int mw, u64 addr)
*
* RETURNS: An appropriate -ERRNO error value on error, or zero for success.
*/
-void ntb_ring_sdb(struct ntb_device *ndev, unsigned int db)
+void ntb_ring_doorbell(struct ntb_device *ndev, unsigned int db)
{
dev_dbg(&ndev->pdev->dev, "%s: ringing doorbell %d\n", __func__, db);
if (ndev->hw_type == BWD_HW)
- writeq((u64) 1 << db, ndev->reg_ofs.sdb);
+ writeq((u64) 1 << db, ndev->reg_ofs.rdb);
else
writew(((1 << ndev->bits_per_vector) - 1) <<
- (db * ndev->bits_per_vector), ndev->reg_ofs.sdb);
+ (db * ndev->bits_per_vector), ndev->reg_ofs.rdb);
+}
+
+static void bwd_recover_link(struct ntb_device *ndev)
+{
+ u32 status;
+
+ /* Driver resets the NTB ModPhy lanes - magic! */
+ writeb(0xe0, ndev->reg_base + BWD_MODPHY_PCSREG6);
+ writeb(0x40, ndev->reg_base + BWD_MODPHY_PCSREG4);
+ writeb(0x60, ndev->reg_base + BWD_MODPHY_PCSREG4);
+ writeb(0x60, ndev->reg_base + BWD_MODPHY_PCSREG6);
+
+ /* Driver waits 100ms to allow the NTB ModPhy to settle */
+ msleep(100);
+
+ /* Clear AER Errors, write to clear */
+ status = readl(ndev->reg_base + BWD_ERRCORSTS_OFFSET);
+ dev_dbg(&ndev->pdev->dev, "ERRCORSTS = %x\n", status);
+ status &= PCI_ERR_COR_REP_ROLL;
+ writel(status, ndev->reg_base + BWD_ERRCORSTS_OFFSET);
+
+ /* Clear unexpected electrical idle event in LTSSM, write to clear */
+ status = readl(ndev->reg_base + BWD_LTSSMERRSTS0_OFFSET);
+ dev_dbg(&ndev->pdev->dev, "LTSSMERRSTS0 = %x\n", status);
+ status |= BWD_LTSSMERRSTS0_UNEXPECTEDEI;
+ writel(status, ndev->reg_base + BWD_LTSSMERRSTS0_OFFSET);
+
+ /* Clear DeSkew Buffer error, write to clear */
+ status = readl(ndev->reg_base + BWD_DESKEWSTS_OFFSET);
+ dev_dbg(&ndev->pdev->dev, "DESKEWSTS = %x\n", status);
+ status |= BWD_DESKEWSTS_DBERR;
+ writel(status, ndev->reg_base + BWD_DESKEWSTS_OFFSET);
+
+ status = readl(ndev->reg_base + BWD_IBSTERRRCRVSTS0_OFFSET);
+ dev_dbg(&ndev->pdev->dev, "IBSTERRRCRVSTS0 = %x\n", status);
+ status &= BWD_IBIST_ERR_OFLOW;
+ writel(status, ndev->reg_base + BWD_IBSTERRRCRVSTS0_OFFSET);
+
+ /* Releases the NTB state machine to allow the link to retrain */
+ status = readl(ndev->reg_base + BWD_LTSSMSTATEJMP_OFFSET);
+ dev_dbg(&ndev->pdev->dev, "LTSSMSTATEJMP = %x\n", status);
+ status &= ~BWD_LTSSMSTATEJMP_FORCEDETECT;
+ writel(status, ndev->reg_base + BWD_LTSSMSTATEJMP_OFFSET);
}
static void ntb_link_event(struct ntb_device *ndev, int link_state)
@@ -433,7 +510,8 @@ static void ntb_link_event(struct ntb_device *ndev, int link_state)
ndev->link_status = NTB_LINK_UP;
event = NTB_EVENT_HW_LINK_UP;
- if (ndev->hw_type == BWD_HW)
+ if (ndev->hw_type == BWD_HW ||
+ ndev->conn_type == NTB_CONN_TRANSPARENT)
status = readw(ndev->reg_ofs.lnk_stat);
else {
int rc = pci_read_config_word(ndev->pdev,
@@ -442,13 +520,16 @@ static void ntb_link_event(struct ntb_device *ndev, int link_state)
if (rc)
return;
}
+
+ ndev->link_width = (status & NTB_LINK_WIDTH_MASK) >> 4;
+ ndev->link_speed = (status & NTB_LINK_SPEED_MASK);
dev_info(&ndev->pdev->dev, "Link Width %d, Link Speed %d\n",
- (status & NTB_LINK_WIDTH_MASK) >> 4,
- (status & NTB_LINK_SPEED_MASK));
+ ndev->link_width, ndev->link_speed);
} else {
dev_info(&ndev->pdev->dev, "Link Down\n");
ndev->link_status = NTB_LINK_DOWN;
event = NTB_EVENT_HW_LINK_DOWN;
+ /* Don't modify link width/speed, we need it in link recovery */
}
/* notify the upper layer if we have an event change */
@@ -488,6 +569,47 @@ static int ntb_link_status(struct ntb_device *ndev)
return 0;
}
+static void bwd_link_recovery(struct work_struct *work)
+{
+ struct ntb_device *ndev = container_of(work, struct ntb_device,
+ lr_timer.work);
+ u32 status32;
+
+ bwd_recover_link(ndev);
+ /* There is a potential race between the 2 NTB devices recovering at the
+ * same time. If the times are the same, the link will not recover and
+ * the driver will be stuck in this loop forever. Add a random interval
+ * to the recovery time to prevent this race.
+ */
+ msleep(BWD_LINK_RECOVERY_TIME + prandom_u32() % BWD_LINK_RECOVERY_TIME);
+
+ status32 = readl(ndev->reg_base + BWD_LTSSMSTATEJMP_OFFSET);
+ if (status32 & BWD_LTSSMSTATEJMP_FORCEDETECT)
+ goto retry;
+
+ status32 = readl(ndev->reg_base + BWD_IBSTERRRCRVSTS0_OFFSET);
+ if (status32 & BWD_IBIST_ERR_OFLOW)
+ goto retry;
+
+ status32 = readl(ndev->reg_ofs.lnk_cntl);
+ if (!(status32 & BWD_CNTL_LINK_DOWN)) {
+ unsigned char speed, width;
+ u16 status16;
+
+ status16 = readw(ndev->reg_ofs.lnk_stat);
+ width = (status16 & NTB_LINK_WIDTH_MASK) >> 4;
+ speed = (status16 & NTB_LINK_SPEED_MASK);
+ if (ndev->link_width != width || ndev->link_speed != speed)
+ goto retry;
+ }
+
+ schedule_delayed_work(&ndev->hb_timer, NTB_HB_TIMEOUT);
+ return;
+
+retry:
+ schedule_delayed_work(&ndev->lr_timer, NTB_HB_TIMEOUT);
+}
+
/* BWD doesn't have link status interrupt, poll on that platform */
static void bwd_link_poll(struct work_struct *work)
{
@@ -503,6 +625,16 @@ static void bwd_link_poll(struct work_struct *work)
if (rc)
dev_err(&ndev->pdev->dev,
"Error determining link status\n");
+
+ /* Check to see if a link error is the cause of the link down */
+ if (ndev->link_status == NTB_LINK_DOWN) {
+ u32 status32 = readl(ndev->reg_base +
+ BWD_LTSSMSTATEJMP_OFFSET);
+ if (status32 & BWD_LTSSMSTATEJMP_FORCEDETECT) {
+ schedule_delayed_work(&ndev->lr_timer, 0);
+ return;
+ }
+ }
}
schedule_delayed_work(&ndev->hb_timer, NTB_HB_TIMEOUT);
@@ -519,41 +651,174 @@ static int ntb_xeon_setup(struct ntb_device *ndev)
if (rc)
return rc;
+ if (val & SNB_PPD_DEV_TYPE)
+ ndev->dev_type = NTB_DEV_USD;
+ else
+ ndev->dev_type = NTB_DEV_DSD;
+
switch (val & SNB_PPD_CONN_TYPE) {
case NTB_CONN_B2B:
+ dev_info(&ndev->pdev->dev, "Conn Type = B2B\n");
ndev->conn_type = NTB_CONN_B2B;
+ ndev->reg_ofs.ldb = ndev->reg_base + SNB_PDOORBELL_OFFSET;
+ ndev->reg_ofs.ldb_mask = ndev->reg_base + SNB_PDBMSK_OFFSET;
+ ndev->reg_ofs.spad_read = ndev->reg_base + SNB_SPAD_OFFSET;
+ ndev->reg_ofs.bar2_xlat = ndev->reg_base + SNB_SBAR2XLAT_OFFSET;
+ ndev->reg_ofs.bar4_xlat = ndev->reg_base + SNB_SBAR4XLAT_OFFSET;
+ ndev->limits.max_spads = SNB_MAX_B2B_SPADS;
+
+ /* There is a Xeon hardware erratum related to writes to
+ * SDOORBELL or B2BDOORBELL in conjunction with inbound access
+ * to NTB MMIO space, which may hang the system. To work around
+ * this, use the second memory window to access the interrupt and
+ * scratch pad registers on the remote system.
+ */
+ if (xeon_errata_workaround) {
+ if (!ndev->mw[1].bar_sz)
+ return -EINVAL;
+
+ ndev->limits.max_mw = SNB_ERRATA_MAX_MW;
+ ndev->reg_ofs.spad_write = ndev->mw[1].vbase +
+ SNB_SPAD_OFFSET;
+ ndev->reg_ofs.rdb = ndev->mw[1].vbase +
+ SNB_PDOORBELL_OFFSET;
+
+ /* Set the Limit register to 4k, the minimum size, to
+ * prevent an illegal access
+ */
+ writeq(ndev->mw[1].bar_sz + 0x1000, ndev->reg_base +
+ SNB_PBAR4LMT_OFFSET);
+ } else {
+ ndev->limits.max_mw = SNB_MAX_MW;
+ ndev->reg_ofs.spad_write = ndev->reg_base +
+ SNB_B2B_SPAD_OFFSET;
+ ndev->reg_ofs.rdb = ndev->reg_base +
+ SNB_B2B_DOORBELL_OFFSET;
+
+ /* Disable the Limit register, just in case it is set to
+ * something silly
+ */
+ writeq(0, ndev->reg_base + SNB_PBAR4LMT_OFFSET);
+ }
+
+ /* The Xeon errata workaround requires setting SBAR Base
+ * addresses to known values, so that the PBAR XLAT can be
+ * pointed at SBAR0 of the remote system.
+ */
+ if (ndev->dev_type == NTB_DEV_USD) {
+ writeq(SNB_MBAR23_DSD_ADDR, ndev->reg_base +
+ SNB_PBAR2XLAT_OFFSET);
+ if (xeon_errata_workaround)
+ writeq(SNB_MBAR01_DSD_ADDR, ndev->reg_base +
+ SNB_PBAR4XLAT_OFFSET);
+ else {
+ writeq(SNB_MBAR45_DSD_ADDR, ndev->reg_base +
+ SNB_PBAR4XLAT_OFFSET);
+ /* B2B_XLAT_OFFSET is a 64bit register, but can
+ * only take 32bit writes
+ */
+ writel(SNB_MBAR01_DSD_ADDR & 0xffffffff,
+ ndev->reg_base + SNB_B2B_XLAT_OFFSETL);
+ writel(SNB_MBAR01_DSD_ADDR >> 32,
+ ndev->reg_base + SNB_B2B_XLAT_OFFSETU);
+ }
+
+ writeq(SNB_MBAR01_USD_ADDR, ndev->reg_base +
+ SNB_SBAR0BASE_OFFSET);
+ writeq(SNB_MBAR23_USD_ADDR, ndev->reg_base +
+ SNB_SBAR2BASE_OFFSET);
+ writeq(SNB_MBAR45_USD_ADDR, ndev->reg_base +
+ SNB_SBAR4BASE_OFFSET);
+ } else {
+ writeq(SNB_MBAR23_USD_ADDR, ndev->reg_base +
+ SNB_PBAR2XLAT_OFFSET);
+ if (xeon_errata_workaround)
+ writeq(SNB_MBAR01_USD_ADDR, ndev->reg_base +
+ SNB_PBAR4XLAT_OFFSET);
+ else {
+ writeq(SNB_MBAR45_USD_ADDR, ndev->reg_base +
+ SNB_PBAR4XLAT_OFFSET);
+ /* B2B_XLAT_OFFSET is a 64bit register, but can
+ * only take 32bit writes
+ */
+ writel(SNB_MBAR01_DSD_ADDR & 0xffffffff,
+ ndev->reg_base + SNB_B2B_XLAT_OFFSETL);
+ writel(SNB_MBAR01_USD_ADDR >> 32,
+ ndev->reg_base + SNB_B2B_XLAT_OFFSETU);
+ }
+ writeq(SNB_MBAR01_DSD_ADDR, ndev->reg_base +
+ SNB_SBAR0BASE_OFFSET);
+ writeq(SNB_MBAR23_DSD_ADDR, ndev->reg_base +
+ SNB_SBAR2BASE_OFFSET);
+ writeq(SNB_MBAR45_DSD_ADDR, ndev->reg_base +
+ SNB_SBAR4BASE_OFFSET);
+ }
break;
- case NTB_CONN_CLASSIC:
case NTB_CONN_RP:
+ dev_info(&ndev->pdev->dev, "Conn Type = RP\n");
+ ndev->conn_type = NTB_CONN_RP;
+
+ if (xeon_errata_workaround) {
+ dev_err(&ndev->pdev->dev,
+ "NTB-RP disabled due to hardware errata. To disregard this warning and potentially lock-up the system, add the parameter 'xeon_errata_workaround=0'.\n");
+ return -EINVAL;
+ }
+
+ /* Scratch pads need to have exclusive access from the primary
+ * or secondary side. Halve the num spads so that each side can
+ * have an equal amount.
+ */
+ ndev->limits.max_spads = SNB_MAX_COMPAT_SPADS / 2;
+ /* Note: The SDOORBELL is the cause of the erratum. You REALLY
+ * don't want to touch it.
+ */
+ ndev->reg_ofs.rdb = ndev->reg_base + SNB_SDOORBELL_OFFSET;
+ ndev->reg_ofs.ldb = ndev->reg_base + SNB_PDOORBELL_OFFSET;
+ ndev->reg_ofs.ldb_mask = ndev->reg_base + SNB_PDBMSK_OFFSET;
+ /* Offset the start of the spads to correspond to whether it is
+ * primary or secondary
+ */
+ ndev->reg_ofs.spad_write = ndev->reg_base + SNB_SPAD_OFFSET +
+ ndev->limits.max_spads * 4;
+ ndev->reg_ofs.spad_read = ndev->reg_base + SNB_SPAD_OFFSET;
+ ndev->reg_ofs.bar2_xlat = ndev->reg_base + SNB_SBAR2XLAT_OFFSET;
+ ndev->reg_ofs.bar4_xlat = ndev->reg_base + SNB_SBAR4XLAT_OFFSET;
+ ndev->limits.max_mw = SNB_MAX_MW;
+ break;
+ case NTB_CONN_TRANSPARENT:
+ dev_info(&ndev->pdev->dev, "Conn Type = TRANSPARENT\n");
+ ndev->conn_type = NTB_CONN_TRANSPARENT;
+ /* Scratch pads need to have exclusive access from the primary
+ * or secondary side. Halve the num spads so that each side can
+ * have an equal amount.
+ */
+ ndev->limits.max_spads = SNB_MAX_COMPAT_SPADS / 2;
+ ndev->reg_ofs.rdb = ndev->reg_base + SNB_PDOORBELL_OFFSET;
+ ndev->reg_ofs.ldb = ndev->reg_base + SNB_SDOORBELL_OFFSET;
+ ndev->reg_ofs.ldb_mask = ndev->reg_base + SNB_SDBMSK_OFFSET;
+ ndev->reg_ofs.spad_write = ndev->reg_base + SNB_SPAD_OFFSET;
+ /* Offset the start of the spads to correspond to whether it is
+ * primary or secondary
+ */
+ ndev->reg_ofs.spad_read = ndev->reg_base + SNB_SPAD_OFFSET +
+ ndev->limits.max_spads * 4;
+ ndev->reg_ofs.bar2_xlat = ndev->reg_base + SNB_PBAR2XLAT_OFFSET;
+ ndev->reg_ofs.bar4_xlat = ndev->reg_base + SNB_PBAR4XLAT_OFFSET;
+
+ ndev->limits.max_mw = SNB_MAX_MW;
+ break;
default:
- dev_err(&ndev->pdev->dev, "Only B2B supported at this time\n");
+ /* Most likely caused by the remote NTB-RP device not being
+ * configured
+ */
+ dev_err(&ndev->pdev->dev, "Unknown PPD %x\n", val);
return -EINVAL;
}
- if (val & SNB_PPD_DEV_TYPE)
- ndev->dev_type = NTB_DEV_DSD;
- else
- ndev->dev_type = NTB_DEV_USD;
-
- ndev->reg_ofs.pdb = ndev->reg_base + SNB_PDOORBELL_OFFSET;
- ndev->reg_ofs.pdb_mask = ndev->reg_base + SNB_PDBMSK_OFFSET;
- ndev->reg_ofs.sbar2_xlat = ndev->reg_base + SNB_SBAR2XLAT_OFFSET;
- ndev->reg_ofs.sbar4_xlat = ndev->reg_base + SNB_SBAR4XLAT_OFFSET;
ndev->reg_ofs.lnk_cntl = ndev->reg_base + SNB_NTBCNTL_OFFSET;
- ndev->reg_ofs.lnk_stat = ndev->reg_base + SNB_LINK_STATUS_OFFSET;
- ndev->reg_ofs.spad_read = ndev->reg_base + SNB_SPAD_OFFSET;
+ ndev->reg_ofs.lnk_stat = ndev->reg_base + SNB_SLINK_STATUS_OFFSET;
ndev->reg_ofs.spci_cmd = ndev->reg_base + SNB_PCICMD_OFFSET;
- if (ndev->conn_type == NTB_CONN_B2B) {
- ndev->reg_ofs.sdb = ndev->reg_base + SNB_B2B_DOORBELL_OFFSET;
- ndev->reg_ofs.spad_write = ndev->reg_base + SNB_B2B_SPAD_OFFSET;
- ndev->limits.max_spads = SNB_MAX_SPADS;
- } else {
- ndev->reg_ofs.sdb = ndev->reg_base + SNB_SDOORBELL_OFFSET;
- ndev->reg_ofs.spad_write = ndev->reg_base + SNB_SPAD_OFFSET;
- ndev->limits.max_spads = SNB_MAX_COMPAT_SPADS;
- }
-
ndev->limits.max_db_bits = SNB_MAX_DB_BITS;
ndev->limits.msix_cnt = SNB_MSIX_CNT;
ndev->bits_per_vector = SNB_DB_BITS_PER_VEC;
@@ -578,7 +843,7 @@ static int ntb_bwd_setup(struct ntb_device *ndev)
break;
case NTB_CONN_RP:
default:
- dev_err(&ndev->pdev->dev, "Only B2B supported at this time\n");
+ dev_err(&ndev->pdev->dev, "Unsupported NTB configuration\n");
return -EINVAL;
}
@@ -593,31 +858,25 @@ static int ntb_bwd_setup(struct ntb_device *ndev)
if (rc)
return rc;
- ndev->reg_ofs.pdb = ndev->reg_base + BWD_PDOORBELL_OFFSET;
- ndev->reg_ofs.pdb_mask = ndev->reg_base + BWD_PDBMSK_OFFSET;
- ndev->reg_ofs.sbar2_xlat = ndev->reg_base + BWD_SBAR2XLAT_OFFSET;
- ndev->reg_ofs.sbar4_xlat = ndev->reg_base + BWD_SBAR4XLAT_OFFSET;
+ ndev->reg_ofs.ldb = ndev->reg_base + BWD_PDOORBELL_OFFSET;
+ ndev->reg_ofs.ldb_mask = ndev->reg_base + BWD_PDBMSK_OFFSET;
+ ndev->reg_ofs.rdb = ndev->reg_base + BWD_B2B_DOORBELL_OFFSET;
+ ndev->reg_ofs.bar2_xlat = ndev->reg_base + BWD_SBAR2XLAT_OFFSET;
+ ndev->reg_ofs.bar4_xlat = ndev->reg_base + BWD_SBAR4XLAT_OFFSET;
ndev->reg_ofs.lnk_cntl = ndev->reg_base + BWD_NTBCNTL_OFFSET;
ndev->reg_ofs.lnk_stat = ndev->reg_base + BWD_LINK_STATUS_OFFSET;
ndev->reg_ofs.spad_read = ndev->reg_base + BWD_SPAD_OFFSET;
+ ndev->reg_ofs.spad_write = ndev->reg_base + BWD_B2B_SPAD_OFFSET;
ndev->reg_ofs.spci_cmd = ndev->reg_base + BWD_PCICMD_OFFSET;
-
- if (ndev->conn_type == NTB_CONN_B2B) {
- ndev->reg_ofs.sdb = ndev->reg_base + BWD_B2B_DOORBELL_OFFSET;
- ndev->reg_ofs.spad_write = ndev->reg_base + BWD_B2B_SPAD_OFFSET;
- ndev->limits.max_spads = BWD_MAX_SPADS;
- } else {
- ndev->reg_ofs.sdb = ndev->reg_base + BWD_PDOORBELL_OFFSET;
- ndev->reg_ofs.spad_write = ndev->reg_base + BWD_SPAD_OFFSET;
- ndev->limits.max_spads = BWD_MAX_COMPAT_SPADS;
- }
-
+ ndev->limits.max_mw = BWD_MAX_MW;
+ ndev->limits.max_spads = BWD_MAX_SPADS;
ndev->limits.max_db_bits = BWD_MAX_DB_BITS;
ndev->limits.msix_cnt = BWD_MSIX_CNT;
ndev->bits_per_vector = BWD_DB_BITS_PER_VEC;
/* Since bwd doesn't have a link interrupt, setup a poll timer */
INIT_DELAYED_WORK(&ndev->hb_timer, bwd_link_poll);
+ INIT_DELAYED_WORK(&ndev->lr_timer, bwd_link_recovery);
schedule_delayed_work(&ndev->hb_timer, NTB_HB_TIMEOUT);
return 0;
@@ -628,13 +887,18 @@ static int ntb_device_setup(struct ntb_device *ndev)
int rc;
switch (ndev->pdev->device) {
- case PCI_DEVICE_ID_INTEL_NTB_2ND_SNB:
- case PCI_DEVICE_ID_INTEL_NTB_RP_JSF:
- case PCI_DEVICE_ID_INTEL_NTB_RP_SNB:
- case PCI_DEVICE_ID_INTEL_NTB_CLASSIC_JSF:
- case PCI_DEVICE_ID_INTEL_NTB_CLASSIC_SNB:
+ case PCI_DEVICE_ID_INTEL_NTB_SS_JSF:
+ case PCI_DEVICE_ID_INTEL_NTB_SS_SNB:
+ case PCI_DEVICE_ID_INTEL_NTB_SS_IVT:
+ case PCI_DEVICE_ID_INTEL_NTB_SS_HSX:
+ case PCI_DEVICE_ID_INTEL_NTB_PS_JSF:
+ case PCI_DEVICE_ID_INTEL_NTB_PS_SNB:
+ case PCI_DEVICE_ID_INTEL_NTB_PS_IVT:
+ case PCI_DEVICE_ID_INTEL_NTB_PS_HSX:
case PCI_DEVICE_ID_INTEL_NTB_B2B_JSF:
case PCI_DEVICE_ID_INTEL_NTB_B2B_SNB:
+ case PCI_DEVICE_ID_INTEL_NTB_B2B_IVT:
+ case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX:
rc = ntb_xeon_setup(ndev);
break;
case PCI_DEVICE_ID_INTEL_NTB_B2B_BWD:
@@ -644,16 +908,26 @@ static int ntb_device_setup(struct ntb_device *ndev)
rc = -ENODEV;
}
- /* Enable Bus Master and Memory Space on the secondary side */
- writew(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER, ndev->reg_ofs.spci_cmd);
+ if (rc)
+ return rc;
+
+ dev_info(&ndev->pdev->dev, "Device Type = %s\n",
+ ndev->dev_type == NTB_DEV_USD ? "USD/DSP" : "DSD/USP");
- return rc;
+ if (ndev->conn_type == NTB_CONN_B2B)
+ /* Enable Bus Master and Memory Space on the secondary side */
+ writew(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER,
+ ndev->reg_ofs.spci_cmd);
+
+ return 0;
}
static void ntb_device_free(struct ntb_device *ndev)
{
- if (ndev->hw_type == BWD_HW)
+ if (ndev->hw_type == BWD_HW) {
cancel_delayed_work_sync(&ndev->hb_timer);
+ cancel_delayed_work_sync(&ndev->lr_timer);
+ }
}
static irqreturn_t bwd_callback_msix_irq(int irq, void *data)
@@ -672,7 +946,7 @@ static irqreturn_t bwd_callback_msix_irq(int irq, void *data)
*/
ndev->last_ts = jiffies;
- writeq((u64) 1 << db_cb->db_num, ndev->reg_ofs.pdb);
+ writeq((u64) 1 << db_cb->db_num, ndev->reg_ofs.ldb);
return IRQ_HANDLED;
}
@@ -694,7 +968,7 @@ static irqreturn_t xeon_callback_msix_irq(int irq, void *data)
* interrupts.
*/
writew(((1 << ndev->bits_per_vector) - 1) <<
- (db_cb->db_num * ndev->bits_per_vector), ndev->reg_ofs.pdb);
+ (db_cb->db_num * ndev->bits_per_vector), ndev->reg_ofs.ldb);
return IRQ_HANDLED;
}
@@ -712,7 +986,7 @@ static irqreturn_t xeon_event_msix_irq(int irq, void *dev)
dev_err(&ndev->pdev->dev, "Error determining link status\n");
/* bit 15 is always the link bit */
- writew(1 << ndev->limits.max_db_bits, ndev->reg_ofs.pdb);
+ writew(1 << ndev->limits.max_db_bits, ndev->reg_ofs.ldb);
return IRQ_HANDLED;
}
@@ -723,29 +997,28 @@ static irqreturn_t ntb_interrupt(int irq, void *dev)
unsigned int i = 0;
if (ndev->hw_type == BWD_HW) {
- u64 pdb = readq(ndev->reg_ofs.pdb);
+ u64 ldb = readq(ndev->reg_ofs.ldb);
- dev_dbg(&ndev->pdev->dev, "irq %d - pdb = %Lx\n", irq, pdb);
+ dev_dbg(&ndev->pdev->dev, "irq %d - ldb = %Lx\n", irq, ldb);
- while (pdb) {
- i = __ffs(pdb);
- pdb &= pdb - 1;
+ while (ldb) {
+ i = __ffs(ldb);
+ ldb &= ldb - 1;
bwd_callback_msix_irq(irq, &ndev->db_cb[i]);
}
} else {
- u16 pdb = readw(ndev->reg_ofs.pdb);
+ u16 ldb = readw(ndev->reg_ofs.ldb);
- dev_dbg(&ndev->pdev->dev, "irq %d - pdb = %x sdb %x\n", irq,
- pdb, readw(ndev->reg_ofs.sdb));
+ dev_dbg(&ndev->pdev->dev, "irq %d - ldb = %x\n", irq, ldb);
- if (pdb & SNB_DB_HW_LINK) {
+ if (ldb & SNB_DB_HW_LINK) {
xeon_event_msix_irq(irq, dev);
- pdb &= ~SNB_DB_HW_LINK;
+ ldb &= ~SNB_DB_HW_LINK;
}
- while (pdb) {
- i = __ffs(pdb);
- pdb &= pdb - 1;
+ while (ldb) {
+ i = __ffs(ldb);
+ ldb &= ldb - 1;
xeon_callback_msix_irq(irq, &ndev->db_cb[i]);
}
}
@@ -758,16 +1031,15 @@ static int ntb_setup_msix(struct ntb_device *ndev)
struct pci_dev *pdev = ndev->pdev;
struct msix_entry *msix;
int msix_entries;
- int rc, i, pos;
+ int rc, i;
u16 val;
- pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
- if (!pos) {
+ if (!pdev->msix_cap) {
rc = -EIO;
goto err;
}
- rc = pci_read_config_word(pdev, pos + PCI_MSIX_FLAGS, &val);
+ rc = pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &val);
if (rc)
goto err;
@@ -903,10 +1175,10 @@ static int ntb_setup_interrupts(struct ntb_device *ndev)
* Interrupt. The rest will be unmasked as callbacks are registered.
*/
if (ndev->hw_type == BWD_HW)
- writeq(~0, ndev->reg_ofs.pdb_mask);
+ writeq(~0, ndev->reg_ofs.ldb_mask);
else
writew(~(1 << ndev->limits.max_db_bits),
- ndev->reg_ofs.pdb_mask);
+ ndev->reg_ofs.ldb_mask);
rc = ntb_setup_msix(ndev);
if (!rc)
@@ -935,9 +1207,9 @@ static void ntb_free_interrupts(struct ntb_device *ndev)
/* mask interrupts */
if (ndev->hw_type == BWD_HW)
- writeq(~0, ndev->reg_ofs.pdb_mask);
+ writeq(~0, ndev->reg_ofs.ldb_mask);
else
- writew(~0, ndev->reg_ofs.pdb_mask);
+ writew(~0, ndev->reg_ofs.ldb_mask);
if (ndev->num_msix) {
struct msix_entry *msix;
@@ -963,9 +1235,9 @@ static int ntb_create_callbacks(struct ntb_device *ndev)
{
int i;
- /* Checken-egg issue. We won't know how many callbacks are necessary
+ /* Chicken-egg issue. We won't know how many callbacks are necessary
* until we see how many MSI-X vectors we get, but these pointers need
- * to be passed into the MSI-X register fucntion. So, we allocate the
+ * to be passed into the MSI-X register function. So, we allocate the
* max, knowing that they might not all be used, to work around this.
*/
ndev->db_cb = kcalloc(ndev->limits.max_db_bits,
@@ -992,6 +1264,28 @@ static void ntb_free_callbacks(struct ntb_device *ndev)
kfree(ndev->db_cb);
}
+static void ntb_setup_debugfs(struct ntb_device *ndev)
+{
+ if (!debugfs_initialized())
+ return;
+
+ if (!debugfs_dir)
+ debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
+
+ ndev->debugfs_dir = debugfs_create_dir(pci_name(ndev->pdev),
+ debugfs_dir);
+}
+
+static void ntb_free_debugfs(struct ntb_device *ndev)
+{
+ debugfs_remove_recursive(ndev->debugfs_dir);
+
+ if (debugfs_dir && simple_empty(debugfs_dir)) {
+ debugfs_remove_recursive(debugfs_dir);
+ debugfs_dir = NULL;
+ }
+}
+
static int ntb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct ntb_device *ndev;
@@ -1004,6 +1298,7 @@ static int ntb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
ndev->pdev = pdev;
ndev->link_status = NTB_LINK_DOWN;
pci_set_drvdata(pdev, ndev);
+ ntb_setup_debugfs(ndev);
rc = pci_enable_device(pdev);
if (rc)
@@ -1022,13 +1317,13 @@ static int ntb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err2;
}
- for (i = 0; i < NTB_NUM_MW; i++) {
+ for (i = 0; i < NTB_MAX_NUM_MW; i++) {
ndev->mw[i].bar_sz = pci_resource_len(pdev, MW_TO_BAR(i));
ndev->mw[i].vbase =
ioremap_wc(pci_resource_start(pdev, MW_TO_BAR(i)),
ndev->mw[i].bar_sz);
dev_info(&pdev->dev, "MW %d size %llu\n", i,
- pci_resource_len(pdev, MW_TO_BAR(i)));
+ (unsigned long long) ndev->mw[i].bar_sz);
if (!ndev->mw[i].vbase) {
dev_warn(&pdev->dev, "Cannot remap BAR %d\n",
MW_TO_BAR(i));
@@ -1100,6 +1395,7 @@ err2:
err1:
pci_disable_device(pdev);
err:
+ ntb_free_debugfs(ndev);
kfree(ndev);
dev_err(&pdev->dev, "Error loading %s module\n", KBUILD_MODNAME);
@@ -1114,7 +1410,7 @@ static void ntb_pci_remove(struct pci_dev *pdev)
/* Bring NTB link down */
ntb_cntl = readl(ndev->reg_ofs.lnk_cntl);
- ntb_cntl |= NTB_LINK_DISABLE;
+ ntb_cntl |= NTB_CNTL_LINK_DISABLE;
writel(ntb_cntl, ndev->reg_ofs.lnk_cntl);
ntb_transport_free(ndev->ntb_transport);
@@ -1123,12 +1419,13 @@ static void ntb_pci_remove(struct pci_dev *pdev)
ntb_free_callbacks(ndev);
ntb_device_free(ndev);
- for (i = 0; i < NTB_NUM_MW; i++)
+ for (i = 0; i < NTB_MAX_NUM_MW; i++)
iounmap(ndev->mw[i].vbase);
iounmap(ndev->reg_base);
pci_release_selected_regions(pdev, NTB_BAR_MASK);
pci_disable_device(pdev);
+ ntb_free_debugfs(ndev);
kfree(ndev);
}
diff --git a/drivers/ntb/ntb_hw.h b/drivers/ntb/ntb_hw.h
index 3a3038c..0a31ced 100644
--- a/drivers/ntb/ntb_hw.h
+++ b/drivers/ntb/ntb_hw.h
@@ -47,16 +47,36 @@
*/
#define PCI_DEVICE_ID_INTEL_NTB_B2B_JSF 0x3725
-#define PCI_DEVICE_ID_INTEL_NTB_CLASSIC_JSF 0x3726
-#define PCI_DEVICE_ID_INTEL_NTB_RP_JSF 0x3727
-#define PCI_DEVICE_ID_INTEL_NTB_RP_SNB 0x3C08
+#define PCI_DEVICE_ID_INTEL_NTB_PS_JSF 0x3726
+#define PCI_DEVICE_ID_INTEL_NTB_SS_JSF 0x3727
#define PCI_DEVICE_ID_INTEL_NTB_B2B_SNB 0x3C0D
-#define PCI_DEVICE_ID_INTEL_NTB_CLASSIC_SNB 0x3C0E
-#define PCI_DEVICE_ID_INTEL_NTB_2ND_SNB 0x3C0F
+#define PCI_DEVICE_ID_INTEL_NTB_PS_SNB 0x3C0E
+#define PCI_DEVICE_ID_INTEL_NTB_SS_SNB 0x3C0F
+#define PCI_DEVICE_ID_INTEL_NTB_B2B_IVT 0x0E0D
+#define PCI_DEVICE_ID_INTEL_NTB_PS_IVT 0x0E0E
+#define PCI_DEVICE_ID_INTEL_NTB_SS_IVT 0x0E0F
+#define PCI_DEVICE_ID_INTEL_NTB_B2B_HSX 0x2F0D
+#define PCI_DEVICE_ID_INTEL_NTB_PS_HSX 0x2F0E
+#define PCI_DEVICE_ID_INTEL_NTB_SS_HSX 0x2F0F
#define PCI_DEVICE_ID_INTEL_NTB_B2B_BWD 0x0C4E
#define msix_table_size(control) ((control & PCI_MSIX_FLAGS_QSIZE)+1)
+#ifndef readq
+static inline u64 readq(void __iomem *addr)
+{
+ return readl(addr) | (((u64) readl(addr + 4)) << 32LL);
+}
+#endif
+
+#ifndef writeq
+static inline void writeq(u64 val, void __iomem *addr)
+{
+ writel(val & 0xffffffff, addr);
+ writel(val >> 32, addr + 4);
+}
+#endif
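
The readq()/writeq() fallbacks above cover 32-bit builds that lack native 64-bit MMIO accessors. The two halves are not accessed atomically, so a register that changes between the paired readl() calls can be observed torn. A minimal userspace model of the same split, assuming little-endian layout, with plain loads and stores standing in for readl()/writel():

#include <stdint.h>
#include <stdio.h>

static uint64_t model_readq(const volatile uint32_t *addr)
{
	/* low word first, then high word, as in the fallback above */
	return (uint64_t)addr[0] | ((uint64_t)addr[1] << 32);
}

static void model_writeq(uint64_t val, volatile uint32_t *addr)
{
	addr[0] = (uint32_t)(val & 0xffffffff);
	addr[1] = (uint32_t)(val >> 32);
}

int main(void)
{
	uint32_t reg[2];

	model_writeq(0x1122334455667788ULL, reg);
	/* prints 1122334455667788 */
	printf("%llx\n", (unsigned long long)model_readq(reg));
	return 0;
}
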
+
#define NTB_BAR_MMIO 0
#define NTB_BAR_23 2
#define NTB_BAR_45 4
@@ -68,7 +88,7 @@
#define NTB_HB_TIMEOUT msecs_to_jiffies(1000)
-#define NTB_NUM_MW 2
+#define NTB_MAX_NUM_MW 2
enum ntb_hw_event {
NTB_EVENT_SW_EVENT0 = 0,
@@ -96,18 +116,19 @@ struct ntb_device {
struct pci_dev *pdev;
struct msix_entry *msix_entries;
void __iomem *reg_base;
- struct ntb_mw mw[NTB_NUM_MW];
+ struct ntb_mw mw[NTB_MAX_NUM_MW];
struct {
- unsigned int max_spads;
- unsigned int max_db_bits;
- unsigned int msix_cnt;
+ unsigned char max_mw;
+ unsigned char max_spads;
+ unsigned char max_db_bits;
+ unsigned char msix_cnt;
} limits;
struct {
- void __iomem *pdb;
- void __iomem *pdb_mask;
- void __iomem *sdb;
- void __iomem *sbar2_xlat;
- void __iomem *sbar4_xlat;
+ void __iomem *ldb;
+ void __iomem *ldb_mask;
+ void __iomem *rdb;
+ void __iomem *bar2_xlat;
+ void __iomem *bar4_xlat;
void __iomem *spad_write;
void __iomem *spad_read;
void __iomem *lnk_cntl;
@@ -124,12 +145,45 @@ struct ntb_device {
unsigned char num_msix;
unsigned char bits_per_vector;
unsigned char max_cbs;
+ unsigned char link_width;
+ unsigned char link_speed;
unsigned char link_status;
+
struct delayed_work hb_timer;
unsigned long last_ts;
+
+ struct delayed_work lr_timer;
+
+ struct dentry *debugfs_dir;
};
/**
+ * ntb_max_cbs() - return the max callbacks
+ * @ndev: pointer to ntb_device instance
+ *
+ * Given the ntb pointer, return the maximum number of callbacks
+ *
+ * RETURNS: the maximum number of callbacks
+ */
+static inline unsigned char ntb_max_cbs(struct ntb_device *ndev)
+{
+ return ndev->max_cbs;
+}
+
+/**
+ * ntb_max_mw() - return the max number of memory windows
+ * @ndev: pointer to ntb_device instance
+ *
+ * Given the ntb pointer, return the maximum number of memory windows
+ *
+ * RETURNS: the maximum number of memory windows
+ */
+static inline unsigned char ntb_max_mw(struct ntb_device *ndev)
+{
+ return ndev->limits.max_mw;
+}
+
+/**
* ntb_hw_link_status() - return the hardware link status
* @ndev: pointer to ntb_device instance
*
@@ -146,7 +200,7 @@ static inline bool ntb_hw_link_status(struct ntb_device *ndev)
* ntb_query_pdev() - return the pci_dev pointer
* @ndev: pointer to ntb_device instance
*
- * Given the ntb pointer return the pci_dev pointerfor the NTB hardware device
+ * Given the ntb pointer, return the pci_dev pointer for the NTB hardware device
*
* RETURNS: a pointer to the ntb pci_dev
*/
@@ -155,6 +209,20 @@ static inline struct pci_dev *ntb_query_pdev(struct ntb_device *ndev)
return ndev->pdev;
}
+/**
+ * ntb_query_debugfs() - return the debugfs pointer
+ * @ndev: pointer to ntb_device instance
+ *
+ * Given the ntb pointer, return the debugfs directory pointer for the NTB
+ * hardware device
+ *
+ * RETURNS: a pointer to the debugfs directory
+ */
+static inline struct dentry *ntb_query_debugfs(struct ntb_device *ndev)
+{
+ return ndev->debugfs_dir;
+}
+
struct ntb_device *ntb_register_transport(struct pci_dev *pdev,
void *transport);
void ntb_unregister_transport(struct ntb_device *ndev);
@@ -172,9 +240,10 @@ int ntb_write_local_spad(struct ntb_device *ndev, unsigned int idx, u32 val);
int ntb_read_local_spad(struct ntb_device *ndev, unsigned int idx, u32 *val);
int ntb_write_remote_spad(struct ntb_device *ndev, unsigned int idx, u32 val);
int ntb_read_remote_spad(struct ntb_device *ndev, unsigned int idx, u32 *val);
+resource_size_t ntb_get_mw_base(struct ntb_device *ndev, unsigned int mw);
void __iomem *ntb_get_mw_vbase(struct ntb_device *ndev, unsigned int mw);
-resource_size_t ntb_get_mw_size(struct ntb_device *ndev, unsigned int mw);
-void ntb_ring_sdb(struct ntb_device *ndev, unsigned int idx);
+u64 ntb_get_mw_size(struct ntb_device *ndev, unsigned int mw);
+void ntb_ring_doorbell(struct ntb_device *ndev, unsigned int idx);
void *ntb_find_transport(struct pci_dev *pdev);
int ntb_transport_init(struct pci_dev *pdev);
diff --git a/drivers/ntb/ntb_regs.h b/drivers/ntb/ntb_regs.h
index 5bfa8c0..aa4bdd3 100644
--- a/drivers/ntb/ntb_regs.h
+++ b/drivers/ntb/ntb_regs.h
@@ -46,23 +46,24 @@
* Jon Mason <jon.mason@intel.com>
*/
-#define NTB_LINK_ENABLE 0x0000
-#define NTB_LINK_DISABLE 0x0002
#define NTB_LINK_STATUS_ACTIVE 0x2000
#define NTB_LINK_SPEED_MASK 0x000f
#define NTB_LINK_WIDTH_MASK 0x03f0
#define SNB_MSIX_CNT 4
-#define SNB_MAX_SPADS 16
-#define SNB_MAX_COMPAT_SPADS 8
+#define SNB_MAX_B2B_SPADS 16
+#define SNB_MAX_COMPAT_SPADS 16
/* Reserve the uppermost bit for link interrupt */
#define SNB_MAX_DB_BITS 15
#define SNB_DB_BITS_PER_VEC 5
+#define SNB_MAX_MW 2
+#define SNB_ERRATA_MAX_MW 1
#define SNB_DB_HW_LINK 0x8000
#define SNB_PCICMD_OFFSET 0x0504
#define SNB_DEVCTRL_OFFSET 0x0598
+#define SNB_SLINK_STATUS_OFFSET 0x05A2
#define SNB_LINK_STATUS_OFFSET 0x01A2
#define SNB_PBAR2LMT_OFFSET 0x0000
@@ -74,6 +75,9 @@
#define SNB_SBAR2XLAT_OFFSET 0x0030
#define SNB_SBAR4XLAT_OFFSET 0x0038
#define SNB_SBAR0BASE_OFFSET 0x0040
#define SNB_SBAR2BASE_OFFSET 0x0048
#define SNB_SBAR4BASE_OFFSET 0x0050
#define SNB_NTBCNTL_OFFSET 0x0058
@@ -88,19 +92,28 @@
#define SNB_WCCNTRL_OFFSET 0x00e0
#define SNB_B2B_SPAD_OFFSET 0x0100
#define SNB_B2B_DOORBELL_OFFSET 0x0140
-#define SNB_B2B_XLAT_OFFSET 0x0144
+#define SNB_B2B_XLAT_OFFSETL 0x0144
+#define SNB_B2B_XLAT_OFFSETU 0x0148
+
+#define SNB_MBAR01_USD_ADDR 0x000000210000000CULL
+#define SNB_MBAR23_USD_ADDR 0x000000410000000CULL
+#define SNB_MBAR45_USD_ADDR 0x000000810000000CULL
+#define SNB_MBAR01_DSD_ADDR 0x000000200000000CULL
+#define SNB_MBAR23_DSD_ADDR 0x000000400000000CULL
+#define SNB_MBAR45_DSD_ADDR 0x000000800000000CULL
#define BWD_MSIX_CNT 34
#define BWD_MAX_SPADS 16
-#define BWD_MAX_COMPAT_SPADS 16
#define BWD_MAX_DB_BITS 34
#define BWD_DB_BITS_PER_VEC 1
+#define BWD_MAX_MW 2
#define BWD_PCICMD_OFFSET 0xb004
#define BWD_MBAR23_OFFSET 0xb018
#define BWD_MBAR45_OFFSET 0xb020
#define BWD_DEVCTRL_OFFSET 0xb048
#define BWD_LINK_STATUS_OFFSET 0xb052
+#define BWD_ERRCORSTS_OFFSET 0xb110
#define BWD_SBAR2XLAT_OFFSET 0x0008
#define BWD_SBAR4XLAT_OFFSET 0x0010
@@ -118,6 +131,22 @@
#define BWD_B2B_SPADSEMA_OFFSET 0x80c0
#define BWD_B2B_STKYSPAD_OFFSET 0x80c4
+#define BWD_MODPHY_PCSREG4 0x1c004
+#define BWD_MODPHY_PCSREG6 0x1c006
+
+#define BWD_IP_BASE 0xC000
+#define BWD_DESKEWSTS_OFFSET (BWD_IP_BASE + 0x3024)
+#define BWD_LTSSMERRSTS0_OFFSET (BWD_IP_BASE + 0x3180)
+#define BWD_LTSSMSTATEJMP_OFFSET (BWD_IP_BASE + 0x3040)
+#define BWD_IBSTERRRCRVSTS0_OFFSET (BWD_IP_BASE + 0x3324)
+
+#define BWD_DESKEWSTS_DBERR (1 << 15)
+#define BWD_LTSSMERRSTS0_UNEXPECTEDEI (1 << 20)
+#define BWD_LTSSMSTATEJMP_FORCEDETECT (1 << 2)
+#define BWD_IBIST_ERR_OFLOW 0x7FFF7FFF
+
+#define NTB_CNTL_CFG_LOCK (1 << 0)
+#define NTB_CNTL_LINK_DISABLE (1 << 1)
#define NTB_CNTL_BAR23_SNOOP (1 << 2)
#define NTB_CNTL_BAR45_SNOOP (1 << 6)
#define BWD_CNTL_LINK_DOWN (1 << 16)
@@ -128,12 +157,3 @@
#define BWD_PPD_INIT_LINK 0x0008
#define BWD_PPD_CONN_TYPE 0x0300
#define BWD_PPD_DEV_TYPE 0x1000
-
-#define BWD_PBAR2XLAT_USD_ADDR 0x0000004000000000
-#define BWD_PBAR4XLAT_USD_ADDR 0x0000008000000000
-#define BWD_MBAR23_USD_ADDR 0x000000410000000C
-#define BWD_MBAR45_USD_ADDR 0x000000810000000C
-#define BWD_PBAR2XLAT_DSD_ADDR 0x0000004100000000
-#define BWD_PBAR4XLAT_DSD_ADDR 0x0000008100000000
-#define BWD_MBAR23_DSD_ADDR 0x000000400000000C
-#define BWD_MBAR45_DSD_ADDR 0x000000800000000C
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
index f8d7081..12a9e83 100644
--- a/drivers/ntb/ntb_transport.c
+++ b/drivers/ntb/ntb_transport.c
@@ -47,6 +47,7 @@
*/
#include <linux/debugfs.h>
#include <linux/delay.h>
+#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/export.h>
@@ -64,10 +65,14 @@ static unsigned int transport_mtu = 0x401E;
module_param(transport_mtu, uint, 0644);
MODULE_PARM_DESC(transport_mtu, "Maximum size of NTB transport packets");
-static unsigned char max_num_clients = 2;
+static unsigned char max_num_clients;
module_param(max_num_clients, byte, 0644);
MODULE_PARM_DESC(max_num_clients, "Maximum number of NTB transport clients");
+static unsigned int copy_bytes = 1024;
+module_param(copy_bytes, uint, 0644);
+MODULE_PARM_DESC(copy_bytes, "Threshold under which NTB will use the CPU to copy instead of DMA");
+
struct ntb_queue_entry {
/* ntb_queue list reference */
struct list_head entry;
@@ -76,6 +81,13 @@ struct ntb_queue_entry {
void *buf;
unsigned int len;
unsigned int flags;
+
+ struct ntb_transport_qp *qp;
+ union {
+ struct ntb_payload_header __iomem *tx_hdr;
+ struct ntb_payload_header *rx_hdr;
+ };
+ unsigned int index;
};
struct ntb_rx_info {
@@ -86,6 +98,7 @@ struct ntb_transport_qp {
struct ntb_transport *transport;
struct ntb_device *ndev;
void *cb_data;
+ struct dma_chan *dma_chan;
bool client_ready;
bool qp_link;
@@ -99,6 +112,7 @@ struct ntb_transport_qp {
struct list_head tx_free_q;
spinlock_t ntb_tx_free_q_lock;
void __iomem *tx_mw;
+ dma_addr_t tx_mw_phys;
unsigned int tx_index;
unsigned int tx_max_entry;
unsigned int tx_max_frame;
@@ -114,6 +128,7 @@ struct ntb_transport_qp {
unsigned int rx_index;
unsigned int rx_max_entry;
unsigned int rx_max_frame;
+ dma_cookie_t last_cookie;
void (*event_handler) (void *data, int status);
struct delayed_work link_work;
@@ -129,9 +144,14 @@ struct ntb_transport_qp {
u64 rx_err_no_buf;
u64 rx_err_oflow;
u64 rx_err_ver;
+ u64 rx_memcpy;
+ u64 rx_async;
u64 tx_bytes;
u64 tx_pkts;
u64 tx_ring_full;
+ u64 tx_err_no_buf;
+ u64 tx_memcpy;
+ u64 tx_async;
};
struct ntb_transport_mw {
@@ -150,14 +170,13 @@ struct ntb_transport {
struct list_head client_devs;
struct ntb_device *ndev;
- struct ntb_transport_mw mw[NTB_NUM_MW];
+ struct ntb_transport_mw *mw;
struct ntb_transport_qp *qps;
unsigned int max_qps;
unsigned long qp_bitmap;
bool transport_link;
struct delayed_work link_work;
struct work_struct link_cleanup;
- struct dentry *debugfs_dir;
};
enum {
@@ -183,7 +202,7 @@ enum {
MAX_SPAD,
};
-#define QP_TO_MW(qp) ((qp) % NTB_NUM_MW)
+#define QP_TO_MW(ndev, qp) ((qp) % ntb_max_mw(ndev))
#define NTB_QP_DEF_NUM_ENTRIES 100
#define NTB_LINK_DOWN_TIMEOUT 10
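
QP_TO_MW() now takes the device so the stripe width follows the hardware's actual memory-window count rather than a compile-time constant. A small sketch of the resulting round-robin mapping, modeling ntb_max_mw() as a constant 2:

#include <stdio.h>

#define MODEL_MAX_MW 2
#define MODEL_QP_TO_MW(qp) ((qp) % MODEL_MAX_MW)

int main(void)
{
	int qp;

	/* prints qp0->mw0, qp1->mw1, qp2->mw0, qp3->mw1, qp4->mw0 */
	for (qp = 0; qp < 5; qp++)
		printf("qp%d -> mw%d\n", qp, MODEL_QP_TO_MW(qp));
	return 0;
}
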
@@ -382,7 +401,7 @@ static ssize_t debugfs_read(struct file *filp, char __user *ubuf, size_t count,
char *buf;
ssize_t ret, out_offset, out_count;
- out_count = 600;
+ out_count = 1000;
buf = kmalloc(out_count, GFP_KERNEL);
if (!buf)
@@ -397,6 +416,10 @@ static ssize_t debugfs_read(struct file *filp, char __user *ubuf, size_t count,
out_offset += snprintf(buf + out_offset, out_count - out_offset,
"rx_pkts - \t%llu\n", qp->rx_pkts);
out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ "rx_memcpy - \t%llu\n", qp->rx_memcpy);
+ out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ "rx_async - \t%llu\n", qp->rx_async);
+ out_offset += snprintf(buf + out_offset, out_count - out_offset,
"rx_ring_empty - %llu\n", qp->rx_ring_empty);
out_offset += snprintf(buf + out_offset, out_count - out_offset,
"rx_err_no_buf - %llu\n", qp->rx_err_no_buf);
@@ -416,8 +439,14 @@ static ssize_t debugfs_read(struct file *filp, char __user *ubuf, size_t count,
out_offset += snprintf(buf + out_offset, out_count - out_offset,
"tx_pkts - \t%llu\n", qp->tx_pkts);
out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ "tx_memcpy - \t%llu\n", qp->tx_memcpy);
+ out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ "tx_async - \t%llu\n", qp->tx_async);
+ out_offset += snprintf(buf + out_offset, out_count - out_offset,
"tx_ring_full - \t%llu\n", qp->tx_ring_full);
out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ "tx_err_no_buf - %llu\n", qp->tx_err_no_buf);
+ out_offset += snprintf(buf + out_offset, out_count - out_offset,
"tx_mw - \t%p\n", qp->tx_mw);
out_offset += snprintf(buf + out_offset, out_count - out_offset,
"tx_index - \t%u\n", qp->tx_index);
@@ -475,22 +504,25 @@ static void ntb_transport_setup_qp_mw(struct ntb_transport *nt,
{
struct ntb_transport_qp *qp = &nt->qps[qp_num];
unsigned int rx_size, num_qps_mw;
- u8 mw_num = QP_TO_MW(qp_num);
+ u8 mw_num, mw_max;
unsigned int i;
+ mw_max = ntb_max_mw(nt->ndev);
+ mw_num = QP_TO_MW(nt->ndev, qp_num);
+
WARN_ON(nt->mw[mw_num].virt_addr == NULL);
- if (nt->max_qps % NTB_NUM_MW && mw_num < nt->max_qps % NTB_NUM_MW)
- num_qps_mw = nt->max_qps / NTB_NUM_MW + 1;
+ if (nt->max_qps % mw_max && mw_num < nt->max_qps % mw_max)
+ num_qps_mw = nt->max_qps / mw_max + 1;
else
- num_qps_mw = nt->max_qps / NTB_NUM_MW;
+ num_qps_mw = nt->max_qps / mw_max;
rx_size = (unsigned int) nt->mw[mw_num].size / num_qps_mw;
- qp->remote_rx_info = nt->mw[mw_num].virt_addr +
- (qp_num / NTB_NUM_MW * rx_size);
+ qp->rx_buff = nt->mw[mw_num].virt_addr + qp_num / mw_max * rx_size;
rx_size -= sizeof(struct ntb_rx_info);
- qp->rx_buff = qp->remote_rx_info + 1;
+ qp->remote_rx_info = qp->rx_buff + rx_size;
+
/* Due to housekeeping, there must be at least 2 buffs */
qp->rx_max_frame = min(transport_mtu, rx_size / 2);
qp->rx_max_entry = rx_size / qp->rx_max_frame;
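
The new layout puts the receive buffer at the start of each slice and remote_rx_info at its end. A worked sketch of the slicing arithmetic, assuming 5 queue pairs striped over 2 memory windows of 1 MiB each; the first max_qps % mw_max windows absorb the extra queue pair:

#include <stdio.h>

int main(void)
{
	unsigned int max_qps = 5, mw_max = 2, mw_size = 1 << 20;
	unsigned int qp_num;

	for (qp_num = 0; qp_num < max_qps; qp_num++) {
		unsigned int mw_num = qp_num % mw_max;
		unsigned int num_qps_mw, rx_size;

		if (max_qps % mw_max && mw_num < max_qps % mw_max)
			num_qps_mw = max_qps / mw_max + 1;	/* mw0: 3 qps */
		else
			num_qps_mw = max_qps / mw_max;		/* mw1: 2 qps */

		rx_size = mw_size / num_qps_mw;
		printf("qp%u: mw%u, slice %u bytes at offset %u\n",
		       qp_num, mw_num, rx_size, qp_num / mw_max * rx_size);
	}
	return 0;
}
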
@@ -631,7 +663,7 @@ static void ntb_transport_link_work(struct work_struct *work)
int rc, i;
/* send the local info, in the opposite order of the way we read it */
- for (i = 0; i < NTB_NUM_MW; i++) {
+ for (i = 0; i < ntb_max_mw(ndev); i++) {
rc = ntb_write_remote_spad(ndev, MW0_SZ_HIGH + (i * 2),
ntb_get_mw_size(ndev, i) >> 32);
if (rc) {
@@ -651,10 +683,10 @@ static void ntb_transport_link_work(struct work_struct *work)
}
}
- rc = ntb_write_remote_spad(ndev, NUM_MWS, NTB_NUM_MW);
+ rc = ntb_write_remote_spad(ndev, NUM_MWS, ntb_max_mw(ndev));
if (rc) {
dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
- NTB_NUM_MW, NUM_MWS);
+ ntb_max_mw(ndev), NUM_MWS);
goto out;
}
@@ -699,11 +731,11 @@ static void ntb_transport_link_work(struct work_struct *work)
goto out;
}
- if (val != NTB_NUM_MW)
+ if (val != ntb_max_mw(ndev))
goto out;
dev_dbg(&pdev->dev, "Remote number of mws = %d\n", val);
- for (i = 0; i < NTB_NUM_MW; i++) {
+ for (i = 0; i < ntb_max_mw(ndev); i++) {
u64 val64;
rc = ntb_read_remote_spad(ndev, MW0_SZ_HIGH + (i * 2), &val);
@@ -745,7 +777,7 @@ static void ntb_transport_link_work(struct work_struct *work)
return;
out1:
- for (i = 0; i < NTB_NUM_MW; i++)
+ for (i = 0; i < ntb_max_mw(ndev); i++)
ntb_free_mw(nt, i);
out:
if (ntb_hw_link_status(ndev))
@@ -794,12 +826,16 @@ static void ntb_qp_link_work(struct work_struct *work)
msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
}
-static void ntb_transport_init_queue(struct ntb_transport *nt,
+static int ntb_transport_init_queue(struct ntb_transport *nt,
unsigned int qp_num)
{
struct ntb_transport_qp *qp;
unsigned int num_qps_mw, tx_size;
- u8 mw_num = QP_TO_MW(qp_num);
+ u8 mw_num, mw_max;
+ u64 qp_offset;
+
+ mw_max = ntb_max_mw(nt->ndev);
+ mw_num = QP_TO_MW(nt->ndev, qp_num);
qp = &nt->qps[qp_num];
qp->qp_num = qp_num;
@@ -809,27 +845,34 @@ static void ntb_transport_init_queue(struct ntb_transport *nt,
qp->client_ready = NTB_LINK_DOWN;
qp->event_handler = NULL;
- if (nt->max_qps % NTB_NUM_MW && mw_num < nt->max_qps % NTB_NUM_MW)
- num_qps_mw = nt->max_qps / NTB_NUM_MW + 1;
+ if (nt->max_qps % mw_max && mw_num < nt->max_qps % mw_max)
+ num_qps_mw = nt->max_qps / mw_max + 1;
else
- num_qps_mw = nt->max_qps / NTB_NUM_MW;
+ num_qps_mw = nt->max_qps / mw_max;
tx_size = (unsigned int) ntb_get_mw_size(qp->ndev, mw_num) / num_qps_mw;
- qp->rx_info = ntb_get_mw_vbase(nt->ndev, mw_num) +
- (qp_num / NTB_NUM_MW * tx_size);
+ qp_offset = qp_num / mw_max * tx_size;
+ qp->tx_mw = ntb_get_mw_vbase(nt->ndev, mw_num) + qp_offset;
+ if (!qp->tx_mw)
+ return -EINVAL;
+
+ qp->tx_mw_phys = ntb_get_mw_base(qp->ndev, mw_num) + qp_offset;
+ if (!qp->tx_mw_phys)
+ return -EINVAL;
+
tx_size -= sizeof(struct ntb_rx_info);
+ qp->rx_info = qp->tx_mw + tx_size;
- qp->tx_mw = qp->rx_info + 1;
/* Due to housekeeping, there must be at least 2 buffs */
qp->tx_max_frame = min(transport_mtu, tx_size / 2);
qp->tx_max_entry = tx_size / qp->tx_max_frame;
- if (nt->debugfs_dir) {
+ if (ntb_query_debugfs(nt->ndev)) {
char debugfs_name[4];
snprintf(debugfs_name, 4, "qp%d", qp_num);
qp->debugfs_dir = debugfs_create_dir(debugfs_name,
- nt->debugfs_dir);
+ ntb_query_debugfs(nt->ndev));
qp->debugfs_stats = debugfs_create_file("stats", S_IRUSR,
qp->debugfs_dir, qp,
@@ -846,6 +889,8 @@ static void ntb_transport_init_queue(struct ntb_transport *nt,
INIT_LIST_HEAD(&qp->rx_pend_q);
INIT_LIST_HEAD(&qp->rx_free_q);
INIT_LIST_HEAD(&qp->tx_free_q);
+
+ return 0;
}
int ntb_transport_init(struct pci_dev *pdev)
@@ -857,30 +902,38 @@ int ntb_transport_init(struct pci_dev *pdev)
if (!nt)
return -ENOMEM;
- if (debugfs_initialized())
- nt->debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
- else
- nt->debugfs_dir = NULL;
-
nt->ndev = ntb_register_transport(pdev, nt);
if (!nt->ndev) {
rc = -EIO;
goto err;
}
- nt->max_qps = min(nt->ndev->max_cbs, max_num_clients);
+ nt->mw = kcalloc(ntb_max_mw(nt->ndev), sizeof(struct ntb_transport_mw),
+ GFP_KERNEL);
+ if (!nt->mw) {
+ rc = -ENOMEM;
+ goto err1;
+ }
+
+ if (max_num_clients)
+ nt->max_qps = min(ntb_max_cbs(nt->ndev), max_num_clients);
+ else
+ nt->max_qps = min(ntb_max_cbs(nt->ndev), ntb_max_mw(nt->ndev));
nt->qps = kcalloc(nt->max_qps, sizeof(struct ntb_transport_qp),
GFP_KERNEL);
if (!nt->qps) {
rc = -ENOMEM;
- goto err1;
+ goto err2;
}
nt->qp_bitmap = ((u64) 1 << nt->max_qps) - 1;
- for (i = 0; i < nt->max_qps; i++)
- ntb_transport_init_queue(nt, i);
+ for (i = 0; i < nt->max_qps; i++) {
+ rc = ntb_transport_init_queue(nt, i);
+ if (rc)
+ goto err3;
+ }
INIT_DELAYED_WORK(&nt->link_work, ntb_transport_link_work);
INIT_WORK(&nt->link_cleanup, ntb_transport_link_cleanup);
@@ -888,26 +941,27 @@ int ntb_transport_init(struct pci_dev *pdev)
rc = ntb_register_event_callback(nt->ndev,
ntb_transport_event_callback);
if (rc)
- goto err2;
+ goto err3;
INIT_LIST_HEAD(&nt->client_devs);
rc = ntb_bus_init(nt);
if (rc)
- goto err3;
+ goto err4;
if (ntb_hw_link_status(nt->ndev))
schedule_delayed_work(&nt->link_work, 0);
return 0;
-err3:
+err4:
ntb_unregister_event_callback(nt->ndev);
-err2:
+err3:
kfree(nt->qps);
+err2:
+ kfree(nt->mw);
err1:
ntb_unregister_transport(nt->ndev);
err:
- debugfs_remove_recursive(nt->debugfs_dir);
kfree(nt);
return rc;
}
@@ -915,41 +969,46 @@ err:
void ntb_transport_free(void *transport)
{
struct ntb_transport *nt = transport;
- struct pci_dev *pdev;
+ struct ntb_device *ndev = nt->ndev;
int i;
nt->transport_link = NTB_LINK_DOWN;
/* verify that all the qp's are freed */
- for (i = 0; i < nt->max_qps; i++)
+ for (i = 0; i < nt->max_qps; i++) {
if (!test_bit(i, &nt->qp_bitmap))
ntb_transport_free_queue(&nt->qps[i]);
+ debugfs_remove_recursive(nt->qps[i].debugfs_dir);
+ }
ntb_bus_remove(nt);
cancel_delayed_work_sync(&nt->link_work);
- debugfs_remove_recursive(nt->debugfs_dir);
-
- ntb_unregister_event_callback(nt->ndev);
-
- pdev = ntb_query_pdev(nt->ndev);
+ ntb_unregister_event_callback(ndev);
- for (i = 0; i < NTB_NUM_MW; i++)
+ for (i = 0; i < ntb_max_mw(ndev); i++)
ntb_free_mw(nt, i);
kfree(nt->qps);
- ntb_unregister_transport(nt->ndev);
+ kfree(nt->mw);
+ ntb_unregister_transport(ndev);
kfree(nt);
}
-static void ntb_rx_copy_task(struct ntb_transport_qp *qp,
- struct ntb_queue_entry *entry, void *offset)
+static void ntb_rx_copy_callback(void *data)
{
+ struct ntb_queue_entry *entry = data;
+ struct ntb_transport_qp *qp = entry->qp;
void *cb_data = entry->cb_data;
unsigned int len = entry->len;
+ struct ntb_payload_header *hdr = entry->rx_hdr;
- memcpy(entry->buf, offset, entry->len);
+ /* Ensure that the data is fully copied out before clearing the flag */
+ wmb();
+ hdr->flags = 0;
+
+ iowrite32(entry->index, &qp->rx_info->entry);
ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry, &qp->rx_free_q);
@@ -957,6 +1016,86 @@ static void ntb_rx_copy_task(struct ntb_transport_qp *qp,
qp->rx_handler(qp, qp->cb_data, cb_data, len);
}
+static void ntb_memcpy_rx(struct ntb_queue_entry *entry, void *offset)
+{
+ void *buf = entry->buf;
+ size_t len = entry->len;
+
+ memcpy(buf, offset, len);
+
+ ntb_rx_copy_callback(entry);
+}
+
+static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset,
+ size_t len)
+{
+ struct dma_async_tx_descriptor *txd;
+ struct ntb_transport_qp *qp = entry->qp;
+ struct dma_chan *chan = qp->dma_chan;
+ struct dma_device *device;
+ size_t pay_off, buff_off;
+ dma_addr_t src, dest;
+ dma_cookie_t cookie;
+ void *buf = entry->buf;
+ unsigned long flags;
+
+ entry->len = len;
+
+ if (!chan)
+ goto err;
+
+ if (len < copy_bytes)
+ goto err1;
+
+ device = chan->device;
+ pay_off = (size_t) offset & ~PAGE_MASK;
+ buff_off = (size_t) buf & ~PAGE_MASK;
+
+ if (!is_dma_copy_aligned(device, pay_off, buff_off, len))
+ goto err1;
+
+ dest = dma_map_single(device->dev, buf, len, DMA_FROM_DEVICE);
+ if (dma_mapping_error(device->dev, dest))
+ goto err1;
+
+ src = dma_map_single(device->dev, offset, len, DMA_TO_DEVICE);
+ if (dma_mapping_error(device->dev, src))
+ goto err2;
+
+ flags = DMA_COMPL_DEST_UNMAP_SINGLE | DMA_COMPL_SRC_UNMAP_SINGLE |
+ DMA_PREP_INTERRUPT;
+ txd = device->device_prep_dma_memcpy(chan, dest, src, len, flags);
+ if (!txd)
+ goto err3;
+
+ txd->callback = ntb_rx_copy_callback;
+ txd->callback_param = entry;
+
+ cookie = dmaengine_submit(txd);
+ if (dma_submit_error(cookie))
+ goto err3;
+
+ qp->last_cookie = cookie;
+
+ qp->rx_async++;
+
+ return;
+
+err3:
+ dma_unmap_single(device->dev, src, len, DMA_TO_DEVICE);
+err2:
+ dma_unmap_single(device->dev, dest, len, DMA_FROM_DEVICE);
+err1:
+ /* If the callbacks come out of order, the writing of the index to the
+ * last completed will be out of order. This may result in the
+ * receive stalling forever.
+ */
+ dma_sync_wait(chan, qp->last_cookie);
+err:
+ ntb_memcpy_rx(entry, offset);
+ qp->rx_memcpy++;
+}
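
ntb_async_rx() only offloads when a channel is present, the payload meets the copy_bytes threshold, and both offsets satisfy the engine's alignment; anything else falls back to ntb_memcpy_rx(). A sketch of that gating, not kernel code, with a simple mask standing in for is_dma_copy_aligned():

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static bool model_use_dma(bool have_chan, size_t len, size_t copy_bytes,
			  size_t pay_off, size_t buff_off, size_t align_mask)
{
	if (!have_chan)
		return false;		/* no engine: CPU memcpy */
	if (len < copy_bytes)
		return false;		/* small copy: CPU is cheaper */
	if ((pay_off | buff_off | len) & align_mask)
		return false;		/* offsets the engine can't take */
	return true;
}

int main(void)
{
	/* 4 KiB, aligned both sides, 8-byte engine: offload (prints 1) */
	printf("%d\n", model_use_dma(true, 4096, 1024, 0, 0, 7));
	/* 512 bytes: under the copy_bytes threshold, CPU copy (prints 0) */
	printf("%d\n", model_use_dma(true, 512, 1024, 0, 0, 7));
	return 0;
}
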
+
static int ntb_process_rxc(struct ntb_transport_qp *qp)
{
struct ntb_payload_header *hdr;
@@ -995,41 +1134,45 @@ static int ntb_process_rxc(struct ntb_transport_qp *qp)
if (hdr->flags & LINK_DOWN_FLAG) {
ntb_qp_link_down(qp);
- ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry,
- &qp->rx_pend_q);
- goto out;
+ goto err;
}
dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
"rx offset %u, ver %u - %d payload received, buf size %d\n",
qp->rx_index, hdr->ver, hdr->len, entry->len);
- if (hdr->len <= entry->len) {
- entry->len = hdr->len;
- ntb_rx_copy_task(qp, entry, offset);
- } else {
- ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry,
- &qp->rx_pend_q);
+ qp->rx_bytes += hdr->len;
+ qp->rx_pkts++;
+ if (hdr->len > entry->len) {
qp->rx_err_oflow++;
dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
"RX overflow! Wanted %d got %d\n",
hdr->len, entry->len);
+
+ goto err;
}
- qp->rx_bytes += hdr->len;
- qp->rx_pkts++;
+ entry->index = qp->rx_index;
+ entry->rx_hdr = hdr;
+
+ ntb_async_rx(entry, offset, hdr->len);
out:
+ qp->rx_index++;
+ qp->rx_index %= qp->rx_max_entry;
+
+ return 0;
+
+err:
+ ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry,
+ &qp->rx_pend_q);
/* Ensure that the data is fully copied out before clearing the flag */
wmb();
hdr->flags = 0;
iowrite32(qp->rx_index, &qp->rx_info->entry);
- qp->rx_index++;
- qp->rx_index %= qp->rx_max_entry;
-
- return 0;
+ goto out;
}
static void ntb_transport_rx(unsigned long data)
@@ -1045,6 +1188,9 @@ static void ntb_transport_rx(unsigned long data)
if (rc)
break;
}
+
+ if (qp->dma_chan)
+ dma_async_issue_pending(qp->dma_chan);
}
static void ntb_transport_rxc_db(void *data, int db_num)
@@ -1057,23 +1203,17 @@ static void ntb_transport_rxc_db(void *data, int db_num)
tasklet_schedule(&qp->rx_work);
}
-static void ntb_tx_copy_task(struct ntb_transport_qp *qp,
- struct ntb_queue_entry *entry,
- void __iomem *offset)
+static void ntb_tx_copy_callback(void *data)
{
- struct ntb_payload_header __iomem *hdr;
+ struct ntb_queue_entry *entry = data;
+ struct ntb_transport_qp *qp = entry->qp;
+ struct ntb_payload_header __iomem *hdr = entry->tx_hdr;
- memcpy_toio(offset, entry->buf, entry->len);
-
- hdr = offset + qp->tx_max_frame - sizeof(struct ntb_payload_header);
- iowrite32(entry->len, &hdr->len);
- iowrite32((u32) qp->tx_pkts, &hdr->ver);
-
- /* Ensure that the data is fully copied out before setting the flag */
+ /* Ensure that the data is fully copied out before setting the flags */
wmb();
iowrite32(entry->flags | DESC_DONE_FLAG, &hdr->flags);
- ntb_ring_sdb(qp->ndev, qp->qp_num);
+ ntb_ring_doorbell(qp->ndev, qp->qp_num);
/* The entry length can only be zero if the packet is intended to be a
* "link down" or similar. Since no payload is being sent in these
@@ -1090,15 +1230,81 @@ static void ntb_tx_copy_task(struct ntb_transport_qp *qp,
ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry, &qp->tx_free_q);
}
-static int ntb_process_tx(struct ntb_transport_qp *qp,
- struct ntb_queue_entry *entry)
+static void ntb_memcpy_tx(struct ntb_queue_entry *entry, void __iomem *offset)
+{
+ memcpy_toio(offset, entry->buf, entry->len);
+
+ ntb_tx_copy_callback(entry);
+}
+
+static void ntb_async_tx(struct ntb_transport_qp *qp,
+ struct ntb_queue_entry *entry)
{
+ struct ntb_payload_header __iomem *hdr;
+ struct dma_async_tx_descriptor *txd;
+ struct dma_chan *chan = qp->dma_chan;
+ struct dma_device *device;
+ size_t dest_off, buff_off;
+ dma_addr_t src, dest;
+ dma_cookie_t cookie;
void __iomem *offset;
+ size_t len = entry->len;
+ void *buf = entry->buf;
+ unsigned long flags;
offset = qp->tx_mw + qp->tx_max_frame * qp->tx_index;
+ hdr = offset + qp->tx_max_frame - sizeof(struct ntb_payload_header);
+ entry->tx_hdr = hdr;
+
+ iowrite32(entry->len, &hdr->len);
+ iowrite32((u32) qp->tx_pkts, &hdr->ver);
+
+ if (!chan)
+ goto err;
+
+ if (len < copy_bytes)
+ goto err;
+
+ device = chan->device;
+ dest = qp->tx_mw_phys + qp->tx_max_frame * qp->tx_index;
+ buff_off = (size_t) buf & ~PAGE_MASK;
+ dest_off = (size_t) dest & ~PAGE_MASK;
+
+ if (!is_dma_copy_aligned(device, buff_off, dest_off, len))
+ goto err;
+
+ src = dma_map_single(device->dev, buf, len, DMA_TO_DEVICE);
+ if (dma_mapping_error(device->dev, src))
+ goto err;
+
+ flags = DMA_COMPL_SRC_UNMAP_SINGLE | DMA_PREP_INTERRUPT;
+ txd = device->device_prep_dma_memcpy(chan, dest, src, len, flags);
+ if (!txd)
+ goto err1;
+
+ txd->callback = ntb_tx_copy_callback;
+ txd->callback_param = entry;
+
+ cookie = dmaengine_submit(txd);
+ if (dma_submit_error(cookie))
+ goto err1;
+
+ dma_async_issue_pending(chan);
+ qp->tx_async++;
- dev_dbg(&ntb_query_pdev(qp->ndev)->dev, "%lld - offset %p, tx %u, entry len %d flags %x buff %p\n",
- qp->tx_pkts, offset, qp->tx_index, entry->len, entry->flags,
+ return;
+err1:
+ dma_unmap_single(device->dev, src, len, DMA_TO_DEVICE);
+err:
+ ntb_memcpy_tx(entry, offset);
+ qp->tx_memcpy++;
+}
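
Unlike the receive path, ntb_async_tx() maps only the source buffer; the destination is the local BAR aperture into the peer, whose bus address was captured as tx_mw_phys at queue-init time. A toy computation of the per-frame destination, with illustrative numbers:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t tx_mw_phys = 0xb0000000ULL;	/* illustrative BAR23 address */
	uint32_t tx_max_frame = 16384, tx_index = 3;

	/* dest = qp->tx_mw_phys + qp->tx_max_frame * qp->tx_index */
	printf("frame %u -> dest %#llx\n", tx_index,
	       (unsigned long long)(tx_mw_phys +
				    (uint64_t)tx_max_frame * tx_index));
	return 0;
}
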
+
+static int ntb_process_tx(struct ntb_transport_qp *qp,
+ struct ntb_queue_entry *entry)
+{
+ dev_dbg(&ntb_query_pdev(qp->ndev)->dev, "%lld - tx %u, entry len %d flags %x buff %p\n",
+ qp->tx_pkts, qp->tx_index, entry->len, entry->flags,
entry->buf);
if (qp->tx_index == qp->remote_rx_info->entry) {
qp->tx_ring_full++;
@@ -1114,7 +1320,7 @@ static int ntb_process_tx(struct ntb_transport_qp *qp,
return 0;
}
- ntb_tx_copy_task(qp, entry, offset);
+ ntb_async_tx(qp, entry);
qp->tx_index++;
qp->tx_index %= qp->tx_max_entry;
@@ -1200,11 +1406,18 @@ ntb_transport_create_queue(void *data, struct pci_dev *pdev,
qp->tx_handler = handlers->tx_handler;
qp->event_handler = handlers->event_handler;
+ qp->dma_chan = dma_find_channel(DMA_MEMCPY);
+ if (!qp->dma_chan)
+ dev_info(&pdev->dev, "Unable to allocate DMA channel, using CPU instead\n");
+ else
+ dmaengine_get();
+
for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
entry = kzalloc(sizeof(struct ntb_queue_entry), GFP_ATOMIC);
if (!entry)
goto err1;
+ entry->qp = qp;
ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry,
&qp->rx_free_q);
}
@@ -1214,6 +1427,7 @@ ntb_transport_create_queue(void *data, struct pci_dev *pdev,
if (!entry)
goto err2;
+ entry->qp = qp;
ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
&qp->tx_free_q);
}
@@ -1259,11 +1473,26 @@ void ntb_transport_free_queue(struct ntb_transport_qp *qp)
pdev = ntb_query_pdev(qp->ndev);
- cancel_delayed_work_sync(&qp->link_work);
+ if (qp->dma_chan) {
+ struct dma_chan *chan = qp->dma_chan;
+ /* Setting the dma_chan to NULL will force any new traffic to be
+ * processed by the CPU instead of the DMA engine
+ */
+ qp->dma_chan = NULL;
+
+ /* Try to be nice and wait for any queued DMA engine
+ * transactions to process before smashing it with a rock
+ */
+ dma_sync_wait(chan, qp->last_cookie);
+ dmaengine_terminate_all(chan);
+ dmaengine_put();
+ }
ntb_unregister_db_callback(qp->ndev, qp->qp_num);
tasklet_disable(&qp->rx_work);
+ cancel_delayed_work_sync(&qp->link_work);
+
while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q)))
kfree(entry);
@@ -1354,7 +1583,7 @@ EXPORT_SYMBOL_GPL(ntb_transport_rx_enqueue);
* @len: length of the data buffer
*
* Enqueue a new transmit buffer onto the transport queue from which a NTB
- * payload will be transmitted. This assumes that a lock is behing held to
+ * payload will be transmitted. This assumes that a lock is being held to
* serialize access to the qp.
*
* RETURNS: An appropriate -ERRNO error value on error, or zero for success.
@@ -1369,8 +1598,10 @@ int ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
return -EINVAL;
entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
- if (!entry)
+ if (!entry) {
+ qp->tx_err_no_buf++;
return -ENOMEM;
+ }
entry->cb_data = cb;
entry->buf = data;
@@ -1410,7 +1641,7 @@ EXPORT_SYMBOL_GPL(ntb_transport_link_up);
*
* Notify NTB transport layer of client's desire to no longer receive data on
* transport queue specified. It is the client's responsibility to ensure all
- * entries on queue are purged or otherwise handled appropraitely.
+ * entries on queue are purged or otherwise handled appropriately.
*/
void ntb_transport_link_down(struct ntb_transport_qp *qp)
{
@@ -1486,9 +1717,18 @@ EXPORT_SYMBOL_GPL(ntb_transport_qp_num);
*/
unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp)
{
+ unsigned int max;
+
if (!qp)
return 0;
- return qp->tx_max_frame - sizeof(struct ntb_payload_header);
+ if (!qp->dma_chan)
+ return qp->tx_max_frame - sizeof(struct ntb_payload_header);
+
+ /* If DMA engine usage is possible, try to find the max size for that */
+ max = qp->tx_max_frame - sizeof(struct ntb_payload_header);
+ max -= max % (1 << qp->dma_chan->device->copy_align);
+
+ return max;
}
EXPORT_SYMBOL_GPL(ntb_transport_max_size);
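
A worked example of the rounding above, assuming an illustrative 16384-byte frame, a 12-byte payload header, and an engine whose copy_align is 3 (8-byte granularity):

#include <stdio.h>

int main(void)
{
	unsigned int max = 16384 - 12;	/* frame minus payload header */
	unsigned int align = 1u << 3;	/* copy_align == 3 */

	max -= max % align;		/* 16372 -> 16368 */
	printf("%u\n", max);
	return 0;
}
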
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig
index 78cc760..9d2009a 100644
--- a/drivers/of/Kconfig
+++ b/drivers/of/Kconfig
@@ -74,4 +74,10 @@ config OF_MTD
depends on MTD
def_bool y
+config OF_RESERVED_MEM
+ depends on OF_FLATTREE && (DMA_CMA || (HAVE_GENERIC_DMA_COHERENT && HAVE_MEMBLOCK))
+ def_bool y
+ help
+ Initialization code for DMA reserved memory
+
endmenu # OF
diff --git a/drivers/of/Makefile b/drivers/of/Makefile
index efd0510..ed9660a 100644
--- a/drivers/of/Makefile
+++ b/drivers/of/Makefile
@@ -9,3 +9,4 @@ obj-$(CONFIG_OF_MDIO) += of_mdio.o
obj-$(CONFIG_OF_PCI) += of_pci.o
obj-$(CONFIG_OF_PCI_IRQ) += of_pci_irq.o
obj-$(CONFIG_OF_MTD) += of_mtd.o
+obj-$(CONFIG_OF_RESERVED_MEM) += of_reserved_mem.o
diff --git a/drivers/of/base.c b/drivers/of/base.c
index e486e41..865d3f6 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -1176,65 +1176,10 @@ int of_property_count_strings(struct device_node *np, const char *propname)
}
EXPORT_SYMBOL_GPL(of_property_count_strings);
-/**
- * of_parse_phandle - Resolve a phandle property to a device_node pointer
- * @np: Pointer to device node holding phandle property
- * @phandle_name: Name of property holding a phandle value
- * @index: For properties holding a table of phandles, this is the index into
- * the table
- *
- * Returns the device_node pointer with refcount incremented. Use
- * of_node_put() on it when done.
- */
-struct device_node *of_parse_phandle(const struct device_node *np,
- const char *phandle_name, int index)
-{
- const __be32 *phandle;
- int size;
-
- phandle = of_get_property(np, phandle_name, &size);
- if ((!phandle) || (size < sizeof(*phandle) * (index + 1)))
- return NULL;
-
- return of_find_node_by_phandle(be32_to_cpup(phandle + index));
-}
-EXPORT_SYMBOL(of_parse_phandle);
-
-/**
- * of_parse_phandle_with_args() - Find a node pointed by phandle in a list
- * @np: pointer to a device tree node containing a list
- * @list_name: property name that contains a list
- * @cells_name: property name that specifies phandles' arguments count
- * @index: index of a phandle to parse out
- * @out_args: optional pointer to output arguments structure (will be filled)
- *
- * This function is useful to parse lists of phandles and their arguments.
- * Returns 0 on success and fills out_args, on error returns appropriate
- * errno value.
- *
- * Caller is responsible to call of_node_put() on the returned out_args->node
- * pointer.
- *
- * Example:
- *
- * phandle1: node1 {
- * #list-cells = <2>;
- * }
- *
- * phandle2: node2 {
- * #list-cells = <1>;
- * }
- *
- * node3 {
- * list = <&phandle1 1 2 &phandle2 3>;
- * }
- *
- * To get a device_node of the `node2' node you may call this:
- * of_parse_phandle_with_args(node3, "list", "#list-cells", 1, &args);
- */
static int __of_parse_phandle_with_args(const struct device_node *np,
const char *list_name,
- const char *cells_name, int index,
+ const char *cells_name,
+ int cell_count, int index,
struct of_phandle_args *out_args)
{
const __be32 *list, *list_end;
@@ -1262,19 +1207,32 @@ static int __of_parse_phandle_with_args(const struct device_node *np,
if (phandle) {
/*
* Find the provider node and parse the #*-cells
- * property to determine the argument length
+ * property to determine the argument length.
+ *
+ * This is not needed if the cell count is hard-coded
+ * (i.e. cells_name not set, but cell_count is set),
+ * except when we're going to return the found node
+ * below.
*/
- node = of_find_node_by_phandle(phandle);
- if (!node) {
- pr_err("%s: could not find phandle\n",
- np->full_name);
- goto err;
+ if (cells_name || cur_index == index) {
+ node = of_find_node_by_phandle(phandle);
+ if (!node) {
+ pr_err("%s: could not find phandle\n",
+ np->full_name);
+ goto err;
+ }
}
- if (of_property_read_u32(node, cells_name, &count)) {
- pr_err("%s: could not get %s for %s\n",
- np->full_name, cells_name,
- node->full_name);
- goto err;
+
+ if (cells_name) {
+ if (of_property_read_u32(node, cells_name,
+ &count)) {
+ pr_err("%s: could not get %s for %s\n",
+ np->full_name, cells_name,
+ node->full_name);
+ goto err;
+ }
+ } else {
+ count = cell_count;
}
/*
@@ -1334,17 +1292,117 @@ static int __of_parse_phandle_with_args(const struct device_node *np,
return rc;
}
+/**
+ * of_parse_phandle - Resolve a phandle property to a device_node pointer
+ * @np: Pointer to device node holding phandle property
+ * @phandle_name: Name of property holding a phandle value
+ * @index: For properties holding a table of phandles, this is the index into
+ * the table
+ *
+ * Returns the device_node pointer with refcount incremented. Use
+ * of_node_put() on it when done.
+ */
+struct device_node *of_parse_phandle(const struct device_node *np,
+ const char *phandle_name, int index)
+{
+ struct of_phandle_args args;
+
+ if (index < 0)
+ return NULL;
+
+ if (__of_parse_phandle_with_args(np, phandle_name, NULL, 0,
+ index, &args))
+ return NULL;
+
+ return args.np;
+}
+EXPORT_SYMBOL(of_parse_phandle);
+
+/**
+ * of_parse_phandle_with_args() - Find a node pointed by phandle in a list
+ * @np: pointer to a device tree node containing a list
+ * @list_name: property name that contains a list
+ * @cells_name: property name that specifies phandles' arguments count
+ * @index: index of a phandle to parse out
+ * @out_args: optional pointer to output arguments structure (will be filled)
+ *
+ * This function is useful to parse lists of phandles and their arguments.
+ * Returns 0 on success and fills out_args, on error returns appropriate
+ * errno value.
+ *
+ * Caller is responsible to call of_node_put() on the returned out_args->node
+ * pointer.
+ *
+ * Example:
+ *
+ * phandle1: node1 {
+ * #list-cells = <2>;
+ * }
+ *
+ * phandle2: node2 {
+ * #list-cells = <1>;
+ * }
+ *
+ * node3 {
+ * list = <&phandle1 1 2 &phandle2 3>;
+ * }
+ *
+ * To get a device_node of the `node2' node you may call this:
+ * of_parse_phandle_with_args(node3, "list", "#list-cells", 1, &args);
+ */
int of_parse_phandle_with_args(const struct device_node *np, const char *list_name,
const char *cells_name, int index,
struct of_phandle_args *out_args)
{
if (index < 0)
return -EINVAL;
- return __of_parse_phandle_with_args(np, list_name, cells_name, index, out_args);
+ return __of_parse_phandle_with_args(np, list_name, cells_name, 0,
+ index, out_args);
}
EXPORT_SYMBOL(of_parse_phandle_with_args);
/**
+ * of_parse_phandle_with_fixed_args() - Find a node pointed by phandle in a list
+ * @np: pointer to a device tree node containing a list
+ * @list_name: property name that contains a list
+ * @cell_count: number of argument cells following the phandle
+ * @index: index of a phandle to parse out
+ * @out_args: optional pointer to output arguments structure (will be filled)
+ *
+ * This function is useful to parse lists of phandles and their arguments.
+ * Returns 0 on success and fills out_args, on error returns appropriate
+ * errno value.
+ *
+ * Caller is responsible to call of_node_put() on the returned out_args->node
+ * pointer.
+ *
+ * Example:
+ *
+ * phandle1: node1 {
+ * }
+ *
+ * phandle2: node2 {
+ * }
+ *
+ * node3 {
+ * list = <&phandle1 0 2 &phandle2 2 3>;
+ * }
+ *
+ * To get a device_node of the `node2' node you may call this:
+ * of_parse_phandle_with_fixed_args(node3, "list", 2, 1, &args);
+ */
+int of_parse_phandle_with_fixed_args(const struct device_node *np,
+ const char *list_name, int cell_count,
+ int index, struct of_phandle_args *out_args)
+{
+ if (index < 0)
+ return -EINVAL;
+ return __of_parse_phandle_with_args(np, list_name, NULL, cell_count,
+ index, out_args);
+}
+EXPORT_SYMBOL(of_parse_phandle_with_fixed_args);
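
A consumer-side sketch for the fixed-args variant above; node, property and cell counts are illustrative, and the caller must drop the node reference it is handed:

static int example_parse(struct device_node *np)
{
	struct of_phandle_args args;
	int ret;

	/* second entry of a "list" property with two argument cells */
	ret = of_parse_phandle_with_fixed_args(np, "list", 2, 1, &args);
	if (ret)
		return ret;

	pr_info("target %s, args <%u %u>\n", args.np->name,
		args.args[0], args.args[1]);
	of_node_put(args.np);
	return 0;
}
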
+
+/**
* of_count_phandle_with_args() - Find the number of phandles references in a property
* @np: pointer to a device tree node containing a list
* @list_name: property name that contains a list
@@ -1362,7 +1420,8 @@ EXPORT_SYMBOL(of_parse_phandle_with_args);
int of_count_phandle_with_args(const struct device_node *np, const char *list_name,
const char *cells_name)
{
- return __of_parse_phandle_with_args(np, list_name, cells_name, -1, NULL);
+ return __of_parse_phandle_with_args(np, list_name, cells_name, 0, -1,
+ NULL);
}
EXPORT_SYMBOL(of_count_phandle_with_args);
@@ -1734,6 +1793,7 @@ void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align))
ap = dt_alloc(sizeof(*ap) + len + 1, 4);
if (!ap)
continue;
+ memset(ap, 0, sizeof(*ap) + len + 1);
ap->alias = start;
of_alias_add(ap, np, id, start, len);
}
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index b10ba00..229dd9d 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -11,12 +11,14 @@
#include <linux/kernel.h>
#include <linux/initrd.h>
+#include <linux/memblock.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/slab.h>
+#include <linux/random.h>
#include <asm/setup.h> /* for COMMAND_LINE_SIZE */
#ifdef CONFIG_PPC
@@ -125,13 +127,13 @@ int of_fdt_match(struct boot_param_header *blob, unsigned long node,
return score;
}
-static void *unflatten_dt_alloc(unsigned long *mem, unsigned long size,
+static void *unflatten_dt_alloc(void **mem, unsigned long size,
unsigned long align)
{
void *res;
- *mem = ALIGN(*mem, align);
- res = (void *)*mem;
+ *mem = PTR_ALIGN(*mem, align);
+ res = *mem;
*mem += size;
return res;
@@ -146,9 +148,9 @@ static void *unflatten_dt_alloc(unsigned long *mem, unsigned long size,
* @allnextpp: pointer to ->allnext from last allocated device_node
* @fpsize: Size of the node path up at the current depth.
*/
-static unsigned long unflatten_dt_node(struct boot_param_header *blob,
- unsigned long mem,
- unsigned long *p,
+static void * unflatten_dt_node(struct boot_param_header *blob,
+ void *mem,
+ void **p,
struct device_node *dad,
struct device_node ***allnextpp,
unsigned long fpsize)
@@ -161,15 +163,15 @@ static unsigned long unflatten_dt_node(struct boot_param_header *blob,
int has_name = 0;
int new_format = 0;
- tag = be32_to_cpup((__be32 *)(*p));
+ tag = be32_to_cpup(*p);
if (tag != OF_DT_BEGIN_NODE) {
pr_err("Weird tag at start of node: %x\n", tag);
return mem;
}
*p += 4;
- pathp = (char *)*p;
+ pathp = *p;
l = allocl = strlen(pathp) + 1;
- *p = ALIGN(*p + l, 4);
+ *p = PTR_ALIGN(*p + l, 4);
/* version 0x10 has a more compact unit name here instead of the full
* path. we accumulate the full path size using "fpsize", we'll rebuild
@@ -201,7 +203,6 @@ static unsigned long unflatten_dt_node(struct boot_param_header *blob,
__alignof__(struct device_node));
if (allnextpp) {
char *fn;
- memset(np, 0, sizeof(*np));
np->full_name = fn = ((char *)np) + sizeof(*np);
if (new_format) {
/* rebuild full path for new format */
@@ -239,7 +240,7 @@ static unsigned long unflatten_dt_node(struct boot_param_header *blob,
u32 sz, noff;
char *pname;
- tag = be32_to_cpup((__be32 *)(*p));
+ tag = be32_to_cpup(*p);
if (tag == OF_DT_NOP) {
*p += 4;
continue;
@@ -247,11 +248,11 @@ static unsigned long unflatten_dt_node(struct boot_param_header *blob,
if (tag != OF_DT_PROP)
break;
*p += 4;
- sz = be32_to_cpup((__be32 *)(*p));
- noff = be32_to_cpup((__be32 *)((*p) + 4));
+ sz = be32_to_cpup(*p);
+ noff = be32_to_cpup(*p + 4);
*p += 8;
if (be32_to_cpu(blob->version) < 0x10)
- *p = ALIGN(*p, sz >= 8 ? 8 : 4);
+ *p = PTR_ALIGN(*p, sz >= 8 ? 8 : 4);
pname = of_fdt_get_string(blob, noff);
if (pname == NULL) {
@@ -281,11 +282,11 @@ static unsigned long unflatten_dt_node(struct boot_param_header *blob,
np->phandle = be32_to_cpup((__be32 *)*p);
pp->name = pname;
pp->length = sz;
- pp->value = (void *)*p;
+ pp->value = *p;
*prev_pp = pp;
prev_pp = &pp->next;
}
- *p = ALIGN((*p) + sz, 4);
+ *p = PTR_ALIGN((*p) + sz, 4);
}
/* with version 0x10 we may not have the name property, recreate
* it here from the unit name if absent
@@ -334,7 +335,7 @@ static unsigned long unflatten_dt_node(struct boot_param_header *blob,
else
mem = unflatten_dt_node(blob, mem, p, np, allnextpp,
fpsize);
- tag = be32_to_cpup((__be32 *)(*p));
+ tag = be32_to_cpup(*p);
}
if (tag != OF_DT_END_NODE) {
pr_err("Weird tag at end of node: %x\n", tag);
@@ -360,7 +361,8 @@ static void __unflatten_device_tree(struct boot_param_header *blob,
struct device_node **mynodes,
void * (*dt_alloc)(u64 size, u64 align))
{
- unsigned long start, mem, size;
+ unsigned long size;
+ void *start, *mem;
struct device_node **allnextp = mynodes;
pr_debug(" -> unflatten_device_tree()\n");
@@ -381,32 +383,28 @@ static void __unflatten_device_tree(struct boot_param_header *blob,
}
/* First pass, scan for size */
- start = ((unsigned long)blob) +
- be32_to_cpu(blob->off_dt_struct);
- size = unflatten_dt_node(blob, 0, &start, NULL, NULL, 0);
- size = (size | 3) + 1;
+ start = ((void *)blob) + be32_to_cpu(blob->off_dt_struct);
+ size = (unsigned long)unflatten_dt_node(blob, 0, &start, NULL, NULL, 0);
+ size = ALIGN(size, 4);
pr_debug(" size is %lx, allocating...\n", size);
/* Allocate memory for the expanded device tree */
- mem = (unsigned long)
- dt_alloc(size + 4, __alignof__(struct device_node));
+ mem = dt_alloc(size + 4, __alignof__(struct device_node));
+ memset(mem, 0, size);
- memset((void *)mem, 0, size);
+ *(__be32 *)(mem + size) = cpu_to_be32(0xdeadbeef);
- ((__be32 *)mem)[size / 4] = cpu_to_be32(0xdeadbeef);
-
- pr_debug(" unflattening %lx...\n", mem);
+ pr_debug(" unflattening %p...\n", mem);
/* Second pass, do actual unflattening */
- start = ((unsigned long)blob) +
- be32_to_cpu(blob->off_dt_struct);
+ start = ((void *)blob) + be32_to_cpu(blob->off_dt_struct);
unflatten_dt_node(blob, mem, &start, NULL, &allnextp, 0);
- if (be32_to_cpup((__be32 *)start) != OF_DT_END)
- pr_warning("Weird tag at end of tree: %08x\n", *((u32 *)start));
- if (be32_to_cpu(((__be32 *)mem)[size / 4]) != 0xdeadbeef)
+ if (be32_to_cpup(start) != OF_DT_END)
+ pr_warning("Weird tag at end of tree: %08x\n", be32_to_cpup(start));
+ if (be32_to_cpup(mem + size) != 0xdeadbeef)
pr_warning("End of tree marker overwritten: %08x\n",
- be32_to_cpu(((__be32 *)mem)[size / 4]));
+ be32_to_cpup(mem + size));
*allnextp = NULL;
pr_debug(" <- unflatten_device_tree()\n");
@@ -545,6 +543,82 @@ int __init of_flat_dt_match(unsigned long node, const char *const *compat)
return of_fdt_match(initial_boot_params, node, compat);
}
+struct fdt_scan_status {
+ const char *name;
+ int namelen;
+ int depth;
+ int found;
+ int (*iterator)(unsigned long node, const char *uname, int depth, void *data);
+ void *data;
+};
+
+/**
+ * fdt_scan_node_by_path - iterator for of_scan_flat_dt_by_path function
+ */
+static int __init fdt_scan_node_by_path(unsigned long node, const char *uname,
+ int depth, void *data)
+{
+ struct fdt_scan_status *st = data;
+
+ /*
+ * if scan at the requested fdt node has been completed,
+ * return -ENXIO to abort further scanning
+ */
+ if (depth <= st->depth)
+ return -ENXIO;
+
+ /* requested fdt node has been found, so call iterator function */
+ if (st->found)
+ return st->iterator(node, uname, depth, st->data);
+
+ /* check if the scanning automaton is entering the next level of fdt nodes */
+ if (depth == st->depth + 1 &&
+ strncmp(st->name, uname, st->namelen) == 0 &&
+ uname[st->namelen] == 0) {
+ st->depth += 1;
+ if (st->name[st->namelen] == 0) {
+ st->found = 1;
+ } else {
+ const char *next = st->name + st->namelen + 1;
+ st->name = next;
+ st->namelen = strcspn(next, "/");
+ }
+ return 0;
+ }
+
+ /* scan next fdt node */
+ return 0;
+}
+
+/**
+ * of_scan_flat_dt_by_path - scan flattened tree blob and call callback on each
+ * child of the given path.
+ * @path: path to start searching for children
+ * @it: callback function
+ * @data: context data pointer
+ *
+ * This function is used to scan the flattened device-tree starting from the
+ * node given by path. It is used to extract information (like reserved
+ * node given by path. It is used to extract information (like reserved
+ * memory), which is required on early boot before we can unflatten the tree.
+ */
+int __init of_scan_flat_dt_by_path(const char *path,
+ int (*it)(unsigned long node, const char *name, int depth, void *data),
+ void *data)
+{
+ struct fdt_scan_status st = {path, 0, -1, 0, it, data};
+ int ret = 0;
+
+ if (initial_boot_params)
+ ret = of_scan_flat_dt(fdt_scan_node_by_path, &st);
+
+ if (!st.found)
+ return -ENOENT;
+ else if (ret == -ENXIO) /* scan has been completed */
+ return 0;
+ else
+ return ret;
+}
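
A hedged usage sketch for the new iterator: walk the children of /chosen in the flat tree and log their names. The callback follows the usual of_scan_flat_dt() contract, where a negative return stops the scan:

static int __init dump_chosen_child(unsigned long node, const char *uname,
				    int depth, void *data)
{
	pr_info("chosen child: %s (depth %d)\n", uname, depth);
	return 0;	/* keep scanning */
}

static void __init dump_chosen(void)
{
	if (of_scan_flat_dt_by_path("/chosen", dump_chosen_child, NULL))
		pr_info("/chosen not found or scan failed\n");
}
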
+
#ifdef CONFIG_BLK_DEV_INITRD
/**
* early_init_dt_check_for_initrd - Decode initrd location from flat tree
@@ -552,7 +626,8 @@ int __init of_flat_dt_match(unsigned long node, const char *const *compat)
*/
void __init early_init_dt_check_for_initrd(unsigned long node)
{
- unsigned long start, end, len;
+ u64 start, end;
+ unsigned long len;
__be32 *prop;
pr_debug("Looking for initrd properties... ");
@@ -560,15 +635,16 @@ void __init early_init_dt_check_for_initrd(unsigned long node)
prop = of_get_flat_dt_prop(node, "linux,initrd-start", &len);
if (!prop)
return;
- start = of_read_ulong(prop, len/4);
+ start = of_read_number(prop, len/4);
prop = of_get_flat_dt_prop(node, "linux,initrd-end", &len);
if (!prop)
return;
- end = of_read_ulong(prop, len/4);
+ end = of_read_number(prop, len/4);
early_init_dt_setup_initrd_arch(start, end);
- pr_debug("initrd_start=0x%lx initrd_end=0x%lx\n", start, end);
+ pr_debug("initrd_start=0x%llx initrd_end=0x%llx\n",
+ (unsigned long long)start, (unsigned long long)end);
}
#else
inline void early_init_dt_check_for_initrd(unsigned long node)
@@ -698,6 +774,17 @@ int __init early_init_dt_scan_chosen(unsigned long node, const char *uname,
return 1;
}
+#ifdef CONFIG_HAVE_MEMBLOCK
+/*
+ * called from unflatten_device_tree() to bootstrap devicetree itself
+ * Architectures can override this definition if memblock isn't used
+ */
+void * __init __weak early_init_dt_alloc_memory_arch(u64 size, u64 align)
+{
+ return __va(memblock_alloc(size, align));
+}
+#endif
+
/**
* unflatten_device_tree - create tree of device_nodes from flat blob
*
@@ -716,3 +803,14 @@ void __init unflatten_device_tree(void)
}
#endif /* CONFIG_OF_EARLY_FLATTREE */
+
+/* Feed entire flattened device tree into the random pool */
+static int __init add_fdt_randomness(void)
+{
+ if (initial_boot_params)
+ add_device_randomness(initial_boot_params,
+ be32_to_cpu(initial_boot_params->totalsize));
+
+ return 0;
+}
+core_initcall(add_fdt_randomness);
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index 1264923..1752988 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -28,7 +28,7 @@
/**
* irq_of_parse_and_map - Parse and map an interrupt into linux virq space
- * @device: Device node of the device whose interrupt is to be mapped
+ * @dev: Device node of the device whose interrupt is to be mapped
* @index: Index of the interrupt to map
*
* This function is a wrapper that chains of_irq_map_one() and
diff --git a/drivers/of/of_net.c b/drivers/of/of_net.c
index ea174c8..8f9be2e 100644
--- a/drivers/of/of_net.c
+++ b/drivers/of/of_net.c
@@ -39,7 +39,7 @@ static const char *phy_modes[] = {
* The function gets phy interface string from property 'phy-mode',
 * and returns its index in the phy_modes table, or errno on error.
*/
-const int of_get_phy_mode(struct device_node *np)
+int of_get_phy_mode(struct device_node *np)
{
const char *pm;
int err, i;
diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
new file mode 100644
index 0000000..0fe40c7
--- /dev/null
+++ b/drivers/of/of_reserved_mem.c
@@ -0,0 +1,173 @@
+/*
+ * Device tree based initialization code for reserved memory.
+ *
+ * Copyright (c) 2013 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ * Author: Marek Szyprowski <m.szyprowski@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ */
+
+#include <linux/memblock.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/of_fdt.h>
+#include <linux/of_platform.h>
+#include <linux/mm.h>
+#include <linux/sizes.h>
+#include <linux/mm_types.h>
+#include <linux/dma-contiguous.h>
+#include <linux/dma-mapping.h>
+#include <linux/of_reserved_mem.h>
+
+#define MAX_RESERVED_REGIONS 16
+struct reserved_mem {
+ phys_addr_t base;
+ unsigned long size;
+ struct cma *cma;
+ char name[32];
+};
+static struct reserved_mem reserved_mem[MAX_RESERVED_REGIONS];
+static int reserved_mem_count;
+
+static int __init fdt_scan_reserved_mem(unsigned long node, const char *uname,
+ int depth, void *data)
+{
+ struct reserved_mem *rmem = &reserved_mem[reserved_mem_count];
+ phys_addr_t base, size;
+ int is_cma, is_reserved;
+ unsigned long len;
+ const char *status;
+ __be32 *prop;
+
+ is_cma = IS_ENABLED(CONFIG_DMA_CMA) &&
+ of_flat_dt_is_compatible(node, "linux,contiguous-memory-region");
+ is_reserved = of_flat_dt_is_compatible(node, "reserved-memory-region");
+
+ if (!is_reserved && !is_cma) {
+ /* ignore node and scan next one */
+ return 0;
+ }
+
+ status = of_get_flat_dt_prop(node, "status", &len);
+ if (status && strcmp(status, "okay") != 0) {
+ /* ignore disabled node and scan next one */
+ return 0;
+ }
+
+ prop = of_get_flat_dt_prop(node, "reg", &len);
+ if (!prop || (len < (dt_root_size_cells + dt_root_addr_cells) *
+ sizeof(__be32))) {
+ pr_err("Reserved mem: node %s, incorrect \"reg\" property\n",
+ uname);
+ /* ignore node and scan next one */
+ return 0;
+ }
+ base = dt_mem_next_cell(dt_root_addr_cells, &prop);
+ size = dt_mem_next_cell(dt_root_size_cells, &prop);
+
+ if (!size) {
+ /* ignore node and scan next one */
+ return 0;
+ }
+
+ pr_info("Reserved mem: found %s, memory base %lx, size %ld MiB\n",
+ uname, (unsigned long)base, (unsigned long)size / SZ_1M);
+
+ if (reserved_mem_count == ARRAY_SIZE(reserved_mem))
+ return -ENOSPC;
+
+ rmem->base = base;
+ rmem->size = size;
+ strlcpy(rmem->name, uname, sizeof(rmem->name));
+
+ if (is_cma) {
+ struct cma *cma;
+ if (dma_contiguous_reserve_area(size, base, 0, &cma) == 0) {
+ rmem->cma = cma;
+ reserved_mem_count++;
+ if (of_get_flat_dt_prop(node,
+ "linux,default-contiguous-region",
+ NULL))
+ dma_contiguous_set_default(cma);
+ }
+ } else if (is_reserved) {
+ if (memblock_remove(base, size) == 0)
+ reserved_mem_count++;
+ else
+ pr_err("Failed to reserve memory for %s\n", uname);
+ }
+
+ return 0;
+}
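[Editor's note: the base/size decoding above relies on dt_mem_next_cell() consuming dt_root_addr_cells (then dt_root_size_cells) big-endian cells and advancing the cursor. A minimal sketch, assuming one address cell and one size cell:]

	__be32 reg[] = { cpu_to_be32(0x40000000), cpu_to_be32(0x01000000) };
	__be32 *p = reg;
	u64 base = dt_mem_next_cell(1, &p);	/* 0x40000000 */
	u64 size = dt_mem_next_cell(1, &p);	/* 0x01000000, i.e. 16 MiB */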
+
+static struct reserved_mem *get_dma_memory_region(struct device *dev)
+{
+ struct device_node *node;
+ const char *name;
+ int i;
+
+ node = of_parse_phandle(dev->of_node, "memory-region", 0);
+ if (!node)
+ return NULL;
+
+ name = kbasename(node->full_name);
+ for (i = 0; i < reserved_mem_count; i++)
+ if (strcmp(name, reserved_mem[i].name) == 0)
+ return &reserved_mem[i];
+ return NULL;
+}
+
+/**
+ * of_reserved_mem_device_init() - assign reserved memory region to given device
+ *
+ * This function assigns the memory region pointed to by the "memory-region"
+ * device tree property to the given device.
+ */
+void of_reserved_mem_device_init(struct device *dev)
+{
+ struct reserved_mem *region = get_dma_memory_region(dev);
+ if (!region)
+ return;
+
+ if (region->cma) {
+ dev_set_cma_area(dev, region->cma);
+ pr_info("Assigned CMA %s to %s device\n", region->name,
+ dev_name(dev));
+ } else {
+ if (dma_declare_coherent_memory(dev, region->base, region->base,
+ region->size, DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE) != 0)
+ pr_info("Declared reserved memory %s to %s device\n",
+ region->name, dev_name(dev));
+ }
+}
+
+/**
+ * of_reserved_mem_device_release() - release reserved memory device structures
+ *
+ * This function releases structures allocated for memory region handling for
+ * the given device.
+ */
+void of_reserved_mem_device_release(struct device *dev)
+{
+ struct reserved_mem *region = get_dma_memory_region(dev);
+ if (region && !region->cma)
+ dma_release_declared_memory(dev);
+}
+
+/**
+ * early_init_dt_scan_reserved_mem() - create reserved memory regions
+ *
+ * This function grabs memory from the early allocator for exclusive use by a
+ * device, as defined in the device tree. It should be called by arch-specific
+ * code once the early allocator (memblock) has been activated and all other
+ * subsystems have already allocated/reserved their memory.
+ */
+void __init early_init_dt_scan_reserved_mem(void)
+{
+ of_scan_flat_dt_by_path("/memory/reserved-memory",
+ fdt_scan_reserved_mem, NULL);
+}
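[Editor's note: a hypothetical arch hook showing where this call belongs; the function name is made up. The scan must run after memblock is populated but before later allocations could land inside the to-be-reserved ranges.]

	void __init foo_arch_memblock_init(void)
	{
		/* memblock_add()/memblock_reserve() for RAM, kernel image, ... */
		early_init_dt_scan_reserved_mem();
	}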
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index e0a6514..9b439ac 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -21,6 +21,7 @@
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
+#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
const struct of_device_id of_default_bus_match_table[] = {
@@ -196,7 +197,7 @@ EXPORT_SYMBOL(of_device_alloc);
* Returns pointer to created platform device, or NULL if a device was not
* registered. Unavailable devices will not get registered.
*/
-struct platform_device *of_platform_device_create_pdata(
+static struct platform_device *of_platform_device_create_pdata(
struct device_node *np,
const char *bus_id,
void *platform_data,
@@ -218,6 +219,8 @@ struct platform_device *of_platform_device_create_pdata(
dev->dev.bus = &platform_bus_type;
dev->dev.platform_data = platform_data;
+ of_reserved_mem_device_init(&dev->dev);
+
/* We do not fill the DMA ops for platform devices by default.
* This is currently the responsibility of the platform code
* to do such, possibly using a device notifier
@@ -225,6 +228,7 @@ struct platform_device *of_platform_device_create_pdata(
if (of_device_add(dev) != 0) {
platform_device_put(dev);
+ of_reserved_mem_device_release(&dev->dev);
return NULL;
}
@@ -264,8 +268,11 @@ static struct amba_device *of_amba_device_create(struct device_node *node,
return NULL;
dev = amba_device_alloc(NULL, 0, 0);
- if (!dev)
+ if (!dev) {
+ pr_err("%s(): amba_device_alloc() failed for %s\n",
+ __func__, node->full_name);
return NULL;
+ }
/* setup generic device info */
dev->dev.coherent_dma_mask = ~0;
@@ -290,12 +297,18 @@ static struct amba_device *of_amba_device_create(struct device_node *node,
dev->irq[i] = irq_of_parse_and_map(node, i);
ret = of_address_to_resource(node, 0, &dev->res);
- if (ret)
+ if (ret) {
+ pr_err("%s(): of_address_to_resource() failed (%d) for %s\n",
+ __func__, ret, node->full_name);
goto err_free;
+ }
ret = amba_device_add(dev, &iomem_resource);
- if (ret)
+ if (ret) {
+ pr_err("%s(): amba_device_add() failed (%d) for %s\n",
+ __func__, ret, node->full_name);
goto err_free;
+ }
return dev;
@@ -374,6 +387,10 @@ static int of_platform_bus_create(struct device_node *bus,
}
if (of_device_is_compatible(bus, "arm,primecell")) {
+ /*
+ * Don't return an error here to keep compatibility with older
+ * device tree files.
+ */
of_amba_device_create(bus, bus_id, platform_data, parent);
return 0;
}
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index f6488ad..0b7d23b 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -487,7 +487,6 @@ static void acpiphp_bus_add(acpi_handle handle)
{
struct acpi_device *adev = NULL;
- acpiphp_bus_trim(handle);
acpi_bus_scan(handle);
acpi_bus_get_device(handle, &adev);
if (adev)
@@ -529,6 +528,16 @@ static void check_hotplug_bridge(struct acpiphp_slot *slot, struct pci_dev *dev)
}
}
+static int acpiphp_rescan_slot(struct acpiphp_slot *slot)
+{
+ struct acpiphp_func *func;
+
+ list_for_each_entry(func, &slot->funcs, sibling)
+ acpiphp_bus_add(func_to_handle(func));
+
+ return pci_scan_slot(slot->bus, PCI_DEVFN(slot->device, 0));
+}
+
/**
* enable_slot - enable, configure a slot
* @slot: slot to be enabled
@@ -543,12 +552,9 @@ static void __ref enable_slot(struct acpiphp_slot *slot)
struct acpiphp_func *func;
int max, pass;
LIST_HEAD(add_list);
+ int nr_found;
- list_for_each_entry(func, &slot->funcs, sibling)
- acpiphp_bus_add(func_to_handle(func));
-
- pci_scan_slot(bus, PCI_DEVFN(slot->device, 0));
-
+ nr_found = acpiphp_rescan_slot(slot);
max = acpiphp_max_busnr(bus);
for (pass = 0; pass < 2; pass++) {
list_for_each_entry(dev, &bus->devices, bus_list) {
@@ -567,8 +573,11 @@ static void __ref enable_slot(struct acpiphp_slot *slot)
}
}
}
-
__pci_bus_assign_resources(bus, &add_list, NULL);
+ /* Nothing more to do here if there are no new devices on this bus. */
+ if (!nr_found && (slot->flags & SLOT_ENABLED))
+ return;
+
acpiphp_sanitize_bus(bus);
acpiphp_set_hpp_values(bus);
acpiphp_set_acpi_region(slot);
@@ -837,11 +846,22 @@ static void hotplug_event(acpi_handle handle, u32 type, void *data)
case ACPI_NOTIFY_DEVICE_CHECK:
/* device check */
dbg("%s: Device check notify on %s\n", __func__, objname);
- if (bridge)
+ if (bridge) {
acpiphp_check_bridge(bridge);
- else
- acpiphp_check_bridge(func->parent);
+ } else {
+ struct acpiphp_slot *slot = func->slot;
+ int ret;
+ /*
+ * Check if anything has changed in the slot and rescan
+ * from the parent if that's the case.
+ */
+ mutex_lock(&slot->crit_sect);
+ ret = acpiphp_rescan_slot(slot);
+ mutex_unlock(&slot->crit_sect);
+ if (ret)
+ acpiphp_check_bridge(func->parent);
+ }
break;
case ACPI_NOTIFY_EJECT_REQUEST:
@@ -867,6 +887,8 @@ static void hotplug_event_work(struct work_struct *work)
hotplug_event(hp_work->handle, hp_work->type, context);
acpi_scan_lock_release();
+ acpi_evaluate_hotplug_ost(hp_work->handle, hp_work->type,
+ ACPI_OST_SC_SUCCESS, NULL);
kfree(hp_work); /* allocated in handle_hotplug_event() */
put_bridge(context->func.parent);
}
@@ -882,11 +904,15 @@ static void hotplug_event_work(struct work_struct *work)
static void handle_hotplug_event(acpi_handle handle, u32 type, void *data)
{
struct acpiphp_context *context;
+ u32 ost_code = ACPI_OST_SC_SUCCESS;
switch (type) {
case ACPI_NOTIFY_BUS_CHECK:
case ACPI_NOTIFY_DEVICE_CHECK:
+ break;
case ACPI_NOTIFY_EJECT_REQUEST:
+ ost_code = ACPI_OST_SC_EJECT_IN_PROGRESS;
+ acpi_evaluate_hotplug_ost(handle, type, ost_code, NULL);
break;
case ACPI_NOTIFY_DEVICE_WAKE:
@@ -895,20 +921,21 @@ static void handle_hotplug_event(acpi_handle handle, u32 type, void *data)
case ACPI_NOTIFY_FREQUENCY_MISMATCH:
acpi_handle_err(handle, "Device cannot be configured due "
"to a frequency mismatch\n");
- return;
+ goto out;
case ACPI_NOTIFY_BUS_MODE_MISMATCH:
acpi_handle_err(handle, "Device cannot be configured due "
"to a bus mode mismatch\n");
- return;
+ goto out;
case ACPI_NOTIFY_POWER_FAULT:
acpi_handle_err(handle, "Device has suffered a power fault\n");
- return;
+ goto out;
default:
acpi_handle_warn(handle, "Unsupported event type 0x%x\n", type);
- return;
+ ost_code = ACPI_OST_SC_UNRECOGNIZED_NOTIFY;
+ goto out;
}
mutex_lock(&acpiphp_context_lock);
@@ -917,8 +944,14 @@ static void handle_hotplug_event(acpi_handle handle, u32 type, void *data)
get_bridge(context->func.parent);
acpiphp_put_context(context);
alloc_acpi_hp_work(handle, type, context, hotplug_event_work);
+ mutex_unlock(&acpiphp_context_lock);
+ return;
}
mutex_unlock(&acpiphp_context_lock);
+ ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE;
+
+ out:
+ acpi_evaluate_hotplug_ost(handle, type, ost_code, NULL);
}
/*
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index b35f93c..d5f90d6 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -30,7 +30,6 @@ static int pci_msi_enable = 1;
/* Arch hooks */
-#if defined(CONFIG_GENERIC_HARDIRQS)
int __weak arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
{
struct msi_chip *chip = dev->bus->msi;
@@ -67,21 +66,6 @@ int __weak arch_msi_check_device(struct pci_dev *dev, int nvec, int type)
return chip->check_device(chip, dev, nvec, type);
}
-#else
-int __weak arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
-{
- return -ENOSYS;
-}
-
-void __weak arch_teardown_msi_irq(unsigned int irq)
-{
-}
-
-int __weak arch_msi_check_device(struct pci_dev *dev, int nvec, int type)
-{
- return 0;
-}
-#endif /* CONFIG_GENERIC_HARDIRQS */
int __weak arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
@@ -245,8 +229,6 @@ static void msix_mask_irq(struct msi_desc *desc, u32 flag)
desc->masked = __msix_mask_irq(desc, flag);
}
-#ifdef CONFIG_GENERIC_HARDIRQS
-
static void msi_set_mask_bit(struct irq_data *data, u32 flag)
{
struct msi_desc *desc = irq_data_get_msi(data);
@@ -270,8 +252,6 @@ void unmask_msi_irq(struct irq_data *data)
msi_set_mask_bit(data, 0);
}
-#endif /* CONFIG_GENERIC_HARDIRQS */
-
void __read_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
{
BUG_ON(entry->dev->current_state != PCI_D0);
@@ -382,10 +362,8 @@ static void free_msi_irqs(struct pci_dev *dev)
nvec = entry->nvec_used;
else
nvec = 1 << entry->msi_attrib.multiple;
-#ifdef CONFIG_GENERIC_HARDIRQS
for (i = 0; i < nvec; i++)
BUG_ON(irq_has_action(entry->irq + i));
-#endif
}
arch_teardown_msi_irqs(dev);
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 36a9e60..96d6b2e 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -732,6 +732,7 @@ config SAMSUNG_LAPTOP
tristate "Samsung Laptop driver"
depends on X86
depends on RFKILL || RFKILL = n
+ depends on ACPI_VIDEO || ACPI_VIDEO = n
depends on BACKLIGHT_CLASS_DEVICE
select LEDS_CLASS
select NEW_LEDS
@@ -764,7 +765,7 @@ config INTEL_OAKTRAIL
config SAMSUNG_Q10
tristate "Samsung Q10 Extras"
- depends on SERIO_I8042
+ depends on ACPI
select BACKLIGHT_CLASS_DEVICE
---help---
This driver provides support for backlight control on Samsung Q10
diff --git a/drivers/platform/x86/amilo-rfkill.c b/drivers/platform/x86/amilo-rfkill.c
index 6296f07..da36b5e 100644
--- a/drivers/platform/x86/amilo-rfkill.c
+++ b/drivers/platform/x86/amilo-rfkill.c
@@ -85,6 +85,13 @@ static const struct dmi_system_id amilo_rfkill_id_table[] = {
{
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
+ DMI_MATCH(DMI_BOARD_NAME, "AMILO L1310"),
+ },
+ .driver_data = (void *)&amilo_a1655_rfkill_ops
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
DMI_MATCH(DMI_BOARD_NAME, "AMILO M7440"),
},
.driver_data = (void *)&amilo_m7440_rfkill_ops
diff --git a/drivers/platform/x86/apple-gmux.c b/drivers/platform/x86/apple-gmux.c
index f74bfcb..8eea2ef 100644
--- a/drivers/platform/x86/apple-gmux.c
+++ b/drivers/platform/x86/apple-gmux.c
@@ -393,17 +393,21 @@ static void gmux_notify_handler(acpi_handle device, u32 value, void *context)
complete(&gmux_data->powerchange_done);
}
-static int gmux_suspend(struct pnp_dev *pnp, pm_message_t state)
+static int gmux_suspend(struct device *dev)
{
+ struct pnp_dev *pnp = to_pnp_dev(dev);
struct apple_gmux_data *gmux_data = pnp_get_drvdata(pnp);
+
gmux_data->resume_client_id = gmux_active_client(gmux_data);
gmux_disable_interrupts(gmux_data);
return 0;
}
-static int gmux_resume(struct pnp_dev *pnp)
+static int gmux_resume(struct device *dev)
{
+ struct pnp_dev *pnp = to_pnp_dev(dev);
struct apple_gmux_data *gmux_data = pnp_get_drvdata(pnp);
+
gmux_enable_interrupts(gmux_data);
gmux_switchto(gmux_data->resume_client_id);
if (gmux_data->power_state == VGA_SWITCHEROO_OFF)
@@ -605,13 +609,19 @@ static const struct pnp_device_id gmux_device_ids[] = {
{"", 0}
};
+static const struct dev_pm_ops gmux_dev_pm_ops = {
+ .suspend = gmux_suspend,
+ .resume = gmux_resume,
+};
+
static struct pnp_driver gmux_pnp_driver = {
.name = "apple-gmux",
.probe = gmux_probe,
.remove = gmux_remove,
.id_table = gmux_device_ids,
- .suspend = gmux_suspend,
- .resume = gmux_resume
+ .driver = {
+ .pm = &gmux_dev_pm_ops,
+ },
};
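[Editor's note: since only .suspend and .resume are populated, SIMPLE_DEV_PM_OPS() would be a near-equivalent shorthand here; it also points the freeze/thaw/poweroff/restore hooks at the same callbacks and leaves them all unset when CONFIG_PM_SLEEP is off. A sketch, not what this patch does:]

	static SIMPLE_DEV_PM_OPS(gmux_dev_pm_ops, gmux_suspend, gmux_resume);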
static int __init apple_gmux_init(void)
diff --git a/drivers/platform/x86/classmate-laptop.c b/drivers/platform/x86/classmate-laptop.c
index 36e5e6c..6dfa8d3 100644
--- a/drivers/platform/x86/classmate-laptop.c
+++ b/drivers/platform/x86/classmate-laptop.c
@@ -590,7 +590,7 @@ static ssize_t cmpc_accel_sensitivity_store(struct device *dev,
inputdev = dev_get_drvdata(&acpi->dev);
accel = dev_get_drvdata(&inputdev->dev);
- r = strict_strtoul(buf, 0, &sensitivity);
+ r = kstrtoul(buf, 0, &sensitivity);
if (r)
return r;
diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c
index 475cc52..eaa78ed 100644
--- a/drivers/platform/x86/compal-laptop.c
+++ b/drivers/platform/x86/compal-laptop.c
@@ -425,7 +425,8 @@ static ssize_t pwm_enable_store(struct device *dev,
struct compal_data *data = dev_get_drvdata(dev);
long val;
int err;
- err = strict_strtol(buf, 10, &val);
+
+ err = kstrtol(buf, 10, &val);
if (err)
return err;
if (val < 0)
@@ -463,7 +464,8 @@ static ssize_t pwm_store(struct device *dev, struct device_attribute *attr,
struct compal_data *data = dev_get_drvdata(dev);
long val;
int err;
- err = strict_strtol(buf, 10, &val);
+
+ err = kstrtol(buf, 10, &val);
if (err)
return err;
if (val < 0 || val > 255)
@@ -1081,7 +1083,6 @@ static int compal_remove(struct platform_device *pdev)
hwmon_device_unregister(data->hwmon_dev);
power_supply_unregister(&data->psy);
- platform_set_drvdata(pdev, NULL);
kfree(data);
sysfs_remove_group(&pdev->dev.kobj, &compal_attribute_group);
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
index d6970f4..1c86fa0 100644
--- a/drivers/platform/x86/hp-wmi.c
+++ b/drivers/platform/x86/hp-wmi.c
@@ -725,7 +725,7 @@ static int hp_wmi_rfkill_setup(struct platform_device *device)
(void *) HPWMI_WWAN);
if (!wwan_rfkill) {
err = -ENOMEM;
- goto register_gps_error;
+ goto register_bluetooth_error;
}
rfkill_init_sw_state(wwan_rfkill,
hp_wmi_get_sw_state(HPWMI_WWAN));
@@ -733,7 +733,7 @@ static int hp_wmi_rfkill_setup(struct platform_device *device)
hp_wmi_get_hw_state(HPWMI_WWAN));
err = rfkill_register(wwan_rfkill);
if (err)
- goto register_wwan_err;
+ goto register_wwan_error;
}
if (wireless & 0x8) {
@@ -743,7 +743,7 @@ static int hp_wmi_rfkill_setup(struct platform_device *device)
(void *) HPWMI_GPS);
if (!gps_rfkill) {
err = -ENOMEM;
- goto register_bluetooth_error;
+ goto register_wwan_error;
}
rfkill_init_sw_state(gps_rfkill,
hp_wmi_get_sw_state(HPWMI_GPS));
@@ -755,16 +755,16 @@ static int hp_wmi_rfkill_setup(struct platform_device *device)
}
return 0;
-register_wwan_err:
- rfkill_destroy(wwan_rfkill);
- wwan_rfkill = NULL;
- if (gps_rfkill)
- rfkill_unregister(gps_rfkill);
register_gps_error:
rfkill_destroy(gps_rfkill);
gps_rfkill = NULL;
if (bluetooth_rfkill)
rfkill_unregister(bluetooth_rfkill);
+register_wwan_error:
+ rfkill_destroy(wwan_rfkill);
+ wwan_rfkill = NULL;
+ if (gps_rfkill)
+ rfkill_unregister(gps_rfkill);
register_bluetooth_error:
rfkill_destroy(bluetooth_rfkill);
bluetooth_rfkill = NULL;
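[Editor's note: the relabeled chain above restores the usual kernel unwind idiom: jump to the label matching the first step that failed and fall through, undoing completed steps in reverse order. A generic sketch with hypothetical helpers:]

	static int setup_example(void)
	{
		void *a;
		int err;

		a = alloc_a();			/* step 1 */
		if (!a)
			return -ENOMEM;

		err = register_b();		/* step 2 */
		if (err)
			goto err_free_a;

		err = register_c();		/* step 3 */
		if (err)
			goto err_unregister_b;

		return 0;

	err_unregister_b:
		unregister_b();
	err_free_a:
		free_a(a);
		return err;
	}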
diff --git a/drivers/platform/x86/intel-rst.c b/drivers/platform/x86/intel-rst.c
index 9385afd..41b740c 100644
--- a/drivers/platform/x86/intel-rst.c
+++ b/drivers/platform/x86/intel-rst.c
@@ -193,17 +193,6 @@ static struct acpi_driver irst_driver = {
},
};
-static int irst_init(void)
-{
- return acpi_bus_register_driver(&irst_driver);
-}
-
-static void irst_exit(void)
-{
- acpi_bus_unregister_driver(&irst_driver);
-}
-
-module_init(irst_init);
-module_exit(irst_exit);
+module_acpi_driver(irst_driver);
MODULE_DEVICE_TABLE(acpi, irst_ids);
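[Editor's note: module_acpi_driver() generates exactly the register/unregister pair deleted above; in <linux/acpi.h> it is defined via the generic module_driver() helper:]

	#define module_acpi_driver(__acpi_driver) \
		module_driver(__acpi_driver, acpi_bus_register_driver, \
			      acpi_bus_unregister_driver)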
diff --git a/drivers/platform/x86/intel-smartconnect.c b/drivers/platform/x86/intel-smartconnect.c
index f74e93d..52259dc 100644
--- a/drivers/platform/x86/intel-smartconnect.c
+++ b/drivers/platform/x86/intel-smartconnect.c
@@ -74,17 +74,6 @@ static struct acpi_driver smartconnect_driver = {
},
};
-static int smartconnect_init(void)
-{
- return acpi_bus_register_driver(&smartconnect_driver);
-}
-
-static void smartconnect_exit(void)
-{
- acpi_bus_unregister_driver(&smartconnect_driver);
-}
-
-module_init(smartconnect_init);
-module_exit(smartconnect_exit);
+module_acpi_driver(smartconnect_driver);
MODULE_DEVICE_TABLE(acpi, smartconnect_ids);
diff --git a/drivers/platform/x86/intel_mid_powerbtn.c b/drivers/platform/x86/intel_mid_powerbtn.c
index f59683a..6b18aba 100644
--- a/drivers/platform/x86/intel_mid_powerbtn.c
+++ b/drivers/platform/x86/intel_mid_powerbtn.c
@@ -128,7 +128,6 @@ static int mfld_pb_remove(struct platform_device *pdev)
free_irq(irq, input);
input_unregister_device(input);
- platform_set_drvdata(pdev, NULL);
return 0;
}
diff --git a/drivers/platform/x86/intel_mid_thermal.c b/drivers/platform/x86/intel_mid_thermal.c
index 81c491e..93fab8b 100644
--- a/drivers/platform/x86/intel_mid_thermal.c
+++ b/drivers/platform/x86/intel_mid_thermal.c
@@ -542,7 +542,6 @@ static int mid_thermal_remove(struct platform_device *pdev)
}
kfree(pinfo);
- platform_set_drvdata(pdev, NULL);
/* Stop the ADC */
return configure_adc(0);
diff --git a/drivers/platform/x86/panasonic-laptop.c b/drivers/platform/x86/panasonic-laptop.c
index 984253d..10d12b2 100644
--- a/drivers/platform/x86/panasonic-laptop.c
+++ b/drivers/platform/x86/panasonic-laptop.c
@@ -643,23 +643,6 @@ out_hotkey:
return result;
}
-static int __init acpi_pcc_init(void)
-{
- int result = 0;
-
- if (acpi_disabled)
- return -ENODEV;
-
- result = acpi_bus_register_driver(&acpi_pcc_driver);
- if (result < 0) {
- ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
- "Error registering hotkey driver\n"));
- return -ENODEV;
- }
-
- return 0;
-}
-
static int acpi_pcc_hotkey_remove(struct acpi_device *device)
{
struct pcc_acpi *pcc = acpi_driver_data(device);
@@ -679,10 +662,4 @@ static int acpi_pcc_hotkey_remove(struct acpi_device *device)
return 0;
}
-static void __exit acpi_pcc_exit(void)
-{
- acpi_bus_unregister_driver(&acpi_pcc_driver);
-}
-
-module_init(acpi_pcc_init);
-module_exit(acpi_pcc_exit);
+module_acpi_driver(acpi_pcc_driver);
diff --git a/drivers/platform/x86/samsung-q10.c b/drivers/platform/x86/samsung-q10.c
index 4430b8c..cae7098 100644
--- a/drivers/platform/x86/samsung-q10.c
+++ b/drivers/platform/x86/samsung-q10.c
@@ -14,16 +14,12 @@
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/backlight.h>
-#include <linux/i8042.h>
#include <linux/dmi.h>
+#include <acpi/acpi_drivers.h>
-#define SAMSUNGQ10_BL_MAX_INTENSITY 255
-#define SAMSUNGQ10_BL_DEFAULT_INTENSITY 185
+#define SAMSUNGQ10_BL_MAX_INTENSITY 7
-#define SAMSUNGQ10_BL_8042_CMD 0xbe
-#define SAMSUNGQ10_BL_8042_DATA { 0x89, 0x91 }
-
-static int samsungq10_bl_brightness;
+static acpi_handle ec_handle;
static bool force;
module_param(force, bool, 0);
@@ -33,21 +29,26 @@ MODULE_PARM_DESC(force,
static int samsungq10_bl_set_intensity(struct backlight_device *bd)
{
- int brightness = bd->props.brightness;
- unsigned char c[3] = SAMSUNGQ10_BL_8042_DATA;
+ acpi_status status;
+ int i;
- c[2] = (unsigned char)brightness;
- i8042_lock_chip();
- i8042_command(c, (0x30 << 8) | SAMSUNGQ10_BL_8042_CMD);
- i8042_unlock_chip();
- samsungq10_bl_brightness = brightness;
+ for (i = 0; i < SAMSUNGQ10_BL_MAX_INTENSITY; i++) {
+ status = acpi_evaluate_object(ec_handle, "_Q63", NULL, NULL);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+ }
+ for (i = 0; i < bd->props.brightness; i++) {
+ status = acpi_evaluate_object(ec_handle, "_Q64", NULL, NULL);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+ }
return 0;
}
static int samsungq10_bl_get_intensity(struct backlight_device *bd)
{
- return samsungq10_bl_brightness;
+ return bd->props.brightness;
}
static const struct backlight_ops samsungq10_bl_ops = {
@@ -55,28 +56,6 @@ static const struct backlight_ops samsungq10_bl_ops = {
.update_status = samsungq10_bl_set_intensity,
};
-#ifdef CONFIG_PM_SLEEP
-static int samsungq10_suspend(struct device *dev)
-{
- return 0;
-}
-
-static int samsungq10_resume(struct device *dev)
-{
-
- struct backlight_device *bd = dev_get_drvdata(dev);
-
- samsungq10_bl_set_intensity(bd);
- return 0;
-}
-#else
-#define samsungq10_suspend NULL
-#define samsungq10_resume NULL
-#endif
-
-static SIMPLE_DEV_PM_OPS(samsungq10_pm_ops,
- samsungq10_suspend, samsungq10_resume);
-
static int samsungq10_probe(struct platform_device *pdev)
{
@@ -93,9 +72,6 @@ static int samsungq10_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, bd);
- bd->props.brightness = SAMSUNGQ10_BL_DEFAULT_INTENSITY;
- samsungq10_bl_set_intensity(bd);
-
return 0;
}
@@ -104,9 +80,6 @@ static int samsungq10_remove(struct platform_device *pdev)
struct backlight_device *bd = platform_get_drvdata(pdev);
- bd->props.brightness = SAMSUNGQ10_BL_DEFAULT_INTENSITY;
- samsungq10_bl_set_intensity(bd);
-
backlight_device_unregister(bd);
return 0;
@@ -116,7 +89,6 @@ static struct platform_driver samsungq10_driver = {
.driver = {
.name = KBUILD_MODNAME,
.owner = THIS_MODULE,
- .pm = &samsungq10_pm_ops,
},
.probe = samsungq10_probe,
.remove = samsungq10_remove,
@@ -172,6 +144,11 @@ static int __init samsungq10_init(void)
if (!force && !dmi_check_system(samsungq10_dmi_table))
return -ENODEV;
+ ec_handle = ec_get_handle();
+
+ if (!ec_handle)
+ return -ENODEV;
+
samsungq10_device = platform_create_bundle(&samsungq10_driver,
samsungq10_probe,
NULL, 0, NULL, 0);
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index be67e5e..03ca6c1 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -369,7 +369,7 @@ struct tpacpi_led_classdev {
struct led_classdev led_classdev;
struct work_struct work;
enum led_status_t new_state;
- unsigned int led;
+ int led;
};
/* brightness level capabilities */
@@ -5296,6 +5296,16 @@ static int __init led_init(struct ibm_init_struct *iibm)
led_supported = led_init_detect_mode();
+ if (led_supported != TPACPI_LED_NONE) {
+ useful_leds = tpacpi_check_quirks(led_useful_qtable,
+ ARRAY_SIZE(led_useful_qtable));
+
+ if (!useful_leds) {
+ led_handle = NULL;
+ led_supported = TPACPI_LED_NONE;
+ }
+ }
+
vdbg_printk(TPACPI_DBG_INIT, "LED commands are %s, mode %d\n",
str_supported(led_supported), led_supported);
@@ -5309,10 +5319,9 @@ static int __init led_init(struct ibm_init_struct *iibm)
return -ENOMEM;
}
- useful_leds = tpacpi_check_quirks(led_useful_qtable,
- ARRAY_SIZE(led_useful_qtable));
-
for (i = 0; i < TPACPI_LED_NUMLEDS; i++) {
+ tpacpi_leds[i].led = -1;
+
if (!tpacpi_is_led_restricted(i) &&
test_bit(i, &useful_leds)) {
rc = tpacpi_init_led(i);
@@ -5370,9 +5379,13 @@ static int led_write(char *buf)
return -ENODEV;
while ((cmd = next_cmd(&buf))) {
- if (sscanf(cmd, "%d", &led) != 1 || led < 0 || led > 15)
+ if (sscanf(cmd, "%d", &led) != 1)
return -EINVAL;
+ if (led < 0 || led > (TPACPI_LED_NUMLEDS - 1) ||
+ tpacpi_leds[led].led < 0)
+ return -ENODEV;
+
if (strstr(cmd, "off")) {
s = TPACPI_LED_OFF;
} else if (strstr(cmd, "on")) {
diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
index 6e02c95..601ea95 100644
--- a/drivers/platform/x86/wmi.c
+++ b/drivers/platform/x86/wmi.c
@@ -780,7 +780,7 @@ static bool guid_already_parsed(const char *guid_string)
/*
* Parse the _WDG method for the GUID data blocks
*/
-static acpi_status parse_wdg(acpi_handle handle)
+static int parse_wdg(acpi_handle handle)
{
struct acpi_buffer out = {ACPI_ALLOCATE_BUFFER, NULL};
union acpi_object *obj;
@@ -812,7 +812,7 @@ static acpi_status parse_wdg(acpi_handle handle)
wblock = kzalloc(sizeof(struct wmi_block), GFP_KERNEL);
if (!wblock)
- return AE_NO_MEMORY;
+ return -ENOMEM;
wblock->handle = handle;
wblock->gblock = gblock[i];
diff --git a/drivers/pnp/driver.c b/drivers/pnp/driver.c
index 12adb43..a39ee38 100644
--- a/drivers/pnp/driver.c
+++ b/drivers/pnp/driver.c
@@ -163,6 +163,13 @@ static int __pnp_bus_suspend(struct device *dev, pm_message_t state)
if (!pnp_drv)
return 0;
+ if (pnp_drv->driver.pm && pnp_drv->driver.pm->suspend) {
+ error = pnp_drv->driver.pm->suspend(dev);
+ suspend_report_result(pnp_drv->driver.pm->suspend, error);
+ if (error)
+ return error;
+ }
+
if (pnp_drv->suspend) {
error = pnp_drv->suspend(pnp_dev, state);
if (error)
@@ -211,6 +218,12 @@ static int pnp_bus_resume(struct device *dev)
return error;
}
+ if (pnp_drv->driver.pm && pnp_drv->driver.pm->resume) {
+ error = pnp_drv->driver.pm->resume(dev);
+ if (error)
+ return error;
+ }
+
if (pnp_drv->resume) {
error = pnp_drv->resume(pnp_dev);
if (error)
diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig
index 7b8979c..e6f92b4 100644
--- a/drivers/power/Kconfig
+++ b/drivers/power/Kconfig
@@ -216,6 +216,13 @@ config BATTERY_S3C_ADC
help
Say Y here to enable support for iPAQ h1930/h1940/rx1950 battery
+config BATTERY_TWL4030_MADC
+ tristate "TWL4030 MADC battery driver"
+ depends on TWL4030_MADC
+ help
+ Say Y here to enable this dumb driver for batteries managed
+ through the TWL4030 MADC.
+
config CHARGER_88PM860X
tristate "Marvell 88PM860x Charger driver"
depends on MFD_88PM860X && BATTERY_88PM860X
@@ -262,7 +269,6 @@ config CHARGER_ISP1704
config CHARGER_MAX8903
tristate "MAX8903 Battery DC-DC Charger for USB and Adapter Power"
- depends on GENERIC_HARDIRQS
help
Say Y to enable support for the MAX8903 DC-DC charger and sysfs.
The driver supports controlling charger-enable and current-limit
@@ -334,6 +340,12 @@ config CHARGER_BQ2415X
You'll need this driver to charge batteries on e.g. Nokia
RX-51/N900.
+config CHARGER_BQ24190
+ tristate "TI BQ24190 battery charger driver"
+ depends on I2C && GPIOLIB
+ help
+ Say Y to enable support for the TI BQ24190 battery charger.
+
config CHARGER_SMB347
tristate "Summit Microelectronics SMB347 Battery Charger"
depends on I2C
@@ -357,7 +369,7 @@ config AB8500_BM
config BATTERY_GOLDFISH
tristate "Goldfish battery driver"
- depends on GENERIC_HARDIRQS
+ depends on GOLDFISH || COMPILE_TEST
help
Say Y to enable support for the battery and AC power in the
Goldfish emulator.
diff --git a/drivers/power/Makefile b/drivers/power/Makefile
index 653bf6c..a4b7417 100644
--- a/drivers/power/Makefile
+++ b/drivers/power/Makefile
@@ -34,6 +34,7 @@ obj-$(CONFIG_BATTERY_MAX17040) += max17040_battery.o
obj-$(CONFIG_BATTERY_MAX17042) += max17042_battery.o
obj-$(CONFIG_BATTERY_Z2) += z2_battery.o
obj-$(CONFIG_BATTERY_S3C_ADC) += s3c_adc_battery.o
+obj-$(CONFIG_BATTERY_TWL4030_MADC) += twl4030_madc_battery.o
obj-$(CONFIG_CHARGER_88PM860X) += 88pm860x_charger.o
obj-$(CONFIG_CHARGER_PCF50633) += pcf50633-charger.o
obj-$(CONFIG_BATTERY_JZ4740) += jz4740-battery.o
@@ -50,6 +51,7 @@ obj-$(CONFIG_CHARGER_MANAGER) += charger-manager.o
obj-$(CONFIG_CHARGER_MAX8997) += max8997_charger.o
obj-$(CONFIG_CHARGER_MAX8998) += max8998_charger.o
obj-$(CONFIG_CHARGER_BQ2415X) += bq2415x_charger.o
+obj-$(CONFIG_CHARGER_BQ24190) += bq24190_charger.o
obj-$(CONFIG_POWER_AVS) += avs/
obj-$(CONFIG_CHARGER_SMB347) += smb347-charger.o
obj-$(CONFIG_CHARGER_TPS65090) += tps65090-charger.o
diff --git a/drivers/power/ab8500_charger.c b/drivers/power/ab8500_charger.c
index f098fda..a4c4a10 100644
--- a/drivers/power/ab8500_charger.c
+++ b/drivers/power/ab8500_charger.c
@@ -774,6 +774,7 @@ static int ab8500_charger_max_usb_curr(struct ab8500_charger *di,
di->max_usb_in_curr.usb_type_max = USB_CH_IP_CUR_LVL_0P5;
dev_dbg(di->dev, "USB Type - 0x%02x MaxCurr: %d", link_status,
di->max_usb_in_curr.usb_type_max);
+ break;
case USB_STAT_NOT_VALID_LINK:
dev_err(di->dev, "USB Type invalid - try charging anyway\n");
di->max_usb_in_curr.usb_type_max = USB_CH_IP_CUR_LVL_0P5;
diff --git a/drivers/power/bq24190_charger.c b/drivers/power/bq24190_charger.c
new file mode 100644
index 0000000..ad3ff8f
--- /dev/null
+++ b/drivers/power/bq24190_charger.c
@@ -0,0 +1,1549 @@
+/*
+ * Driver for the TI bq24190 battery charger.
+ *
+ * Author: Mark A. Greer <mgreer@animalcreek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/of_irq.h>
+#include <linux/of_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/power_supply.h>
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+
+#include <linux/power/bq24190_charger.h>
+
+
+#define BQ24190_MANUFACTURER "Texas Instruments"
+
+#define BQ24190_REG_ISC 0x00 /* Input Source Control */
+#define BQ24190_REG_ISC_EN_HIZ_MASK BIT(7)
+#define BQ24190_REG_ISC_EN_HIZ_SHIFT 7
+#define BQ24190_REG_ISC_VINDPM_MASK (BIT(6) | BIT(5) | BIT(4) | \
+ BIT(3))
+#define BQ24190_REG_ISC_VINDPM_SHIFT 3
+#define BQ24190_REG_ISC_IINLIM_MASK (BIT(2) | BIT(1) | BIT(0))
+#define BQ24190_REG_ISC_IINLIM_SHIFT 0
+
+#define BQ24190_REG_POC 0x01 /* Power-On Configuration */
+#define BQ24190_REG_POC_RESET_MASK BIT(7)
+#define BQ24190_REG_POC_RESET_SHIFT 7
+#define BQ24190_REG_POC_WDT_RESET_MASK BIT(6)
+#define BQ24190_REG_POC_WDT_RESET_SHIFT 6
+#define BQ24190_REG_POC_CHG_CONFIG_MASK (BIT(5) | BIT(4))
+#define BQ24190_REG_POC_CHG_CONFIG_SHIFT 4
+#define BQ24190_REG_POC_SYS_MIN_MASK (BIT(3) | BIT(2) | BIT(1))
+#define BQ24190_REG_POC_SYS_MIN_SHIFT 1
+#define BQ24190_REG_POC_BOOST_LIM_MASK BIT(0)
+#define BQ24190_REG_POC_BOOST_LIM_SHIFT 0
+
+#define BQ24190_REG_CCC 0x02 /* Charge Current Control */
+#define BQ24190_REG_CCC_ICHG_MASK (BIT(7) | BIT(6) | BIT(5) | \
+ BIT(4) | BIT(3) | BIT(2))
+#define BQ24190_REG_CCC_ICHG_SHIFT 2
+#define BQ24190_REG_CCC_FORCE_20PCT_MASK BIT(0)
+#define BQ24190_REG_CCC_FORCE_20PCT_SHIFT 0
+
+#define BQ24190_REG_PCTCC 0x03 /* Pre-charge/Termination Current Cntl */
+#define BQ24190_REG_PCTCC_IPRECHG_MASK (BIT(7) | BIT(6) | BIT(5) | \
+ BIT(4))
+#define BQ24190_REG_PCTCC_IPRECHG_SHIFT 4
+#define BQ24190_REG_PCTCC_ITERM_MASK (BIT(3) | BIT(2) | BIT(1) | \
+ BIT(0))
+#define BQ24190_REG_PCTCC_ITERM_SHIFT 0
+
+#define BQ24190_REG_CVC 0x04 /* Charge Voltage Control */
+#define BQ24190_REG_CVC_VREG_MASK (BIT(7) | BIT(6) | BIT(5) | \
+ BIT(4) | BIT(3) | BIT(2))
+#define BQ24190_REG_CVC_VREG_SHIFT 2
+#define BQ24190_REG_CVC_BATLOWV_MASK BIT(1)
+#define BQ24190_REG_CVC_BATLOWV_SHIFT 1
+#define BQ24190_REG_CVC_VRECHG_MASK BIT(0)
+#define BQ24190_REG_CVC_VRECHG_SHIFT 0
+
+#define BQ24190_REG_CTTC 0x05 /* Charge Term/Timer Control */
+#define BQ24190_REG_CTTC_EN_TERM_MASK BIT(7)
+#define BQ24190_REG_CTTC_EN_TERM_SHIFT 7
+#define BQ24190_REG_CTTC_TERM_STAT_MASK BIT(6)
+#define BQ24190_REG_CTTC_TERM_STAT_SHIFT 6
+#define BQ24190_REG_CTTC_WATCHDOG_MASK (BIT(5) | BIT(4))
+#define BQ24190_REG_CTTC_WATCHDOG_SHIFT 4
+#define BQ24190_REG_CTTC_EN_TIMER_MASK BIT(3)
+#define BQ24190_REG_CTTC_EN_TIMER_SHIFT 3
+#define BQ24190_REG_CTTC_CHG_TIMER_MASK (BIT(2) | BIT(1))
+#define BQ24190_REG_CTTC_CHG_TIMER_SHIFT 1
+#define BQ24190_REG_CTTC_JEITA_ISET_MASK BIT(0)
+#define BQ24190_REG_CTTC_JEITA_ISET_SHIFT 0
+
+#define BQ24190_REG_ICTRC 0x06 /* IR Comp/Thermal Regulation Control */
+#define BQ24190_REG_ICTRC_BAT_COMP_MASK (BIT(7) | BIT(6) | BIT(5))
+#define BQ24190_REG_ICTRC_BAT_COMP_SHIFT 5
+#define BQ24190_REG_ICTRC_VCLAMP_MASK (BIT(4) | BIT(3) | BIT(2))
+#define BQ24190_REG_ICTRC_VCLAMP_SHIFT 2
+#define BQ24190_REG_ICTRC_TREG_MASK (BIT(1) | BIT(0))
+#define BQ24190_REG_ICTRC_TREG_SHIFT 0
+
+#define BQ24190_REG_MOC 0x07 /* Misc. Operation Control */
+#define BQ24190_REG_MOC_DPDM_EN_MASK BIT(7)
+#define BQ24190_REG_MOC_DPDM_EN_SHIFT 7
+#define BQ24190_REG_MOC_TMR2X_EN_MASK BIT(6)
+#define BQ24190_REG_MOC_TMR2X_EN_SHIFT 6
+#define BQ24190_REG_MOC_BATFET_DISABLE_MASK BIT(5)
+#define BQ24190_REG_MOC_BATFET_DISABLE_SHIFT 5
+#define BQ24190_REG_MOC_JEITA_VSET_MASK BIT(4)
+#define BQ24190_REG_MOC_JEITA_VSET_SHIFT 4
+#define BQ24190_REG_MOC_INT_MASK_MASK (BIT(1) | BIT(0))
+#define BQ24190_REG_MOC_INT_MASK_SHIFT 0
+
+#define BQ24190_REG_SS 0x08 /* System Status */
+#define BQ24190_REG_SS_VBUS_STAT_MASK (BIT(7) | BIT(6))
+#define BQ24190_REG_SS_VBUS_STAT_SHIFT 6
+#define BQ24190_REG_SS_CHRG_STAT_MASK (BIT(5) | BIT(4))
+#define BQ24190_REG_SS_CHRG_STAT_SHIFT 4
+#define BQ24190_REG_SS_DPM_STAT_MASK BIT(3)
+#define BQ24190_REG_SS_DPM_STAT_SHIFT 3
+#define BQ24190_REG_SS_PG_STAT_MASK BIT(2)
+#define BQ24190_REG_SS_PG_STAT_SHIFT 2
+#define BQ24190_REG_SS_THERM_STAT_MASK BIT(1)
+#define BQ24190_REG_SS_THERM_STAT_SHIFT 1
+#define BQ24190_REG_SS_VSYS_STAT_MASK BIT(0)
+#define BQ24190_REG_SS_VSYS_STAT_SHIFT 0
+
+#define BQ24190_REG_F 0x09 /* Fault */
+#define BQ24190_REG_F_WATCHDOG_FAULT_MASK BIT(7)
+#define BQ24190_REG_F_WATCHDOG_FAULT_SHIFT 7
+#define BQ24190_REG_F_BOOST_FAULT_MASK BIT(6)
+#define BQ24190_REG_F_BOOST_FAULT_SHIFT 6
+#define BQ24190_REG_F_CHRG_FAULT_MASK (BIT(5) | BIT(4))
+#define BQ24190_REG_F_CHRG_FAULT_SHIFT 4
+#define BQ24190_REG_F_BAT_FAULT_MASK BIT(3)
+#define BQ24190_REG_F_BAT_FAULT_SHIFT 3
+#define BQ24190_REG_F_NTC_FAULT_MASK (BIT(2) | BIT(1) | BIT(0))
+#define BQ24190_REG_F_NTC_FAULT_SHIFT 0
+
+#define BQ24190_REG_VPRS 0x0A /* Vendor/Part/Revision Status */
+#define BQ24190_REG_VPRS_PN_MASK (BIT(5) | BIT(4) | BIT(3))
+#define BQ24190_REG_VPRS_PN_SHIFT 3
+#define BQ24190_REG_VPRS_PN_24190 0x4
+#define BQ24190_REG_VPRS_PN_24192 0x5 /* Also 24193 */
+#define BQ24190_REG_VPRS_PN_24192I 0x3
+#define BQ24190_REG_VPRS_TS_PROFILE_MASK BIT(2)
+#define BQ24190_REG_VPRS_TS_PROFILE_SHIFT 2
+#define BQ24190_REG_VPRS_DEV_REG_MASK (BIT(1) | BIT(0))
+#define BQ24190_REG_VPRS_DEV_REG_SHIFT 0
+
+/*
+ * The FAULT register is latched by the bq24190 (except for NTC_FAULT)
+ * so the first read after a fault returns the latched value and subsequent
+ * reads return the current value. In order to return the fault status
+ * to the user, have the interrupt handler save the reg's value and retrieve
+ * it in the appropriate health/status routine. Each routine has its own
+ * flag indicating whether it should use the value stored by the last run
+ * of the interrupt handler or do an actual reg read. That way each routine
+ * can report back whatever fault may have occurred.
+ */
+struct bq24190_dev_info {
+ struct i2c_client *client;
+ struct device *dev;
+ struct power_supply charger;
+ struct power_supply battery;
+ char model_name[I2C_NAME_SIZE];
+ kernel_ulong_t model;
+ unsigned int gpio_int;
+ unsigned int irq;
+ struct mutex f_reg_lock;
+ bool first_time;
+ bool charger_health_valid;
+ bool battery_health_valid;
+ bool battery_status_valid;
+ u8 f_reg;
+ u8 ss_reg;
+ u8 watchdog;
+};
+
+/*
+ * The tables below provide a 2-way mapping for the value that goes in
+ * the register field and the real-world value that it represents.
+ * The index of the array is the value that goes in the register; the
+ * number at that index in the array is the real-world value that it
+ * represents.
+ */
+/* REG02[7:2] (ICHG) in uA */
+static const int bq24190_ccc_ichg_values[] = {
+ 512000, 576000, 640000, 704000, 768000, 832000, 896000, 960000,
+ 1024000, 1088000, 1152000, 1216000, 1280000, 1344000, 1408000, 1472000,
+ 1536000, 1600000, 1664000, 1728000, 1792000, 1856000, 1920000, 1984000,
+ 2048000, 2112000, 2176000, 2240000, 2304000, 2368000, 2432000, 2496000,
+ 2560000, 2624000, 2688000, 2752000, 2816000, 2880000, 2944000, 3008000,
+ 3072000, 3136000, 3200000, 3264000, 3328000, 3392000, 3456000, 3520000,
+ 3584000, 3648000, 3712000, 3776000, 3840000, 3904000, 3968000, 4032000,
+ 4096000, 4160000, 4224000, 4288000, 4352000, 4416000, 4480000, 4544000
+};
+
+/* REG04[7:2] (VREG) in uV */
+static const int bq24190_cvc_vreg_values[] = {
+ 3504000, 3520000, 3536000, 3552000, 3568000, 3584000, 3600000, 3616000,
+ 3632000, 3648000, 3664000, 3680000, 3696000, 3712000, 3728000, 3744000,
+ 3760000, 3776000, 3792000, 3808000, 3824000, 3840000, 3856000, 3872000,
+ 3888000, 3904000, 3920000, 3936000, 3952000, 3968000, 3984000, 4000000,
+ 4016000, 4032000, 4048000, 4064000, 4080000, 4096000, 4112000, 4128000,
+ 4144000, 4160000, 4176000, 4192000, 4208000, 4224000, 4240000, 4256000,
+ 4272000, 4288000, 4304000, 4320000, 4336000, 4352000, 4368000, 4384000,
+ 4400000
+};
+
+/* REG06[1:0] (TREG) in tenths of degrees Celsius */
+static const int bq24190_ictrc_treg_values[] = {
+ 600, 800, 1000, 1200
+};
+
+/*
+ * Return the index in 'tbl' of the greatest value that is less than or equal
+ * to 'v'. The index range returned is 0 to 'tbl_size' - 1. Assumes that
+ * the values in 'tbl' are sorted from smallest to largest and 'tbl_size'
+ * is less than 2^8.
+ */
+static u8 bq24190_find_idx(const int tbl[], int tbl_size, int v)
+{
+ int i;
+
+ for (i = 1; i < tbl_size; i++)
+ if (v < tbl[i])
+ break;
+
+ return i - 1;
+}
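[Editor's note: a worked example of the table lookup. Requesting 1,000,000 uA yields index 7, because bq24190_ccc_ichg_values[7] = 960000 is the greatest entry not exceeding the request; the register field stores 7 and tbl[7] maps it back to a real-world value.]

	u8 idx = bq24190_find_idx(bq24190_ccc_ichg_values,
				  ARRAY_SIZE(bq24190_ccc_ichg_values),
				  1000000);
	/* idx == 7; bq24190_ccc_ichg_values[idx] == 960000 uA */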
+
+/* Basic driver I/O routines */
+
+static int bq24190_read(struct bq24190_dev_info *bdi, u8 reg, u8 *data)
+{
+ int ret;
+
+ ret = i2c_smbus_read_byte_data(bdi->client, reg);
+ if (ret < 0)
+ return ret;
+
+ *data = ret;
+ return 0;
+}
+
+static int bq24190_write(struct bq24190_dev_info *bdi, u8 reg, u8 data)
+{
+ return i2c_smbus_write_byte_data(bdi->client, reg, data);
+}
+
+static int bq24190_read_mask(struct bq24190_dev_info *bdi, u8 reg,
+ u8 mask, u8 shift, u8 *data)
+{
+ u8 v;
+ int ret;
+
+ ret = bq24190_read(bdi, reg, &v);
+ if (ret < 0)
+ return ret;
+
+ v &= mask;
+ v >>= shift;
+ *data = v;
+
+ return 0;
+}
+
+static int bq24190_write_mask(struct bq24190_dev_info *bdi, u8 reg,
+ u8 mask, u8 shift, u8 data)
+{
+ u8 v;
+ int ret;
+
+ ret = bq24190_read(bdi, reg, &v);
+ if (ret < 0)
+ return ret;
+
+ v &= ~mask;
+ v |= ((data << shift) & mask);
+
+ return bq24190_write(bdi, reg, v);
+}
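[Editor's note: a typical read-modify-write through this helper. Illustrative only; 0x1 is the 40 s watchdog encoding according to the bq24190 datasheet.]

	ret = bq24190_write_mask(bdi, BQ24190_REG_CTTC,
			BQ24190_REG_CTTC_WATCHDOG_MASK,
			BQ24190_REG_CTTC_WATCHDOG_SHIFT,
			0x1);	/* WATCHDOG = 01b -> 40 s */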
+
+static int bq24190_get_field_val(struct bq24190_dev_info *bdi,
+ u8 reg, u8 mask, u8 shift,
+ const int tbl[], int tbl_size,
+ int *val)
+{
+ u8 v;
+ int ret;
+
+ ret = bq24190_read_mask(bdi, reg, mask, shift, &v);
+ if (ret < 0)
+ return ret;
+
+ v = (v >= tbl_size) ? (tbl_size - 1) : v;
+ *val = tbl[v];
+
+ return 0;
+}
+
+static int bq24190_set_field_val(struct bq24190_dev_info *bdi,
+ u8 reg, u8 mask, u8 shift,
+ const int tbl[], int tbl_size,
+ int val)
+{
+ u8 idx;
+
+ idx = bq24190_find_idx(tbl, tbl_size, val);
+
+ return bq24190_write_mask(bdi, reg, mask, shift, idx);
+}
+
+#ifdef CONFIG_SYSFS
+/*
+ * There are numerous options configurable on the bq24190
+ * that go well beyond what the power_supply properties provide access to.
+ * Provide sysfs access to them so they can be examined and possibly modified
+ * on the fly. They will be provided for the charger power_supply object only
+ * and will be prefixed by 'f_' to make them easier to recognize.
+ */
+
+#define BQ24190_SYSFS_FIELD(_name, r, f, m, store) \
+{ \
+ .attr = __ATTR(f_##_name, m, bq24190_sysfs_show, store), \
+ .reg = BQ24190_REG_##r, \
+ .mask = BQ24190_REG_##r##_##f##_MASK, \
+ .shift = BQ24190_REG_##r##_##f##_SHIFT, \
+}
+
+#define BQ24190_SYSFS_FIELD_RW(_name, r, f) \
+ BQ24190_SYSFS_FIELD(_name, r, f, S_IWUSR | S_IRUGO, \
+ bq24190_sysfs_store)
+
+#define BQ24190_SYSFS_FIELD_RO(_name, r, f) \
+ BQ24190_SYSFS_FIELD(_name, r, f, S_IRUGO, NULL)
+
+static ssize_t bq24190_sysfs_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+static ssize_t bq24190_sysfs_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+
+struct bq24190_sysfs_field_info {
+ struct device_attribute attr;
+ u8 reg;
+ u8 mask;
+ u8 shift;
+};
+
+/* On i386 ptrace-abi.h defines SS that breaks the macro calls below. */
+#undef SS
+
+static struct bq24190_sysfs_field_info bq24190_sysfs_field_tbl[] = {
+ /* sysfs name reg field in reg */
+ BQ24190_SYSFS_FIELD_RW(en_hiz, ISC, EN_HIZ),
+ BQ24190_SYSFS_FIELD_RW(vindpm, ISC, VINDPM),
+ BQ24190_SYSFS_FIELD_RW(iinlim, ISC, IINLIM),
+ BQ24190_SYSFS_FIELD_RW(chg_config, POC, CHG_CONFIG),
+ BQ24190_SYSFS_FIELD_RW(sys_min, POC, SYS_MIN),
+ BQ24190_SYSFS_FIELD_RW(boost_lim, POC, BOOST_LIM),
+ BQ24190_SYSFS_FIELD_RW(ichg, CCC, ICHG),
+ BQ24190_SYSFS_FIELD_RW(force_20_pct, CCC, FORCE_20PCT),
+ BQ24190_SYSFS_FIELD_RW(iprechg, PCTCC, IPRECHG),
+ BQ24190_SYSFS_FIELD_RW(iterm, PCTCC, ITERM),
+ BQ24190_SYSFS_FIELD_RW(vreg, CVC, VREG),
+ BQ24190_SYSFS_FIELD_RW(batlowv, CVC, BATLOWV),
+ BQ24190_SYSFS_FIELD_RW(vrechg, CVC, VRECHG),
+ BQ24190_SYSFS_FIELD_RW(en_term, CTTC, EN_TERM),
+ BQ24190_SYSFS_FIELD_RW(term_stat, CTTC, TERM_STAT),
+ BQ24190_SYSFS_FIELD_RO(watchdog, CTTC, WATCHDOG),
+ BQ24190_SYSFS_FIELD_RW(en_timer, CTTC, EN_TIMER),
+ BQ24190_SYSFS_FIELD_RW(chg_timer, CTTC, CHG_TIMER),
+ BQ24190_SYSFS_FIELD_RW(jeta_iset, CTTC, JEITA_ISET),
+ BQ24190_SYSFS_FIELD_RW(bat_comp, ICTRC, BAT_COMP),
+ BQ24190_SYSFS_FIELD_RW(vclamp, ICTRC, VCLAMP),
+ BQ24190_SYSFS_FIELD_RW(treg, ICTRC, TREG),
+ BQ24190_SYSFS_FIELD_RW(dpdm_en, MOC, DPDM_EN),
+ BQ24190_SYSFS_FIELD_RW(tmr2x_en, MOC, TMR2X_EN),
+ BQ24190_SYSFS_FIELD_RW(batfet_disable, MOC, BATFET_DISABLE),
+ BQ24190_SYSFS_FIELD_RW(jeita_vset, MOC, JEITA_VSET),
+ BQ24190_SYSFS_FIELD_RO(int_mask, MOC, INT_MASK),
+ BQ24190_SYSFS_FIELD_RO(vbus_stat, SS, VBUS_STAT),
+ BQ24190_SYSFS_FIELD_RO(chrg_stat, SS, CHRG_STAT),
+ BQ24190_SYSFS_FIELD_RO(dpm_stat, SS, DPM_STAT),
+ BQ24190_SYSFS_FIELD_RO(pg_stat, SS, PG_STAT),
+ BQ24190_SYSFS_FIELD_RO(therm_stat, SS, THERM_STAT),
+ BQ24190_SYSFS_FIELD_RO(vsys_stat, SS, VSYS_STAT),
+ BQ24190_SYSFS_FIELD_RO(watchdog_fault, F, WATCHDOG_FAULT),
+ BQ24190_SYSFS_FIELD_RO(boost_fault, F, BOOST_FAULT),
+ BQ24190_SYSFS_FIELD_RO(chrg_fault, F, CHRG_FAULT),
+ BQ24190_SYSFS_FIELD_RO(bat_fault, F, BAT_FAULT),
+ BQ24190_SYSFS_FIELD_RO(ntc_fault, F, NTC_FAULT),
+ BQ24190_SYSFS_FIELD_RO(pn, VPRS, PN),
+ BQ24190_SYSFS_FIELD_RO(ts_profile, VPRS, TS_PROFILE),
+ BQ24190_SYSFS_FIELD_RO(dev_reg, VPRS, DEV_REG),
+};
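[Editor's note: for reference, the first entry above, BQ24190_SYSFS_FIELD_RW(en_hiz, ISC, EN_HIZ), expands per the macro definitions earlier in this file to:]

	{
		.attr	= __ATTR(f_en_hiz, S_IWUSR | S_IRUGO,
				 bq24190_sysfs_show, bq24190_sysfs_store),
		.reg	= BQ24190_REG_ISC,
		.mask	= BQ24190_REG_ISC_EN_HIZ_MASK,
		.shift	= BQ24190_REG_ISC_EN_HIZ_SHIFT,
	}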
+
+static struct attribute *
+ bq24190_sysfs_attrs[ARRAY_SIZE(bq24190_sysfs_field_tbl) + 1];
+
+static const struct attribute_group bq24190_sysfs_attr_group = {
+ .attrs = bq24190_sysfs_attrs,
+};
+
+static void bq24190_sysfs_init_attrs(void)
+{
+ int i, limit = ARRAY_SIZE(bq24190_sysfs_field_tbl);
+
+ for (i = 0; i < limit; i++)
+ bq24190_sysfs_attrs[i] = &bq24190_sysfs_field_tbl[i].attr.attr;
+
+ bq24190_sysfs_attrs[limit] = NULL; /* array has one extra slot for this NULL terminator */
+}
+
+static struct bq24190_sysfs_field_info *bq24190_sysfs_field_lookup(
+ const char *name)
+{
+ int i, limit = ARRAY_SIZE(bq24190_sysfs_field_tbl);
+
+ for (i = 0; i < limit; i++)
+ if (!strcmp(name, bq24190_sysfs_field_tbl[i].attr.attr.name))
+ break;
+
+ if (i >= limit)
+ return NULL;
+
+ return &bq24190_sysfs_field_tbl[i];
+}
+
+static ssize_t bq24190_sysfs_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct power_supply *psy = dev_get_drvdata(dev);
+ struct bq24190_dev_info *bdi =
+ container_of(psy, struct bq24190_dev_info, charger);
+ struct bq24190_sysfs_field_info *info;
+ int ret;
+ u8 v;
+
+ info = bq24190_sysfs_field_lookup(attr->attr.name);
+ if (!info)
+ return -EINVAL;
+
+ ret = bq24190_read_mask(bdi, info->reg, info->mask, info->shift, &v);
+ if (ret)
+ return ret;
+
+ return scnprintf(buf, PAGE_SIZE, "%hhx\n", v);
+}
+
+static ssize_t bq24190_sysfs_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct power_supply *psy = dev_get_drvdata(dev);
+ struct bq24190_dev_info *bdi =
+ container_of(psy, struct bq24190_dev_info, charger);
+ struct bq24190_sysfs_field_info *info;
+ int ret;
+ u8 v;
+
+ info = bq24190_sysfs_field_lookup(attr->attr.name);
+ if (!info)
+ return -EINVAL;
+
+ ret = kstrtou8(buf, 0, &v);
+ if (ret < 0)
+ return ret;
+
+ ret = bq24190_write_mask(bdi, info->reg, info->mask, info->shift, v);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static int bq24190_sysfs_create_group(struct bq24190_dev_info *bdi)
+{
+ bq24190_sysfs_init_attrs();
+
+ return sysfs_create_group(&bdi->charger.dev->kobj,
+ &bq24190_sysfs_attr_group);
+}
+
+static void bq24190_sysfs_remove_group(struct bq24190_dev_info *bdi)
+{
+ sysfs_remove_group(&bdi->charger.dev->kobj, &bq24190_sysfs_attr_group);
+}
+#else
+static int bq24190_sysfs_create_group(struct bq24190_dev_info *bdi)
+{
+ return 0;
+}
+
+static inline void bq24190_sysfs_remove_group(struct bq24190_dev_info *bdi) {}
+#endif
+
+/*
+ * According to the "Host Mode and default Mode" section of the
+ * manual, a write to any register causes the bq24190 to switch
+ * from default mode to host mode. It will switch back to default
+ * mode after a WDT timeout unless the WDT is turned off as well.
+ * So, by simply turning off the WDT, we accomplish both with the
+ * same write.
+ */
+static int bq24190_set_mode_host(struct bq24190_dev_info *bdi)
+{
+ int ret;
+ u8 v;
+
+ ret = bq24190_read(bdi, BQ24190_REG_CTTC, &v);
+ if (ret < 0)
+ return ret;
+
+ bdi->watchdog = ((v & BQ24190_REG_CTTC_WATCHDOG_MASK) >>
+ BQ24190_REG_CTTC_WATCHDOG_SHIFT);
+ v &= ~BQ24190_REG_CTTC_WATCHDOG_MASK;
+
+ return bq24190_write(bdi, BQ24190_REG_CTTC, v);
+}
+
+static int bq24190_register_reset(struct bq24190_dev_info *bdi)
+{
+ int ret, limit = 100;
+ u8 v;
+
+ /* Reset the registers */
+ ret = bq24190_write_mask(bdi, BQ24190_REG_POC,
+ BQ24190_REG_POC_RESET_MASK,
+ BQ24190_REG_POC_RESET_SHIFT,
+ 0x1);
+ if (ret < 0)
+ return ret;
+
+ /* Reset bit will be cleared by hardware so poll until it is */
+ do {
+ ret = bq24190_read_mask(bdi, BQ24190_REG_POC,
+ BQ24190_REG_POC_RESET_MASK,
+ BQ24190_REG_POC_RESET_SHIFT,
+ &v);
+ if (ret < 0)
+ return ret;
+
+ if (!v)
+ break;
+
+ udelay(10);
+ } while (--limit);
+
+ if (!limit)
+ return -EIO;
+
+ return 0;
+}
+
+/* Charger power supply property routines */
+
+static int bq24190_charger_get_charge_type(struct bq24190_dev_info *bdi,
+ union power_supply_propval *val)
+{
+ u8 v;
+ int type, ret;
+
+ ret = bq24190_read_mask(bdi, BQ24190_REG_POC,
+ BQ24190_REG_POC_CHG_CONFIG_MASK,
+ BQ24190_REG_POC_CHG_CONFIG_SHIFT,
+ &v);
+ if (ret < 0)
+ return ret;
+
+ /* If POC[CHG_CONFIG] (REG01[5:4]) == 0, charge is disabled */
+ if (!v) {
+ type = POWER_SUPPLY_CHARGE_TYPE_NONE;
+ } else {
+ ret = bq24190_read_mask(bdi, BQ24190_REG_CCC,
+ BQ24190_REG_CCC_FORCE_20PCT_MASK,
+ BQ24190_REG_CCC_FORCE_20PCT_SHIFT,
+ &v);
+ if (ret < 0)
+ return ret;
+
+ type = (v) ? POWER_SUPPLY_CHARGE_TYPE_TRICKLE :
+ POWER_SUPPLY_CHARGE_TYPE_FAST;
+ }
+
+ val->intval = type;
+
+ return 0;
+}
+
+static int bq24190_charger_set_charge_type(struct bq24190_dev_info *bdi,
+ const union power_supply_propval *val)
+{
+ u8 chg_config, force_20pct, en_term;
+ int ret;
+
+ /*
+ * According to the "Termination when REG02[0] = 1" section of
+ * the bq24190 manual, the trickle charge could be less than the
+ * termination current so it recommends turning off the termination
+ * function.
+ *
+ * Note: AFAICT from the datasheet, the user will have to manually
+ * turn off the charging when in 20% mode. If it's not turned off,
+ * there could be battery damage. So, use this mode at your own risk.
+ */
+ switch (val->intval) {
+ case POWER_SUPPLY_CHARGE_TYPE_NONE:
+ chg_config = 0x0;
+ break;
+ case POWER_SUPPLY_CHARGE_TYPE_TRICKLE:
+ chg_config = 0x1;
+ force_20pct = 0x1;
+ en_term = 0x0;
+ break;
+ case POWER_SUPPLY_CHARGE_TYPE_FAST:
+ chg_config = 0x1;
+ force_20pct = 0x0;
+ en_term = 0x1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (chg_config) { /* Enabling the charger */
+ ret = bq24190_write_mask(bdi, BQ24190_REG_CCC,
+ BQ24190_REG_CCC_FORCE_20PCT_MASK,
+ BQ24190_REG_CCC_FORCE_20PCT_SHIFT,
+ force_20pct);
+ if (ret < 0)
+ return ret;
+
+ ret = bq24190_write_mask(bdi, BQ24190_REG_CTTC,
+ BQ24190_REG_CTTC_EN_TERM_MASK,
+ BQ24190_REG_CTTC_EN_TERM_SHIFT,
+ en_term);
+ if (ret < 0)
+ return ret;
+ }
+
+ return bq24190_write_mask(bdi, BQ24190_REG_POC,
+ BQ24190_REG_POC_CHG_CONFIG_MASK,
+ BQ24190_REG_POC_CHG_CONFIG_SHIFT, chg_config);
+}
+
+static int bq24190_charger_get_health(struct bq24190_dev_info *bdi,
+ union power_supply_propval *val)
+{
+ u8 v;
+ int health, ret;
+
+ mutex_lock(&bdi->f_reg_lock);
+
+ if (bdi->charger_health_valid) {
+ v = bdi->f_reg;
+ bdi->charger_health_valid = false;
+ mutex_unlock(&bdi->f_reg_lock);
+ } else {
+ mutex_unlock(&bdi->f_reg_lock);
+
+ ret = bq24190_read(bdi, BQ24190_REG_F, &v);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (v & BQ24190_REG_F_BOOST_FAULT_MASK) {
+ /*
+ * This could be over-current or over-voltage but there's
+ * no way to tell which. Return 'OVERVOLTAGE' since there
+ * isn't an 'OVERCURRENT' value defined that we can return
+ * even if it was over-current.
+ */
+ health = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
+ } else {
+ v &= BQ24190_REG_F_CHRG_FAULT_MASK;
+ v >>= BQ24190_REG_F_CHRG_FAULT_SHIFT;
+
+ switch (v) {
+ case 0x0: /* Normal */
+ health = POWER_SUPPLY_HEALTH_GOOD;
+ break;
+ case 0x1: /* Input Fault (VBUS OVP or VBAT<VBUS<3.8V) */
+ /*
+ * This could be over-voltage or under-voltage
+ * and there's no way to tell which. Instead
+ * of looking foolish and returning 'OVERVOLTAGE'
+ * when it's really under-voltage, just return
+ * 'UNSPEC_FAILURE'.
+ */
+ health = POWER_SUPPLY_HEALTH_UNSPEC_FAILURE;
+ break;
+ case 0x2: /* Thermal Shutdown */
+ health = POWER_SUPPLY_HEALTH_OVERHEAT;
+ break;
+ case 0x3: /* Charge Safety Timer Expiration */
+ health = POWER_SUPPLY_HEALTH_SAFETY_TIMER_EXPIRE;
+ break;
+ default:
+ health = POWER_SUPPLY_HEALTH_UNKNOWN;
+ }
+ }
+
+ val->intval = health;
+
+ return 0;
+}
+
+static int bq24190_charger_get_online(struct bq24190_dev_info *bdi,
+ union power_supply_propval *val)
+{
+ u8 v;
+ int ret;
+
+ ret = bq24190_read_mask(bdi, BQ24190_REG_SS,
+ BQ24190_REG_SS_PG_STAT_MASK,
+ BQ24190_REG_SS_PG_STAT_SHIFT, &v);
+ if (ret < 0)
+ return ret;
+
+ val->intval = v;
+ return 0;
+}
+
+static int bq24190_charger_get_current(struct bq24190_dev_info *bdi,
+ union power_supply_propval *val)
+{
+ u8 v;
+ int curr, ret;
+
+ ret = bq24190_get_field_val(bdi, BQ24190_REG_CCC,
+ BQ24190_REG_CCC_ICHG_MASK, BQ24190_REG_CCC_ICHG_SHIFT,
+ bq24190_ccc_ichg_values,
+ ARRAY_SIZE(bq24190_ccc_ichg_values), &curr);
+ if (ret < 0)
+ return ret;
+
+ ret = bq24190_read_mask(bdi, BQ24190_REG_CCC,
+ BQ24190_REG_CCC_FORCE_20PCT_MASK,
+ BQ24190_REG_CCC_FORCE_20PCT_SHIFT, &v);
+ if (ret < 0)
+ return ret;
+
+ /* If FORCE_20PCT is enabled, then current is 20% of ICHG value */
+ if (v)
+ curr /= 5;
+
+ val->intval = curr;
+ return 0;
+}
+
+static int bq24190_charger_get_current_max(struct bq24190_dev_info *bdi,
+ union power_supply_propval *val)
+{
+ int idx = ARRAY_SIZE(bq24190_ccc_ichg_values) - 1;
+
+ val->intval = bq24190_ccc_ichg_values[idx];
+ return 0;
+}
+
+static int bq24190_charger_set_current(struct bq24190_dev_info *bdi,
+ const union power_supply_propval *val)
+{
+ u8 v;
+ int ret, curr = val->intval;
+
+ ret = bq24190_read_mask(bdi, BQ24190_REG_CCC,
+ BQ24190_REG_CCC_FORCE_20PCT_MASK,
+ BQ24190_REG_CCC_FORCE_20PCT_SHIFT, &v);
+ if (ret < 0)
+ return ret;
+
+ /* If FORCE_20PCT is enabled, have to multiply value passed in by 5 */
+ if (v)
+ curr *= 5;
+
+ return bq24190_set_field_val(bdi, BQ24190_REG_CCC,
+ BQ24190_REG_CCC_ICHG_MASK, BQ24190_REG_CCC_ICHG_SHIFT,
+ bq24190_ccc_ichg_values,
+ ARRAY_SIZE(bq24190_ccc_ichg_values), curr);
+}
+
+static int bq24190_charger_get_voltage(struct bq24190_dev_info *bdi,
+ union power_supply_propval *val)
+{
+ int voltage, ret;
+
+ ret = bq24190_get_field_val(bdi, BQ24190_REG_CVC,
+ BQ24190_REG_CVC_VREG_MASK, BQ24190_REG_CVC_VREG_SHIFT,
+ bq24190_cvc_vreg_values,
+ ARRAY_SIZE(bq24190_cvc_vreg_values), &voltage);
+ if (ret < 0)
+ return ret;
+
+ val->intval = voltage;
+ return 0;
+}
+
+static int bq24190_charger_get_voltage_max(struct bq24190_dev_info *bdi,
+ union power_supply_propval *val)
+{
+ int idx = ARRAY_SIZE(bq24190_cvc_vreg_values) - 1;
+
+ val->intval = bq24190_cvc_vreg_values[idx];
+ return 0;
+}
+
+static int bq24190_charger_set_voltage(struct bq24190_dev_info *bdi,
+ const union power_supply_propval *val)
+{
+ return bq24190_set_field_val(bdi, BQ24190_REG_CVC,
+ BQ24190_REG_CVC_VREG_MASK, BQ24190_REG_CVC_VREG_SHIFT,
+ bq24190_cvc_vreg_values,
+ ARRAY_SIZE(bq24190_cvc_vreg_values), val->intval);
+}
+
+static int bq24190_charger_get_property(struct power_supply *psy,
+ enum power_supply_property psp, union power_supply_propval *val)
+{
+ struct bq24190_dev_info *bdi =
+ container_of(psy, struct bq24190_dev_info, charger);
+ int ret;
+
+ dev_dbg(bdi->dev, "prop: %d\n", psp);
+
+ pm_runtime_get_sync(bdi->dev);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CHARGE_TYPE:
+ ret = bq24190_charger_get_charge_type(bdi, val);
+ break;
+ case POWER_SUPPLY_PROP_HEALTH:
+ ret = bq24190_charger_get_health(bdi, val);
+ break;
+ case POWER_SUPPLY_PROP_ONLINE:
+ ret = bq24190_charger_get_online(bdi, val);
+ break;
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
+ ret = bq24190_charger_get_current(bdi, val);
+ break;
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
+ ret = bq24190_charger_get_current_max(bdi, val);
+ break;
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
+ ret = bq24190_charger_get_voltage(bdi, val);
+ break;
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX:
+ ret = bq24190_charger_get_voltage_max(bdi, val);
+ break;
+ case POWER_SUPPLY_PROP_SCOPE:
+ val->intval = POWER_SUPPLY_SCOPE_SYSTEM;
+ ret = 0;
+ break;
+ case POWER_SUPPLY_PROP_MODEL_NAME:
+ val->strval = bdi->model_name;
+ ret = 0;
+ break;
+ case POWER_SUPPLY_PROP_MANUFACTURER:
+ val->strval = BQ24190_MANUFACTURER;
+ ret = 0;
+ break;
+ default:
+ ret = -ENODATA;
+ }
+
+ pm_runtime_put_sync(bdi->dev);
+ return ret;
+}
+
+static int bq24190_charger_set_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ const union power_supply_propval *val)
+{
+ struct bq24190_dev_info *bdi =
+ container_of(psy, struct bq24190_dev_info, charger);
+ int ret;
+
+ dev_dbg(bdi->dev, "prop: %d\n", psp);
+
+ pm_runtime_get_sync(bdi->dev);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CHARGE_TYPE:
+ ret = bq24190_charger_set_charge_type(bdi, val);
+ break;
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
+ ret = bq24190_charger_set_current(bdi, val);
+ break;
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
+ ret = bq24190_charger_set_voltage(bdi, val);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ pm_runtime_put_sync(bdi->dev);
+ return ret;
+}
+
+static int bq24190_charger_property_is_writeable(struct power_supply *psy,
+ enum power_supply_property psp)
+{
+ int ret;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CHARGE_TYPE:
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
+ ret = 1;
+ break;
+ default:
+ ret = 0;
+ }
+
+ return ret;
+}
+
+static enum power_supply_property bq24190_charger_properties[] = {
+	POWER_SUPPLY_PROP_CHARGE_TYPE,
+ POWER_SUPPLY_PROP_HEALTH,
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX,
+ POWER_SUPPLY_PROP_SCOPE,
+ POWER_SUPPLY_PROP_MODEL_NAME,
+ POWER_SUPPLY_PROP_MANUFACTURER,
+};
+
+static char *bq24190_charger_supplied_to[] = {
+ "main-battery",
+};
+
+static void bq24190_charger_init(struct power_supply *charger)
+{
+ charger->name = "bq24190-charger";
+ charger->type = POWER_SUPPLY_TYPE_USB;
+ charger->properties = bq24190_charger_properties;
+ charger->num_properties = ARRAY_SIZE(bq24190_charger_properties);
+ charger->supplied_to = bq24190_charger_supplied_to;
+ charger->num_supplies = ARRAY_SIZE(bq24190_charger_supplied_to);
+ charger->get_property = bq24190_charger_get_property;
+ charger->set_property = bq24190_charger_set_property;
+ charger->property_is_writeable = bq24190_charger_property_is_writeable;
+}
+
+/* Battery power supply property routines */
+
+static int bq24190_battery_get_status(struct bq24190_dev_info *bdi,
+ union power_supply_propval *val)
+{
+ u8 ss_reg, chrg_fault;
+ int status, ret;
+
+ mutex_lock(&bdi->f_reg_lock);
+
+ if (bdi->battery_status_valid) {
+ chrg_fault = bdi->f_reg;
+ bdi->battery_status_valid = false;
+ mutex_unlock(&bdi->f_reg_lock);
+ } else {
+ mutex_unlock(&bdi->f_reg_lock);
+
+ ret = bq24190_read(bdi, BQ24190_REG_F, &chrg_fault);
+ if (ret < 0)
+ return ret;
+ }
+
+ chrg_fault &= BQ24190_REG_F_CHRG_FAULT_MASK;
+ chrg_fault >>= BQ24190_REG_F_CHRG_FAULT_SHIFT;
+
+ ret = bq24190_read(bdi, BQ24190_REG_SS, &ss_reg);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * The battery must be discharging when any of these are true:
+ * - there is no good power source;
+ * - there is a charge fault.
+ * Could also be discharging when in "supplement mode" but
+	 * there is no way to tell when it's in that mode.
+ */
+ if (!(ss_reg & BQ24190_REG_SS_PG_STAT_MASK) || chrg_fault) {
+ status = POWER_SUPPLY_STATUS_DISCHARGING;
+ } else {
+ ss_reg &= BQ24190_REG_SS_CHRG_STAT_MASK;
+ ss_reg >>= BQ24190_REG_SS_CHRG_STAT_SHIFT;
+
+ switch (ss_reg) {
+ case 0x0: /* Not Charging */
+ status = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ break;
+ case 0x1: /* Pre-charge */
+ case 0x2: /* Fast Charging */
+ status = POWER_SUPPLY_STATUS_CHARGING;
+ break;
+ case 0x3: /* Charge Termination Done */
+ status = POWER_SUPPLY_STATUS_FULL;
+ break;
+ default:
+ ret = -EIO;
+ }
+ }
+
+ if (!ret)
+ val->intval = status;
+
+ return ret;
+}
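
Editor's note: the locking above implements a consume-once cache — the IRQ thread stores a fault-register snapshot and sets a valid flag, the first property read after the event consumes it, and later reads go to live hardware. A runnable userspace model of the pattern, with pthread primitives and a stub standing in for the kernel mutex and register read:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned char cached_reg;
static int cache_valid;

static unsigned char read_hw(void)	/* stand-in for the I2C register read */
{
	return 0x00;
}

static unsigned char fault_get(void)
{
	unsigned char v;

	pthread_mutex_lock(&lock);
	if (cache_valid) {		/* consume the IRQ snapshot once */
		v = cached_reg;
		cache_valid = 0;
		pthread_mutex_unlock(&lock);
	} else {			/* nothing pending: read live */
		pthread_mutex_unlock(&lock);
		v = read_hw();
	}
	return v;
}

int main(void)
{
	unsigned char first, second;

	cached_reg = 0x20;		/* as if the IRQ thread stored a fault */
	cache_valid = 1;
	first = fault_get();		/* consumes the snapshot: 0x20 */
	second = fault_get();		/* live read: 0x00 */
	printf("0x%02x then 0x%02x\n", first, second);
	return 0;
}
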
+
+static int bq24190_battery_get_health(struct bq24190_dev_info *bdi,
+ union power_supply_propval *val)
+{
+ u8 v;
+ int health, ret;
+
+ mutex_lock(&bdi->f_reg_lock);
+
+ if (bdi->battery_health_valid) {
+ v = bdi->f_reg;
+ bdi->battery_health_valid = false;
+ mutex_unlock(&bdi->f_reg_lock);
+ } else {
+ mutex_unlock(&bdi->f_reg_lock);
+
+ ret = bq24190_read(bdi, BQ24190_REG_F, &v);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (v & BQ24190_REG_F_BAT_FAULT_MASK) {
+ health = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
+ } else {
+ v &= BQ24190_REG_F_NTC_FAULT_MASK;
+ v >>= BQ24190_REG_F_NTC_FAULT_SHIFT;
+
+ switch (v) {
+ case 0x0: /* Normal */
+ health = POWER_SUPPLY_HEALTH_GOOD;
+ break;
+ case 0x1: /* TS1 Cold */
+ case 0x3: /* TS2 Cold */
+ case 0x5: /* Both Cold */
+ health = POWER_SUPPLY_HEALTH_COLD;
+ break;
+ case 0x2: /* TS1 Hot */
+ case 0x4: /* TS2 Hot */
+ case 0x6: /* Both Hot */
+ health = POWER_SUPPLY_HEALTH_OVERHEAT;
+ break;
+ default:
+ health = POWER_SUPPLY_HEALTH_UNKNOWN;
+ }
+ }
+
+ val->intval = health;
+ return 0;
+}
+
+static int bq24190_battery_get_online(struct bq24190_dev_info *bdi,
+ union power_supply_propval *val)
+{
+ u8 batfet_disable;
+ int ret;
+
+ ret = bq24190_read_mask(bdi, BQ24190_REG_MOC,
+ BQ24190_REG_MOC_BATFET_DISABLE_MASK,
+ BQ24190_REG_MOC_BATFET_DISABLE_SHIFT, &batfet_disable);
+ if (ret < 0)
+ return ret;
+
+ val->intval = !batfet_disable;
+ return 0;
+}
+
+static int bq24190_battery_set_online(struct bq24190_dev_info *bdi,
+ const union power_supply_propval *val)
+{
+ return bq24190_write_mask(bdi, BQ24190_REG_MOC,
+ BQ24190_REG_MOC_BATFET_DISABLE_MASK,
+ BQ24190_REG_MOC_BATFET_DISABLE_SHIFT, !val->intval);
+}
+
+static int bq24190_battery_get_temp_alert_max(struct bq24190_dev_info *bdi,
+ union power_supply_propval *val)
+{
+ int temp, ret;
+
+ ret = bq24190_get_field_val(bdi, BQ24190_REG_ICTRC,
+ BQ24190_REG_ICTRC_TREG_MASK,
+ BQ24190_REG_ICTRC_TREG_SHIFT,
+ bq24190_ictrc_treg_values,
+ ARRAY_SIZE(bq24190_ictrc_treg_values), &temp);
+ if (ret < 0)
+ return ret;
+
+ val->intval = temp;
+ return 0;
+}
+
+static int bq24190_battery_set_temp_alert_max(struct bq24190_dev_info *bdi,
+ const union power_supply_propval *val)
+{
+ return bq24190_set_field_val(bdi, BQ24190_REG_ICTRC,
+ BQ24190_REG_ICTRC_TREG_MASK,
+ BQ24190_REG_ICTRC_TREG_SHIFT,
+ bq24190_ictrc_treg_values,
+ ARRAY_SIZE(bq24190_ictrc_treg_values), val->intval);
+}
+
+static int bq24190_battery_get_property(struct power_supply *psy,
+ enum power_supply_property psp, union power_supply_propval *val)
+{
+ struct bq24190_dev_info *bdi =
+ container_of(psy, struct bq24190_dev_info, battery);
+ int ret;
+
+ dev_dbg(bdi->dev, "prop: %d\n", psp);
+
+ pm_runtime_get_sync(bdi->dev);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_STATUS:
+ ret = bq24190_battery_get_status(bdi, val);
+ break;
+ case POWER_SUPPLY_PROP_HEALTH:
+ ret = bq24190_battery_get_health(bdi, val);
+ break;
+ case POWER_SUPPLY_PROP_ONLINE:
+ ret = bq24190_battery_get_online(bdi, val);
+ break;
+ case POWER_SUPPLY_PROP_TECHNOLOGY:
+		/* Could be Li-ion or Li-polymer but no way to tell which */
+ val->intval = POWER_SUPPLY_TECHNOLOGY_UNKNOWN;
+ ret = 0;
+ break;
+ case POWER_SUPPLY_PROP_TEMP_ALERT_MAX:
+ ret = bq24190_battery_get_temp_alert_max(bdi, val);
+ break;
+ case POWER_SUPPLY_PROP_SCOPE:
+ val->intval = POWER_SUPPLY_SCOPE_SYSTEM;
+ ret = 0;
+ break;
+ default:
+ ret = -ENODATA;
+ }
+
+ pm_runtime_put_sync(bdi->dev);
+ return ret;
+}
+
+static int bq24190_battery_set_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ const union power_supply_propval *val)
+{
+ struct bq24190_dev_info *bdi =
+ container_of(psy, struct bq24190_dev_info, battery);
+ int ret;
+
+ dev_dbg(bdi->dev, "prop: %d\n", psp);
+
+	pm_runtime_get_sync(bdi->dev);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_ONLINE:
+ ret = bq24190_battery_set_online(bdi, val);
+ break;
+ case POWER_SUPPLY_PROP_TEMP_ALERT_MAX:
+ ret = bq24190_battery_set_temp_alert_max(bdi, val);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ pm_runtime_put_sync(bdi->dev);
+ return ret;
+}
+
+static int bq24190_battery_property_is_writeable(struct power_supply *psy,
+ enum power_supply_property psp)
+{
+ int ret;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_ONLINE:
+ case POWER_SUPPLY_PROP_TEMP_ALERT_MAX:
+ ret = 1;
+ break;
+ default:
+ ret = 0;
+ }
+
+ return ret;
+}
+
+static enum power_supply_property bq24190_battery_properties[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_HEALTH,
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_TECHNOLOGY,
+ POWER_SUPPLY_PROP_TEMP_ALERT_MAX,
+ POWER_SUPPLY_PROP_SCOPE,
+};
+
+static void bq24190_battery_init(struct power_supply *battery)
+{
+ battery->name = "bq24190-battery";
+ battery->type = POWER_SUPPLY_TYPE_BATTERY;
+ battery->properties = bq24190_battery_properties;
+ battery->num_properties = ARRAY_SIZE(bq24190_battery_properties);
+ battery->get_property = bq24190_battery_get_property;
+ battery->set_property = bq24190_battery_set_property;
+ battery->property_is_writeable = bq24190_battery_property_is_writeable;
+}
+
+static irqreturn_t bq24190_irq_handler_thread(int irq, void *data)
+{
+ struct bq24190_dev_info *bdi = data;
+ bool alert_userspace = false;
+	u8 ss_reg = 0, f_reg = 0;
+ int ret;
+
+ pm_runtime_get_sync(bdi->dev);
+
+ ret = bq24190_read(bdi, BQ24190_REG_SS, &ss_reg);
+ if (ret < 0) {
+ dev_err(bdi->dev, "Can't read SS reg: %d\n", ret);
+ goto out;
+ }
+
+ if (ss_reg != bdi->ss_reg) {
+ /*
+ * The device is in host mode so when PG_STAT goes from 1->0
+ * (i.e., power removed) HIZ needs to be disabled.
+ */
+ if ((bdi->ss_reg & BQ24190_REG_SS_PG_STAT_MASK) &&
+ !(ss_reg & BQ24190_REG_SS_PG_STAT_MASK)) {
+ ret = bq24190_write_mask(bdi, BQ24190_REG_ISC,
+ BQ24190_REG_ISC_EN_HIZ_MASK,
+ BQ24190_REG_ISC_EN_HIZ_SHIFT,
+ 0);
+ if (ret < 0)
+ dev_err(bdi->dev, "Can't access ISC reg: %d\n",
+ ret);
+ }
+
+ bdi->ss_reg = ss_reg;
+ alert_userspace = true;
+ }
+
+ mutex_lock(&bdi->f_reg_lock);
+
+ ret = bq24190_read(bdi, BQ24190_REG_F, &f_reg);
+ if (ret < 0) {
+ mutex_unlock(&bdi->f_reg_lock);
+ dev_err(bdi->dev, "Can't read F reg: %d\n", ret);
+ goto out;
+ }
+
+ if (f_reg != bdi->f_reg) {
+ bdi->f_reg = f_reg;
+ bdi->charger_health_valid = true;
+ bdi->battery_health_valid = true;
+ bdi->battery_status_valid = true;
+
+ alert_userspace = true;
+ }
+
+ mutex_unlock(&bdi->f_reg_lock);
+
+ /*
+ * Sometimes bq24190 gives a steady trickle of interrupts even
+ * though the watchdog timer is turned off and neither the STATUS
+	 * nor FAULT registers have changed. Weed out these spurious
+ * interrupts so userspace isn't alerted for no reason.
+ * In addition, the chip always generates an interrupt after
+ * register reset so we should ignore that one (the very first
+ * interrupt received).
+ */
+	if (alert_userspace) {
+		if (!bdi->first_time) {
+			power_supply_changed(&bdi->charger);
+			power_supply_changed(&bdi->battery);
+		} else {
+			bdi->first_time = false;
+		}
+	}
+
+out:
+ pm_runtime_put_sync(bdi->dev);
+
+ dev_dbg(bdi->dev, "ss_reg: 0x%02x, f_reg: 0x%02x\n", ss_reg, f_reg);
+
+ return IRQ_HANDLED;
+}
+
+static int bq24190_hw_init(struct bq24190_dev_info *bdi)
+{
+ u8 v;
+ int ret;
+
+ pm_runtime_get_sync(bdi->dev);
+
+	/* First check that the device really is what it's supposed to be */
+ ret = bq24190_read_mask(bdi, BQ24190_REG_VPRS,
+ BQ24190_REG_VPRS_PN_MASK,
+ BQ24190_REG_VPRS_PN_SHIFT,
+ &v);
+ if (ret < 0)
+ goto out;
+
+ if (v != bdi->model) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ ret = bq24190_register_reset(bdi);
+ if (ret < 0)
+ goto out;
+
+ ret = bq24190_set_mode_host(bdi);
+out:
+ pm_runtime_put_sync(bdi->dev);
+ return ret;
+}
+
+#ifdef CONFIG_OF
+static int bq24190_setup_dt(struct bq24190_dev_info *bdi)
+{
+ bdi->irq = irq_of_parse_and_map(bdi->dev->of_node, 0);
+ if (bdi->irq <= 0)
+ return -1;
+
+ return 0;
+}
+#else
+static int bq24190_setup_dt(struct bq24190_dev_info *bdi)
+{
+ return -1;
+}
+#endif
+
+static int bq24190_setup_pdata(struct bq24190_dev_info *bdi,
+ struct bq24190_platform_data *pdata)
+{
+ int ret;
+
+ if (!gpio_is_valid(pdata->gpio_int))
+ return -1;
+
+ ret = gpio_request(pdata->gpio_int, dev_name(bdi->dev));
+ if (ret < 0)
+ return -1;
+
+ ret = gpio_direction_input(pdata->gpio_int);
+ if (ret < 0)
+ goto out;
+
+ bdi->irq = gpio_to_irq(pdata->gpio_int);
+ if (!bdi->irq)
+ goto out;
+
+ bdi->gpio_int = pdata->gpio_int;
+ return 0;
+
+out:
+ gpio_free(pdata->gpio_int);
+ return -1;
+}
+
+static int bq24190_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
+ struct device *dev = &client->dev;
+ struct bq24190_platform_data *pdata = client->dev.platform_data;
+ struct bq24190_dev_info *bdi;
+ int ret;
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) {
+ dev_err(dev, "No support for SMBUS_BYTE_DATA\n");
+ return -ENODEV;
+ }
+
+ bdi = devm_kzalloc(dev, sizeof(*bdi), GFP_KERNEL);
+ if (!bdi) {
+ dev_err(dev, "Can't alloc bdi struct\n");
+ return -ENOMEM;
+ }
+
+ bdi->client = client;
+ bdi->dev = dev;
+ bdi->model = id->driver_data;
+ strncpy(bdi->model_name, id->name, I2C_NAME_SIZE);
+ mutex_init(&bdi->f_reg_lock);
+ bdi->first_time = true;
+ bdi->charger_health_valid = false;
+ bdi->battery_health_valid = false;
+ bdi->battery_status_valid = false;
+
+ i2c_set_clientdata(client, bdi);
+
+ if (dev->of_node)
+ ret = bq24190_setup_dt(bdi);
+ else
+ ret = bq24190_setup_pdata(bdi, pdata);
+
+ if (ret) {
+ dev_err(dev, "Can't get irq info\n");
+ return -EINVAL;
+ }
+
+ ret = devm_request_threaded_irq(dev, bdi->irq, NULL,
+ bq24190_irq_handler_thread,
+ IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+ "bq24190-charger", bdi);
+ if (ret < 0) {
+ dev_err(dev, "Can't set up irq handler\n");
+ goto out1;
+ }
+
+ pm_runtime_enable(dev);
+ pm_runtime_resume(dev);
+
+ ret = bq24190_hw_init(bdi);
+ if (ret < 0) {
+ dev_err(dev, "Hardware init failed\n");
+ goto out2;
+ }
+
+ bq24190_charger_init(&bdi->charger);
+
+ ret = power_supply_register(dev, &bdi->charger);
+ if (ret) {
+ dev_err(dev, "Can't register charger\n");
+ goto out2;
+ }
+
+ bq24190_battery_init(&bdi->battery);
+
+ ret = power_supply_register(dev, &bdi->battery);
+ if (ret) {
+ dev_err(dev, "Can't register battery\n");
+ goto out3;
+ }
+
+ ret = bq24190_sysfs_create_group(bdi);
+ if (ret) {
+ dev_err(dev, "Can't create sysfs entries\n");
+ goto out4;
+ }
+
+ return 0;
+
+out4:
+ power_supply_unregister(&bdi->battery);
+out3:
+ power_supply_unregister(&bdi->charger);
+out2:
+ pm_runtime_disable(dev);
+out1:
+ if (bdi->gpio_int)
+ gpio_free(bdi->gpio_int);
+
+ return ret;
+}
+
+static int bq24190_remove(struct i2c_client *client)
+{
+ struct bq24190_dev_info *bdi = i2c_get_clientdata(client);
+
+ pm_runtime_get_sync(bdi->dev);
+ bq24190_register_reset(bdi);
+ pm_runtime_put_sync(bdi->dev);
+
+ bq24190_sysfs_remove_group(bdi);
+ power_supply_unregister(&bdi->battery);
+ power_supply_unregister(&bdi->charger);
+ pm_runtime_disable(bdi->dev);
+
+ if (bdi->gpio_int)
+ gpio_free(bdi->gpio_int);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int bq24190_pm_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct bq24190_dev_info *bdi = i2c_get_clientdata(client);
+
+ pm_runtime_get_sync(bdi->dev);
+ bq24190_register_reset(bdi);
+ pm_runtime_put_sync(bdi->dev);
+
+ return 0;
+}
+
+static int bq24190_pm_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct bq24190_dev_info *bdi = i2c_get_clientdata(client);
+
+ bdi->charger_health_valid = false;
+ bdi->battery_health_valid = false;
+ bdi->battery_status_valid = false;
+
+ pm_runtime_get_sync(bdi->dev);
+ bq24190_register_reset(bdi);
+ pm_runtime_put_sync(bdi->dev);
+
+ /* Things may have changed while suspended so alert upper layer */
+ power_supply_changed(&bdi->charger);
+ power_supply_changed(&bdi->battery);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(bq24190_pm_ops, bq24190_pm_suspend, bq24190_pm_resume);
+
+/*
+ * Only support the bq24190 right now. The bq24192, bq24192i, and bq24193
+ * are similar but not identical so the driver needs to be extended to
+ * support them.
+ */
+static const struct i2c_device_id bq24190_i2c_ids[] = {
+ { "bq24190", BQ24190_REG_VPRS_PN_24190 },
+ { },
+};
+
+#ifdef CONFIG_OF
+static const struct of_device_id bq24190_of_match[] = {
+ { .compatible = "ti,bq24190", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, bq24190_of_match);
+#else
+static const struct of_device_id bq24190_of_match[] = {
+ { },
+};
+#endif
+
+static struct i2c_driver bq24190_driver = {
+ .probe = bq24190_probe,
+ .remove = bq24190_remove,
+ .id_table = bq24190_i2c_ids,
+ .driver = {
+ .name = "bq24190-charger",
+ .owner = THIS_MODULE,
+ .pm = &bq24190_pm_ops,
+ .of_match_table = of_match_ptr(bq24190_of_match),
+ },
+};
+module_i2c_driver(bq24190_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mark A. Greer <mgreer@animalcreek.com>");
+MODULE_ALIAS("i2c:bq24190-charger");
+MODULE_DESCRIPTION("TI BQ24190 Charger Driver");
diff --git a/drivers/power/collie_battery.c b/drivers/power/collie_battery.c
index c58d0e3..d02ae02 100644
--- a/drivers/power/collie_battery.c
+++ b/drivers/power/collie_battery.c
@@ -287,7 +287,7 @@ static struct gpio collie_batt_gpios[] = {
};
#ifdef CONFIG_PM
-static int collie_bat_suspend(struct ucb1x00_dev *dev, pm_message_t state)
+static int collie_bat_suspend(struct ucb1x00_dev *dev)
{
/* flush all pending status updates */
flush_work(&bat_work);
diff --git a/drivers/power/max8925_power.c b/drivers/power/max8925_power.c
index 0ee1e14..b4513f2 100644
--- a/drivers/power/max8925_power.c
+++ b/drivers/power/max8925_power.c
@@ -458,6 +458,7 @@ max8925_power_dt_init(struct platform_device *pdev)
of_property_read_u32(np, "fast-charge", &fast_charge);
of_property_read_u32(np, "no-insert-detect", &no_insert_detect);
of_property_read_u32(np, "no-temp-support", &no_temp_support);
+ of_node_put(np);
pdata->batt_detect = batt_detect;
pdata->fast_charge = fast_charge;
diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
index 3b2d5df..00e6672 100644
--- a/drivers/power/power_supply_core.c
+++ b/drivers/power/power_supply_core.c
@@ -67,23 +67,42 @@ static int __power_supply_changed_work(struct device *dev, void *data)
static void power_supply_changed_work(struct work_struct *work)
{
+ unsigned long flags;
struct power_supply *psy = container_of(work, struct power_supply,
changed_work);
dev_dbg(psy->dev, "%s\n", __func__);
- class_for_each_device(power_supply_class, NULL, psy,
- __power_supply_changed_work);
-
- power_supply_update_leds(psy);
-
- kobject_uevent(&psy->dev->kobj, KOBJ_CHANGE);
+ spin_lock_irqsave(&psy->changed_lock, flags);
+ if (psy->changed) {
+ psy->changed = false;
+ spin_unlock_irqrestore(&psy->changed_lock, flags);
+ class_for_each_device(power_supply_class, NULL, psy,
+ __power_supply_changed_work);
+ power_supply_update_leds(psy);
+ kobject_uevent(&psy->dev->kobj, KOBJ_CHANGE);
+ spin_lock_irqsave(&psy->changed_lock, flags);
+ }
+ /*
+ * Dependent power supplies (e.g. battery) may have changed state
+ * as a result of this event, so poll again and hold the
+ * wakeup_source until all events are processed.
+ */
+ if (!psy->changed)
+ pm_relax(psy->dev);
+ spin_unlock_irqrestore(&psy->changed_lock, flags);
}
void power_supply_changed(struct power_supply *psy)
{
+ unsigned long flags;
+
dev_dbg(psy->dev, "%s\n", __func__);
+ spin_lock_irqsave(&psy->changed_lock, flags);
+ psy->changed = true;
+ pm_stay_awake(psy->dev);
+ spin_unlock_irqrestore(&psy->changed_lock, flags);
schedule_work(&psy->changed_work);
}
EXPORT_SYMBOL_GPL(power_supply_changed);
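
Editor's note: the handshake added here pairs a 'changed' flag with a wakeup-source reference — the notifier sets both, and the worker re-checks the flag after processing so an event posted mid-run keeps the reference held. A runnable userspace model, with a pthread mutex and a plain counter standing in for the spinlock and pm_stay_awake()/pm_relax():

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int changed;
static int wakeup_held;

static void notify(void)		/* models power_supply_changed() */
{
	pthread_mutex_lock(&lock);
	changed = 1;
	wakeup_held = 1;		/* models pm_stay_awake() */
	pthread_mutex_unlock(&lock);
}

static void work(void)			/* models power_supply_changed_work() */
{
	pthread_mutex_lock(&lock);
	while (changed) {
		changed = 0;
		pthread_mutex_unlock(&lock);
		/* ... deliver the event; may call notify() again ... */
		pthread_mutex_lock(&lock);
	}
	wakeup_held = 0;		/* models pm_relax(): no event pending */
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	notify();
	work();
	printf("wakeup_held=%d\n", wakeup_held);	/* prints 0 */
	return 0;
}
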
@@ -500,6 +519,11 @@ int power_supply_register(struct device *parent, struct power_supply *psy)
goto check_supplies_failed;
}
+ spin_lock_init(&psy->changed_lock);
+ rc = device_init_wakeup(dev, true);
+ if (rc)
+ goto wakeup_init_failed;
+
rc = kobject_set_name(&dev->kobj, "%s", psy->name);
if (rc)
goto kobject_set_name_failed;
@@ -529,6 +553,7 @@ create_triggers_failed:
register_cooler_failed:
psy_unregister_thermal(psy);
register_thermal_failed:
+wakeup_init_failed:
device_del(dev);
kobject_set_name_failed:
device_add_failed:
@@ -546,6 +571,7 @@ void power_supply_unregister(struct power_supply *psy)
power_supply_remove_triggers(psy);
psy_unregister_cooler(psy);
psy_unregister_thermal(psy);
+ device_init_wakeup(psy->dev, false);
device_unregister(psy->dev);
}
EXPORT_SYMBOL_GPL(power_supply_unregister);
diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
index 29178f7..44420d1 100644
--- a/drivers/power/power_supply_sysfs.c
+++ b/drivers/power/power_supply_sysfs.c
@@ -118,7 +118,7 @@ static ssize_t power_supply_store_property(struct device *dev,
long long_val;
/* TODO: support other types than int */
- ret = strict_strtol(buf, 10, &long_val);
+ ret = kstrtol(buf, 10, &long_val);
if (ret < 0)
return ret;
diff --git a/drivers/power/reset/Kconfig b/drivers/power/reset/Kconfig
index ee039dc..9b3ea53 100644
--- a/drivers/power/reset/Kconfig
+++ b/drivers/power/reset/Kconfig
@@ -14,6 +14,12 @@ config POWER_RESET_GPIO
If your board needs a GPIO high/low to power down, say Y and
create a binding in your devicetree.
+config POWER_RESET_MSM
+ bool "Qualcomm MSM power-off driver"
+ depends on POWER_RESET && ARCH_MSM
+ help
+ Power off and restart support for Qualcomm boards.
+
config POWER_RESET_QNAP
bool "QNAP power-off driver"
depends on OF_GPIO && POWER_RESET && PLAT_ORION
@@ -34,7 +40,14 @@ config POWER_RESET_RESTART
config POWER_RESET_VEXPRESS
bool "ARM Versatile Express power-off and reset driver"
depends on ARM || ARM64
- depends on POWER_RESET
+ depends on POWER_RESET && VEXPRESS_CONFIG
help
Power off and reset support for the ARM Ltd. Versatile
Express boards.
+
+config POWER_RESET_XGENE
+ bool "APM SoC X-Gene reset driver"
+ depends on ARM64
+ depends on POWER_RESET
+ help
+ Reboot support for the APM SoC X-Gene Eval boards.
diff --git a/drivers/power/reset/Makefile b/drivers/power/reset/Makefile
index 372807f..3e6ed88 100644
--- a/drivers/power/reset/Makefile
+++ b/drivers/power/reset/Makefile
@@ -1,4 +1,6 @@
obj-$(CONFIG_POWER_RESET_GPIO) += gpio-poweroff.o
+obj-$(CONFIG_POWER_RESET_MSM) += msm-poweroff.o
obj-$(CONFIG_POWER_RESET_QNAP) += qnap-poweroff.o
obj-$(CONFIG_POWER_RESET_RESTART) += restart-poweroff.o
obj-$(CONFIG_POWER_RESET_VEXPRESS) += vexpress-poweroff.o
+obj-$(CONFIG_POWER_RESET_XGENE) += xgene-reboot.o
diff --git a/drivers/power/reset/msm-poweroff.c b/drivers/power/reset/msm-poweroff.c
new file mode 100644
index 0000000..774f9a3
--- /dev/null
+++ b/drivers/power/reset/msm-poweroff.c
@@ -0,0 +1,73 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/reboot.h>
+
+#include <asm/system_misc.h>
+
+static void __iomem *msm_ps_hold;
+
+static void do_msm_restart(enum reboot_mode reboot_mode, const char *cmd)
+{
+ writel(0, msm_ps_hold);
+ mdelay(10000);
+}
+
+static void do_msm_poweroff(void)
+{
+ /* TODO: Add poweroff capability */
+ do_msm_restart(REBOOT_HARD, NULL);
+}
+
+static int msm_restart_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *mem;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ msm_ps_hold = devm_ioremap_resource(dev, mem);
+ if (IS_ERR(msm_ps_hold))
+ return PTR_ERR(msm_ps_hold);
+
+ pm_power_off = do_msm_poweroff;
+ arm_pm_restart = do_msm_restart;
+ return 0;
+}
+
+static const struct of_device_id of_msm_restart_match[] = {
+ { .compatible = "qcom,pshold", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, of_msm_restart_match);
+
+static struct platform_driver msm_restart_driver = {
+ .probe = msm_restart_probe,
+ .driver = {
+ .name = "msm-restart",
+ .of_match_table = of_match_ptr(of_msm_restart_match),
+ },
+};
+
+static int __init msm_restart_init(void)
+{
+ return platform_driver_register(&msm_restart_driver);
+}
+device_initcall(msm_restart_init);
diff --git a/drivers/power/reset/xgene-reboot.c b/drivers/power/reset/xgene-reboot.c
new file mode 100644
index 0000000..ecd55f8
--- /dev/null
+++ b/drivers/power/reset/xgene-reboot.c
@@ -0,0 +1,103 @@
+/*
+ * AppliedMicro X-Gene SoC Reboot Driver
+ *
+ * Copyright (c) 2013, Applied Micro Circuits Corporation
+ * Author: Feng Kan <fkan@apm.com>
+ * Author: Loc Ho <lho@apm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ *
+ * This driver provides system reboot functionality for APM X-Gene SoC.
+ * For system shutdown, this is board specific. If a board designer
+ * implements GPIO shutdown, use the gpio-poweroff.c driver.
+ */
+#include <linux/io.h>
+#include <linux/reboot.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/stat.h>
+#include <linux/slab.h>
+#include <asm/system_misc.h>
+
+struct xgene_reboot_context {
+ struct platform_device *pdev;
+	void __iomem *csr;
+ u32 mask;
+};
+
+static struct xgene_reboot_context *xgene_restart_ctx;
+
+static void xgene_restart(enum reboot_mode reboot_mode, const char *cmd)
+{
+ struct xgene_reboot_context *ctx = xgene_restart_ctx;
+ unsigned long timeout;
+
+ /* Issue the reboot */
+	writel(ctx->mask, ctx->csr);
+
+ timeout = jiffies + HZ;
+ while (time_before(jiffies, timeout))
+ cpu_relax();
+
+ dev_emerg(&ctx->pdev->dev, "Unable to restart system\n");
+}
+
+static int xgene_reboot_probe(struct platform_device *pdev)
+{
+ struct xgene_reboot_context *ctx;
+
+ ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx) {
+ dev_err(&pdev->dev, "out of memory for context\n");
+		return -ENOMEM;
+ }
+
+ ctx->csr = of_iomap(pdev->dev.of_node, 0);
+ if (!ctx->csr) {
+ devm_kfree(&pdev->dev, ctx);
+ dev_err(&pdev->dev, "can not map resource\n");
+ return -ENODEV;
+ }
+
+ if (of_property_read_u32(pdev->dev.of_node, "mask", &ctx->mask))
+ ctx->mask = 0xFFFFFFFF;
+
+ ctx->pdev = pdev;
+ arm_pm_restart = xgene_restart;
+ xgene_restart_ctx = ctx;
+
+ return 0;
+}
+
+static struct of_device_id xgene_reboot_of_match[] = {
+ { .compatible = "apm,xgene-reboot" },
+ {}
+};
+
+static struct platform_driver xgene_reboot_driver = {
+ .probe = xgene_reboot_probe,
+ .driver = {
+ .name = "xgene-reboot",
+ .of_match_table = xgene_reboot_of_match,
+ },
+};
+
+static int __init xgene_reboot_init(void)
+{
+ return platform_driver_register(&xgene_reboot_driver);
+}
+device_initcall(xgene_reboot_init);
diff --git a/drivers/power/rx51_battery.c b/drivers/power/rx51_battery.c
index 8a6288d..1bc5857 100644
--- a/drivers/power/rx51_battery.c
+++ b/drivers/power/rx51_battery.c
@@ -25,6 +25,10 @@
#include <linux/slab.h>
#include <linux/i2c/twl4030-madc.h>
+/* RX51 specific channels */
+#define TWL4030_MADC_BTEMP_RX51 TWL4030_MADC_ADCIN0
+#define TWL4030_MADC_BCI_RX51 TWL4030_MADC_ADCIN4
+
struct rx51_device_info {
struct device *dev;
struct power_supply bat;
@@ -37,7 +41,7 @@ static int rx51_battery_read_adc(int channel)
{
struct twl4030_madc_request req;
- req.channels = 1 << channel;
+ req.channels = channel;
req.do_avg = 1;
req.method = TWL4030_MADC_SW1;
req.func_cb = NULL;
@@ -47,7 +51,7 @@ static int rx51_battery_read_adc(int channel)
if (twl4030_madc_conversion(&req) <= 0)
return -ENODATA;
- return req.rbuf[channel];
+ return req.rbuf[ffs(channel) - 1];
}
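
Editor's note: the conversion API now takes a one-hot channel bitmask, and ffs(channel) - 1 recovers the result-buffer index the old code addressed directly. A quick standalone check (the 12-bit mask is illustrative, not the real register layout):

#include <stdio.h>
#include <strings.h>	/* ffs() */

int main(void)
{
	unsigned int mask = 1u << 12;	/* illustrative one-hot channel mask */

	printf("mask=0x%x -> rbuf[%d]\n", mask, ffs(mask) - 1);
	return 0;			/* prints mask=0x1000 -> rbuf[12] */
}
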
/*
@@ -56,7 +60,7 @@ static int rx51_battery_read_adc(int channel)
*/
static int rx51_battery_read_voltage(struct rx51_device_info *di)
{
- int voltage = rx51_battery_read_adc(12);
+ int voltage = rx51_battery_read_adc(TWL4030_MADC_VBAT);
if (voltage < 0)
return voltage;
@@ -108,7 +112,7 @@ static int rx51_battery_read_temperature(struct rx51_device_info *di)
{
int min = 0;
int max = ARRAY_SIZE(rx51_temp_table2) - 1;
- int raw = rx51_battery_read_adc(0);
+ int raw = rx51_battery_read_adc(TWL4030_MADC_BTEMP_RX51);
/* Zero and negative values are undefined */
if (raw <= 0)
@@ -142,7 +146,7 @@ static int rx51_battery_read_temperature(struct rx51_device_info *di)
*/
static int rx51_battery_read_capacity(struct rx51_device_info *di)
{
- int capacity = rx51_battery_read_adc(4);
+ int capacity = rx51_battery_read_adc(TWL4030_MADC_BCI_RX51);
if (capacity < 0)
return capacity;
diff --git a/drivers/power/tosa_battery.c b/drivers/power/tosa_battery.c
index 0224de5..f4d80df 100644
--- a/drivers/power/tosa_battery.c
+++ b/drivers/power/tosa_battery.c
@@ -150,7 +150,7 @@ static void tosa_bat_external_power_changed(struct power_supply *psy)
static irqreturn_t tosa_bat_gpio_isr(int irq, void *data)
{
- pr_info("tosa_bat_gpio irq: %d\n", gpio_get_value(irq_to_gpio(irq)));
+ pr_info("tosa_bat_gpio irq\n");
schedule_work(&bat_work);
return IRQ_HANDLED;
}
diff --git a/drivers/power/twl4030_charger.c b/drivers/power/twl4030_charger.c
index be98e70..d98abe9 100644
--- a/drivers/power/twl4030_charger.c
+++ b/drivers/power/twl4030_charger.c
@@ -189,7 +189,12 @@ static int twl4030_charger_enable_usb(struct twl4030_bci *bci, bool enable)
/* Need to keep regulator on */
if (!bci->usb_enabled) {
- regulator_enable(bci->usb_reg);
+ ret = regulator_enable(bci->usb_reg);
+ if (ret) {
+ dev_err(bci->dev,
+ "Failed to enable regulator\n");
+ return ret;
+ }
bci->usb_enabled = 1;
}
diff --git a/drivers/power/twl4030_madc_battery.c b/drivers/power/twl4030_madc_battery.c
new file mode 100644
index 0000000..7ef445a
--- /dev/null
+++ b/drivers/power/twl4030_madc_battery.c
@@ -0,0 +1,245 @@
+/*
+ * Dumb driver for LiIon batteries using TWL4030 madc.
+ *
+ * Copyright 2013 Golden Delicious Computers
+ * Lukas Märdian <lukas@goldelico.com>
+ *
+ * Based on dumb driver for gta01 battery
+ * Copyright 2009 Openmoko, Inc
+ * Balaji Rao <balajirrao@openmoko.org>
+ */
+
+#include <linux/module.h>
+#include <linux/param.h>
+#include <linux/delay.h>
+#include <linux/workqueue.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/slab.h>
+#include <linux/sort.h>
+#include <linux/i2c/twl4030-madc.h>
+#include <linux/power/twl4030_madc_battery.h>
+
+struct twl4030_madc_battery {
+ struct power_supply psy;
+ struct twl4030_madc_bat_platform_data *pdata;
+};
+
+static enum power_supply_property twl4030_madc_bat_props[] = {
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_TECHNOLOGY,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_CHARGE_FULL,
+ POWER_SUPPLY_PROP_CHARGE_NOW,
+ POWER_SUPPLY_PROP_TEMP,
+ POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW,
+};
+
+static int madc_read(int index)
+{
+ struct twl4030_madc_request req;
+ int val;
+
+ req.channels = index;
+ req.method = TWL4030_MADC_SW2;
+ req.type = TWL4030_MADC_WAIT;
+ req.do_avg = 0;
+ req.raw = false;
+ req.func_cb = NULL;
+
+ val = twl4030_madc_conversion(&req);
+ if (val < 0)
+ return val;
+
+ return req.rbuf[ffs(index) - 1];
+}
+
+static int twl4030_madc_bat_get_charging_status(void)
+{
+ return (madc_read(TWL4030_MADC_ICHG) > 0) ? 1 : 0;
+}
+
+static int twl4030_madc_bat_get_voltage(void)
+{
+ return madc_read(TWL4030_MADC_VBAT);
+}
+
+static int twl4030_madc_bat_get_current(void)
+{
+ return madc_read(TWL4030_MADC_ICHG) * 1000;
+}
+
+static int twl4030_madc_bat_get_temp(void)
+{
+ return madc_read(TWL4030_MADC_BTEMP) * 10;
+}
+
+static int twl4030_madc_bat_voltscale(struct twl4030_madc_battery *bat,
+ int volt)
+{
+ struct twl4030_madc_bat_calibration *calibration;
+ int i, res = 0;
+
+ /* choose charging curve */
+ if (twl4030_madc_bat_get_charging_status())
+ calibration = bat->pdata->charging;
+ else
+ calibration = bat->pdata->discharging;
+
+ if (volt > calibration[0].voltage) {
+ res = calibration[0].level;
+ } else {
+ for (i = 0; calibration[i+1].voltage >= 0; i++) {
+ if (volt <= calibration[i].voltage &&
+ volt >= calibration[i+1].voltage) {
+ /* interval found - interpolate within range */
+ res = calibration[i].level -
+ ((calibration[i].voltage - volt) *
+ (calibration[i].level -
+ calibration[i+1].level)) /
+ (calibration[i].voltage -
+ calibration[i+1].voltage);
+ break;
+ }
+ }
+ }
+ return res;
+}
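
Editor's note: the loop above does integer linear interpolation between adjacent calibration points. A standalone spot-check with hypothetical points (4000 mV, 100%) and (3600 mV, 50%):

#include <stdio.h>

int main(void)
{
	/* hypothetical calibration interval: (4000 mV, 100%) .. (3600 mV, 50%) */
	int v_hi = 4000, l_hi = 100, v_lo = 3600, l_lo = 50;
	int volt = 3800;

	int res = l_hi - ((v_hi - volt) * (l_hi - l_lo)) / (v_hi - v_lo);

	printf("%d mV -> %d%%\n", volt, res);	/* prints 3800 mV -> 75% */
	return 0;
}
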
+
+static int twl4030_madc_bat_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct twl4030_madc_battery *bat = container_of(psy,
+ struct twl4030_madc_battery, psy);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_STATUS:
+ if (twl4030_madc_bat_voltscale(bat,
+ twl4030_madc_bat_get_voltage()) > 95)
+ val->intval = POWER_SUPPLY_STATUS_FULL;
+ else {
+ if (twl4030_madc_bat_get_charging_status())
+ val->intval = POWER_SUPPLY_STATUS_CHARGING;
+ else
+ val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
+ }
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ val->intval = twl4030_madc_bat_get_voltage() * 1000;
+ break;
+ case POWER_SUPPLY_PROP_TECHNOLOGY:
+ val->intval = POWER_SUPPLY_TECHNOLOGY_LION;
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ val->intval = twl4030_madc_bat_get_current();
+ break;
+ case POWER_SUPPLY_PROP_PRESENT:
+ /* assume battery is always present */
+ val->intval = 1;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_NOW: {
+ int percent = twl4030_madc_bat_voltscale(bat,
+ twl4030_madc_bat_get_voltage());
+ val->intval = (percent * bat->pdata->capacity) / 100;
+ break;
+ }
+ case POWER_SUPPLY_PROP_CAPACITY:
+ val->intval = twl4030_madc_bat_voltscale(bat,
+ twl4030_madc_bat_get_voltage());
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_FULL:
+ val->intval = bat->pdata->capacity;
+ break;
+ case POWER_SUPPLY_PROP_TEMP:
+ val->intval = twl4030_madc_bat_get_temp();
+ break;
+ case POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW: {
+ int percent = twl4030_madc_bat_voltscale(bat,
+ twl4030_madc_bat_get_voltage());
+ /* in mAh */
+ int chg = (percent * (bat->pdata->capacity/1000))/100;
+
+ /* assume discharge with 400 mA (ca. 1.5W) */
+ val->intval = (3600l * chg) / 400;
+ break;
+ }
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
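
Editor's note: the TIME_TO_EMPTY_NOW arithmetic can be spot-checked with hypothetical numbers; the capacity value is illustrative (in uAh), and the 400 mA drain is the driver's own stated assumption:

#include <stdio.h>

int main(void)
{
	int capacity_uah = 1200000;	/* hypothetical pdata->capacity, in uAh */
	int percent = 50;

	int chg = (percent * (capacity_uah / 1000)) / 100;	/* in mAh */
	long secs = (3600L * chg) / 400;	/* assumed 400 mA discharge */

	printf("charge=%d mAh, time-to-empty=%ld s\n", chg, secs);
	return 0;	/* prints charge=600 mAh, time-to-empty=5400 s */
}
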
+
+static void twl4030_madc_bat_ext_changed(struct power_supply *psy)
+{
+ struct twl4030_madc_battery *bat = container_of(psy,
+ struct twl4030_madc_battery, psy);
+
+ power_supply_changed(&bat->psy);
+}
+
+static int twl4030_cmp(const void *a, const void *b)
+{
+ return ((struct twl4030_madc_bat_calibration *)b)->voltage -
+ ((struct twl4030_madc_bat_calibration *)a)->voltage;
+}
+
+static int twl4030_madc_battery_probe(struct platform_device *pdev)
+{
+ struct twl4030_madc_battery *twl4030_madc_bat;
+ struct twl4030_madc_bat_platform_data *pdata = pdev->dev.platform_data;
+
+ twl4030_madc_bat = kzalloc(sizeof(*twl4030_madc_bat), GFP_KERNEL);
+ if (!twl4030_madc_bat)
+ return -ENOMEM;
+
+ twl4030_madc_bat->psy.name = "twl4030_battery";
+ twl4030_madc_bat->psy.type = POWER_SUPPLY_TYPE_BATTERY;
+ twl4030_madc_bat->psy.properties = twl4030_madc_bat_props;
+ twl4030_madc_bat->psy.num_properties =
+ ARRAY_SIZE(twl4030_madc_bat_props);
+ twl4030_madc_bat->psy.get_property = twl4030_madc_bat_get_property;
+ twl4030_madc_bat->psy.external_power_changed =
+ twl4030_madc_bat_ext_changed;
+
+ /* sort charging and discharging calibration data */
+ sort(pdata->charging, pdata->charging_size,
+ sizeof(struct twl4030_madc_bat_calibration),
+ twl4030_cmp, NULL);
+ sort(pdata->discharging, pdata->discharging_size,
+ sizeof(struct twl4030_madc_bat_calibration),
+ twl4030_cmp, NULL);
+
+ twl4030_madc_bat->pdata = pdata;
+ platform_set_drvdata(pdev, twl4030_madc_bat);
+ power_supply_register(&pdev->dev, &twl4030_madc_bat->psy);
+
+ return 0;
+}
+
+static int twl4030_madc_battery_remove(struct platform_device *pdev)
+{
+ struct twl4030_madc_battery *bat = platform_get_drvdata(pdev);
+
+ power_supply_unregister(&bat->psy);
+ kfree(bat);
+
+ return 0;
+}
+
+static struct platform_driver twl4030_madc_battery_driver = {
+ .driver = {
+ .name = "twl4030_madc_battery",
+ },
+ .probe = twl4030_madc_battery_probe,
+ .remove = twl4030_madc_battery_remove,
+};
+module_platform_driver(twl4030_madc_battery_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Lukas Märdian <lukas@goldelico.com>");
+MODULE_DESCRIPTION("twl4030_madc battery driver");
diff --git a/drivers/pps/clients/Kconfig b/drivers/pps/clients/Kconfig
index 6efd9b6..0c9f280 100644
--- a/drivers/pps/clients/Kconfig
+++ b/drivers/pps/clients/Kconfig
@@ -31,7 +31,7 @@ config PPS_CLIENT_PARPORT
config PPS_CLIENT_GPIO
tristate "PPS client using GPIO"
- depends on PPS && GENERIC_HARDIRQS
+ depends on PPS
help
If you say yes here you get support for a PPS source using
GPIO. To be useful you must also register a platform device
diff --git a/drivers/pps/clients/pps-gpio.c b/drivers/pps/clients/pps-gpio.c
index eae0eda..9966124 100644
--- a/drivers/pps/clients/pps-gpio.c
+++ b/drivers/pps/clients/pps-gpio.c
@@ -184,7 +184,6 @@ static int pps_gpio_remove(struct platform_device *pdev)
{
struct pps_gpio_device_data *data = platform_get_drvdata(pdev);
- platform_set_drvdata(pdev, NULL);
pps_unregister_source(data->pps);
dev_info(&pdev->dev, "removed IRQ %d as PPS source\n", data->irq);
return 0;
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 9e3498b..9654aa3 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -1249,6 +1249,15 @@ config RTC_DRV_SIRFSOC
Say "yes" here to support the real time clock on SiRF SOC chips.
This driver can also be built as a module called rtc-sirfsoc.
+config RTC_DRV_MOXART
+ tristate "MOXA ART RTC"
+ help
+ If you say yes here you get support for the MOXA ART
+ RTC module.
+
+ This driver can also be built as a module. If so, the module
+	  will be called rtc-moxart.
+
comment "HID Sensor RTC drivers"
config RTC_DRV_HID_SENSOR_TIME
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index d3b4488..2dff3d2 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -130,3 +130,4 @@ obj-$(CONFIG_RTC_DRV_WM831X) += rtc-wm831x.o
obj-$(CONFIG_RTC_DRV_WM8350) += rtc-wm8350.o
obj-$(CONFIG_RTC_DRV_X1205) += rtc-x1205.o
obj-$(CONFIG_RTC_DRV_SIRFSOC) += rtc-sirfsoc.o
+obj-$(CONFIG_RTC_DRV_MOXART) += rtc-moxart.o
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index be06d71..24e733c 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -1018,23 +1018,6 @@ static void __exit cmos_pnp_remove(struct pnp_dev *pnp)
cmos_do_remove(&pnp->dev);
}
-#ifdef CONFIG_PM
-
-static int cmos_pnp_suspend(struct pnp_dev *pnp, pm_message_t mesg)
-{
- return cmos_suspend(&pnp->dev);
-}
-
-static int cmos_pnp_resume(struct pnp_dev *pnp)
-{
- return cmos_resume(&pnp->dev);
-}
-
-#else
-#define cmos_pnp_suspend NULL
-#define cmos_pnp_resume NULL
-#endif
-
static void cmos_pnp_shutdown(struct pnp_dev *pnp)
{
if (system_state == SYSTEM_POWER_OFF && !cmos_poweroff(&pnp->dev))
@@ -1060,8 +1043,11 @@ static struct pnp_driver cmos_pnp_driver = {
/* flag ensures resume() gets called, and stops syslog spam */
.flags = PNP_DRIVER_RES_DO_NOT_CHANGE,
- .suspend = cmos_pnp_suspend,
- .resume = cmos_pnp_resume,
+#ifdef CONFIG_PM_SLEEP
+ .driver = {
+ .pm = &cmos_pm_ops,
+ },
+#endif
};
#endif /* CONFIG_PNP */
diff --git a/drivers/rtc/rtc-ds1511.c b/drivers/rtc/rtc-ds1511.c
index 308a8fe..bc7b4fc 100644
--- a/drivers/rtc/rtc-ds1511.c
+++ b/drivers/rtc/rtc-ds1511.c
@@ -89,7 +89,6 @@ enum ds1511reg {
struct rtc_plat_data {
struct rtc_device *rtc;
void __iomem *ioaddr; /* virtual base address */
- int size; /* amount of memory mapped */
int irq;
unsigned int irqen;
int alrm_sec;
@@ -479,20 +478,14 @@ static int ds1511_rtc_probe(struct platform_device *pdev)
struct rtc_plat_data *pdata;
int ret = 0;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENODEV;
-
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return -ENOMEM;
- pdata->size = resource_size(res);
- if (!devm_request_mem_region(&pdev->dev, res->start, pdata->size,
- pdev->name))
- return -EBUSY;
- ds1511_base = devm_ioremap(&pdev->dev, res->start, pdata->size);
- if (!ds1511_base)
- return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ds1511_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ds1511_base))
+ return PTR_ERR(ds1511_base);
pdata->ioaddr = ds1511_base;
pdata->irq = platform_get_irq(pdev, 0);
diff --git a/drivers/rtc/rtc-ds1553.c b/drivers/rtc/rtc-ds1553.c
index 8c6c952..fd31571 100644
--- a/drivers/rtc/rtc-ds1553.c
+++ b/drivers/rtc/rtc-ds1553.c
@@ -285,19 +285,14 @@ static int ds1553_rtc_probe(struct platform_device *pdev)
void __iomem *ioaddr;
int ret = 0;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENODEV;
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return -ENOMEM;
- if (!devm_request_mem_region(&pdev->dev, res->start, RTC_REG_SIZE,
- pdev->name))
- return -EBUSY;
- ioaddr = devm_ioremap(&pdev->dev, res->start, RTC_REG_SIZE);
- if (!ioaddr)
- return -ENOMEM;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ioaddr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ioaddr))
+ return PTR_ERR(ioaddr);
pdata->ioaddr = ioaddr;
pdata->irq = platform_get_irq(pdev, 0);
diff --git a/drivers/rtc/rtc-ds1742.c b/drivers/rtc/rtc-ds1742.c
index eccdc62..17b73fd 100644
--- a/drivers/rtc/rtc-ds1742.c
+++ b/drivers/rtc/rtc-ds1742.c
@@ -52,11 +52,9 @@
#define RTC_BATT_FLAG 0x80
struct rtc_plat_data {
- struct rtc_device *rtc;
void __iomem *ioaddr_nvram;
void __iomem *ioaddr_rtc;
size_t size_nvram;
- size_t size;
unsigned long last_jiffies;
struct bin_attribute nvram_attr;
};
@@ -117,11 +115,7 @@ static int ds1742_rtc_read_time(struct device *dev, struct rtc_time *tm)
/* year is 1900 + tm->tm_year */
tm->tm_year = bcd2bin(year) + bcd2bin(century) * 100 - 1900;
- if (rtc_valid_tm(tm) < 0) {
- dev_err(dev, "retrieved date/time is not valid.\n");
- rtc_time_to_tm(0, tm);
- }
- return 0;
+ return rtc_valid_tm(tm);
}
static const struct rtc_class_ops ds1742_rtc_ops = {
@@ -168,22 +162,17 @@ static int ds1742_rtc_probe(struct platform_device *pdev)
void __iomem *ioaddr;
int ret = 0;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENODEV;
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return -ENOMEM;
- pdata->size = resource_size(res);
- if (!devm_request_mem_region(&pdev->dev, res->start, pdata->size,
- pdev->name))
- return -EBUSY;
- ioaddr = devm_ioremap(&pdev->dev, res->start, pdata->size);
- if (!ioaddr)
- return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ioaddr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ioaddr))
+ return PTR_ERR(ioaddr);
pdata->ioaddr_nvram = ioaddr;
- pdata->size_nvram = pdata->size - RTC_SIZE;
+ pdata->size_nvram = resource_size(res) - RTC_SIZE;
pdata->ioaddr_rtc = ioaddr + pdata->size_nvram;
sysfs_bin_attr_init(&pdata->nvram_attr);
@@ -212,7 +201,6 @@ static int ds1742_rtc_probe(struct platform_device *pdev)
&ds1742_rtc_ops, THIS_MODULE);
if (IS_ERR(rtc))
return PTR_ERR(rtc);
- pdata->rtc = rtc;
ret = sysfs_create_bin_file(&pdev->dev.kobj, &pdata->nvram_attr);
diff --git a/drivers/rtc/rtc-ep93xx.c b/drivers/rtc/rtc-ep93xx.c
index 549b3c3..580e7b5 100644
--- a/drivers/rtc/rtc-ep93xx.c
+++ b/drivers/rtc/rtc-ep93xx.c
@@ -138,17 +138,9 @@ static int ep93xx_rtc_probe(struct platform_device *pdev)
return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENXIO;
-
- if (!devm_request_mem_region(&pdev->dev, res->start,
- resource_size(res), pdev->name))
- return -EBUSY;
-
- ep93xx_rtc->mmio_base = devm_ioremap(&pdev->dev, res->start,
- resource_size(res));
- if (!ep93xx_rtc->mmio_base)
- return -ENXIO;
+ ep93xx_rtc->mmio_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ep93xx_rtc->mmio_base))
+ return PTR_ERR(ep93xx_rtc->mmio_base);
pdev->dev.platform_data = ep93xx_rtc;
platform_set_drvdata(pdev, ep93xx_rtc);
diff --git a/drivers/rtc/rtc-hid-sensor-time.c b/drivers/rtc/rtc-hid-sensor-time.c
index 7273b01..4e2a818 100644
--- a/drivers/rtc/rtc-hid-sensor-time.c
+++ b/drivers/rtc/rtc-hid-sensor-time.c
@@ -23,10 +23,6 @@
#include <linux/iio/iio.h>
#include <linux/rtc.h>
-/* Format: HID-SENSOR-usage_id_in_hex */
-/* Usage ID from spec for Time: 0x2000A0 */
-#define DRIVER_NAME "HID-SENSOR-2000a0" /* must be lowercase */
-
enum hid_time_channel {
CHANNEL_SCAN_INDEX_YEAR,
CHANNEL_SCAN_INDEX_MONTH,
@@ -283,9 +279,11 @@ static int hid_time_probe(struct platform_device *pdev)
"hid-sensor-time", &hid_time_rtc_ops,
THIS_MODULE);
- if (IS_ERR(time_state->rtc)) {
+ if (IS_ERR_OR_NULL(time_state->rtc)) {
+ ret = time_state->rtc ? PTR_ERR(time_state->rtc) : -ENODEV;
+ time_state->rtc = NULL;
+ sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_TIME);
dev_err(&pdev->dev, "rtc device register failed!\n");
- return PTR_ERR(time_state->rtc);
}
return ret;
@@ -300,9 +298,19 @@ static int hid_time_remove(struct platform_device *pdev)
return 0;
}
+static struct platform_device_id hid_time_ids[] = {
+ {
+ /* Format: HID-SENSOR-usage_id_in_hex_lowercase */
+ .name = "HID-SENSOR-2000a0",
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, hid_time_ids);
+
static struct platform_driver hid_time_platform_driver = {
+ .id_table = hid_time_ids,
.driver = {
- .name = DRIVER_NAME,
+ .name = KBUILD_MODNAME,
.owner = THIS_MODULE,
},
.probe = hid_time_probe,
diff --git a/drivers/rtc/rtc-imxdi.c b/drivers/rtc/rtc-imxdi.c
index d3a8c8e..abd7f90 100644
--- a/drivers/rtc/rtc-imxdi.c
+++ b/drivers/rtc/rtc-imxdi.c
@@ -375,24 +375,16 @@ static int __init dryice_rtc_probe(struct platform_device *pdev)
struct imxdi_dev *imxdi;
int rc;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENODEV;
-
imxdi = devm_kzalloc(&pdev->dev, sizeof(*imxdi), GFP_KERNEL);
if (!imxdi)
return -ENOMEM;
imxdi->pdev = pdev;
- if (!devm_request_mem_region(&pdev->dev, res->start, resource_size(res),
- pdev->name))
- return -EBUSY;
-
- imxdi->ioaddr = devm_ioremap(&pdev->dev, res->start,
- resource_size(res));
- if (imxdi->ioaddr == NULL)
- return -ENOMEM;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ imxdi->ioaddr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(imxdi->ioaddr))
+ return PTR_ERR(imxdi->ioaddr);
spin_lock_init(&imxdi->irq_lock);
diff --git a/drivers/rtc/rtc-lpc32xx.c b/drivers/rtc/rtc-lpc32xx.c
index 8276ae9..bfdbcb8 100644
--- a/drivers/rtc/rtc-lpc32xx.c
+++ b/drivers/rtc/rtc-lpc32xx.c
@@ -201,16 +201,9 @@ static int lpc32xx_rtc_probe(struct platform_device *pdev)
{
struct resource *res;
struct lpc32xx_rtc *rtc;
- resource_size_t size;
int rtcirq;
u32 tmp;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "Can't get memory resource\n");
- return -ENOENT;
- }
-
rtcirq = platform_get_irq(pdev, 0);
if (rtcirq < 0 || rtcirq >= NR_IRQS) {
dev_warn(&pdev->dev, "Can't get interrupt resource\n");
@@ -224,19 +217,10 @@ static int lpc32xx_rtc_probe(struct platform_device *pdev)
}
rtc->irq = rtcirq;
- size = resource_size(res);
-
- if (!devm_request_mem_region(&pdev->dev, res->start, size,
- pdev->name)) {
- dev_err(&pdev->dev, "RTC registers are not free\n");
- return -EBUSY;
- }
-
- rtc->rtc_base = devm_ioremap(&pdev->dev, res->start, size);
- if (!rtc->rtc_base) {
- dev_err(&pdev->dev, "Can't map memory\n");
- return -ENOMEM;
- }
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ rtc->rtc_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(rtc->rtc_base))
+ return PTR_ERR(rtc->rtc_base);
spin_lock_init(&rtc->lock);
diff --git a/drivers/rtc/rtc-max77686.c b/drivers/rtc/rtc-max77686.c
index 9915cb9..9efe118 100644
--- a/drivers/rtc/rtc-max77686.c
+++ b/drivers/rtc/rtc-max77686.c
@@ -240,9 +240,9 @@ static int max77686_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
}
alrm->pending = 0;
- ret = regmap_read(info->max77686->regmap, MAX77686_REG_STATUS1, &val);
+ ret = regmap_read(info->max77686->regmap, MAX77686_REG_STATUS2, &val);
if (ret < 0) {
- dev_err(info->dev, "%s:%d fail to read status1 reg(%d)\n",
+ dev_err(info->dev, "%s:%d fail to read status2 reg(%d)\n",
__func__, __LINE__, ret);
goto out;
}
diff --git a/drivers/rtc/rtc-moxart.c b/drivers/rtc/rtc-moxart.c
new file mode 100644
index 0000000..c29dee0
--- /dev/null
+++ b/drivers/rtc/rtc-moxart.c
@@ -0,0 +1,330 @@
+/*
+ * MOXA ART RTC driver.
+ *
+ * Copyright (C) 2013 Jonas Jensen
+ *
+ * Jonas Jensen <jonas.jensen@gmail.com>
+ *
+ * Based on code from
+ * Moxa Technology Co., Ltd. <www.moxa.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/rtc.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
+
+#define GPIO_RTC_RESERVED 0x0C
+#define GPIO_RTC_DATA_SET 0x10
+#define GPIO_RTC_DATA_CLEAR 0x14
+#define GPIO_RTC_PIN_PULL_ENABLE 0x18
+#define GPIO_RTC_PIN_PULL_TYPE 0x1C
+#define GPIO_RTC_INT_ENABLE 0x20
+#define GPIO_RTC_INT_RAW_STATE 0x24
+#define GPIO_RTC_INT_MASKED_STATE 0x28
+#define GPIO_RTC_INT_MASK 0x2C
+#define GPIO_RTC_INT_CLEAR 0x30
+#define GPIO_RTC_INT_TRIGGER 0x34
+#define GPIO_RTC_INT_BOTH 0x38
+#define GPIO_RTC_INT_RISE_NEG 0x3C
+#define GPIO_RTC_BOUNCE_ENABLE 0x40
+#define GPIO_RTC_BOUNCE_PRE_SCALE 0x44
+#define GPIO_RTC_PROTECT_W 0x8E
+#define GPIO_RTC_PROTECT_R 0x8F
+#define GPIO_RTC_YEAR_W 0x8C
+#define GPIO_RTC_YEAR_R 0x8D
+#define GPIO_RTC_DAY_W 0x8A
+#define GPIO_RTC_DAY_R 0x8B
+#define GPIO_RTC_MONTH_W 0x88
+#define GPIO_RTC_MONTH_R 0x89
+#define GPIO_RTC_DATE_W 0x86
+#define GPIO_RTC_DATE_R 0x87
+#define GPIO_RTC_HOURS_W 0x84
+#define GPIO_RTC_HOURS_R 0x85
+#define GPIO_RTC_MINUTES_W 0x82
+#define GPIO_RTC_MINUTES_R 0x83
+#define GPIO_RTC_SECONDS_W 0x80
+#define GPIO_RTC_SECONDS_R 0x81
+#define GPIO_RTC_DELAY_TIME 8
+
+struct moxart_rtc {
+ struct rtc_device *rtc;
+ spinlock_t rtc_lock;
+ int gpio_data, gpio_sclk, gpio_reset;
+};
+
+static const int day_of_year[12] = { 0, 31, 59, 90, 120, 151, 181,
+ 212, 243, 273, 304, 334 };
+
+static void moxart_rtc_write_byte(struct device *dev, u8 data)
+{
+ struct moxart_rtc *moxart_rtc = dev_get_drvdata(dev);
+ int i;
+
+ for (i = 0; i < 8; i++, data >>= 1) {
+ gpio_set_value(moxart_rtc->gpio_sclk, 0);
+ gpio_set_value(moxart_rtc->gpio_data, ((data & 1) == 1));
+ udelay(GPIO_RTC_DELAY_TIME);
+ gpio_set_value(moxart_rtc->gpio_sclk, 1);
+ udelay(GPIO_RTC_DELAY_TIME);
+ }
+}
+
+static u8 moxart_rtc_read_byte(struct device *dev)
+{
+ struct moxart_rtc *moxart_rtc = dev_get_drvdata(dev);
+ int i;
+ u8 data = 0;
+
+ for (i = 0; i < 8; i++) {
+ gpio_set_value(moxart_rtc->gpio_sclk, 0);
+ udelay(GPIO_RTC_DELAY_TIME);
+ gpio_set_value(moxart_rtc->gpio_sclk, 1);
+ udelay(GPIO_RTC_DELAY_TIME);
+ if (gpio_get_value(moxart_rtc->gpio_data))
+ data |= (1 << i);
+ udelay(GPIO_RTC_DELAY_TIME);
+ }
+ return data;
+}
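
Editor's note: both helpers above clock the bus LSB-first — the writer shifts data right and drives bit 0 each clock, so the reader must OR each sampled bit into successively higher positions. A standalone model of that framing:

#include <stdio.h>

int main(void)
{
	unsigned char tx = 0xA5, rx = 0;
	int wire, i;

	for (i = 0; i < 8; i++) {
		wire = (tx >> i) & 1;	/* writer: LSB first, like data >>= 1 */
		if (wire)
			rx |= 1u << i;	/* reader: like data |= (1 << i) */
	}
	printf("tx=0x%02X rx=0x%02X\n", tx, rx);	/* round-trips: 0xA5 */
	return 0;
}
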
+
+static u8 moxart_rtc_read_register(struct device *dev, u8 cmd)
+{
+ struct moxart_rtc *moxart_rtc = dev_get_drvdata(dev);
+ u8 data;
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ gpio_direction_output(moxart_rtc->gpio_data, 0);
+ gpio_set_value(moxart_rtc->gpio_reset, 1);
+ udelay(GPIO_RTC_DELAY_TIME);
+ moxart_rtc_write_byte(dev, cmd);
+ gpio_direction_input(moxart_rtc->gpio_data);
+ udelay(GPIO_RTC_DELAY_TIME);
+ data = moxart_rtc_read_byte(dev);
+ gpio_set_value(moxart_rtc->gpio_sclk, 0);
+ gpio_set_value(moxart_rtc->gpio_reset, 0);
+ udelay(GPIO_RTC_DELAY_TIME);
+
+ local_irq_restore(flags);
+
+ return data;
+}
+
+static void moxart_rtc_write_register(struct device *dev, u8 cmd, u8 data)
+{
+ struct moxart_rtc *moxart_rtc = dev_get_drvdata(dev);
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ gpio_direction_output(moxart_rtc->gpio_data, 0);
+ gpio_set_value(moxart_rtc->gpio_reset, 1);
+ udelay(GPIO_RTC_DELAY_TIME);
+ moxart_rtc_write_byte(dev, cmd);
+ moxart_rtc_write_byte(dev, data);
+ gpio_set_value(moxart_rtc->gpio_sclk, 0);
+ gpio_set_value(moxart_rtc->gpio_reset, 0);
+ udelay(GPIO_RTC_DELAY_TIME);
+
+ local_irq_restore(flags);
+}
+
+static int moxart_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+ struct moxart_rtc *moxart_rtc = dev_get_drvdata(dev);
+
+ spin_lock_irq(&moxart_rtc->rtc_lock);
+
+ moxart_rtc_write_register(dev, GPIO_RTC_PROTECT_W, 0);
+ moxart_rtc_write_register(dev, GPIO_RTC_YEAR_W,
+ (((tm->tm_year - 100) / 10) << 4) |
+ ((tm->tm_year - 100) % 10));
+
+ moxart_rtc_write_register(dev, GPIO_RTC_MONTH_W,
+ (((tm->tm_mon + 1) / 10) << 4) |
+ ((tm->tm_mon + 1) % 10));
+
+ moxart_rtc_write_register(dev, GPIO_RTC_DATE_W,
+ ((tm->tm_mday / 10) << 4) |
+ (tm->tm_mday % 10));
+
+ moxart_rtc_write_register(dev, GPIO_RTC_HOURS_W,
+ ((tm->tm_hour / 10) << 4) |
+ (tm->tm_hour % 10));
+
+ moxart_rtc_write_register(dev, GPIO_RTC_MINUTES_W,
+ ((tm->tm_min / 10) << 4) |
+ (tm->tm_min % 10));
+
+ moxart_rtc_write_register(dev, GPIO_RTC_SECONDS_W,
+ ((tm->tm_sec / 10) << 4) |
+ (tm->tm_sec % 10));
+
+ moxart_rtc_write_register(dev, GPIO_RTC_PROTECT_W, 0x80);
+
+ spin_unlock_irq(&moxart_rtc->rtc_lock);
+
+ dev_dbg(dev, "%s: success tm_year=%d tm_mon=%d\n"
+ "tm_mday=%d tm_hour=%d tm_min=%d tm_sec=%d\n",
+ __func__, tm->tm_year, tm->tm_mon, tm->tm_mday,
+ tm->tm_hour, tm->tm_min, tm->tm_sec);
+
+ return 0;
+}
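
Editor's note: the ((x / 10) << 4) | (x % 10) packing above is plain binary-to-BCD; the kernel also provides bin2bcd()/bcd2bin() in <linux/bcd.h> for exactly this. A standalone check of the identity:

#include <stdio.h>

static unsigned char bin2bcd_sketch(unsigned char x)
{
	return ((x / 10) << 4) | (x % 10);
}

static unsigned char bcd2bin_sketch(unsigned char b)
{
	return ((b >> 4) * 10) + (b & 0x0F);
}

int main(void)
{
	unsigned char sec = 59;
	unsigned char bcd = bin2bcd_sketch(sec);

	printf("59 -> 0x%02X -> %d\n", bcd, bcd2bin_sketch(bcd));
	return 0;	/* prints 59 -> 0x59 -> 59 */
}
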
+
+static int moxart_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ struct moxart_rtc *moxart_rtc = dev_get_drvdata(dev);
+ unsigned char v;
+
+ spin_lock_irq(&moxart_rtc->rtc_lock);
+
+ v = moxart_rtc_read_register(dev, GPIO_RTC_SECONDS_R);
+ tm->tm_sec = (((v & 0x70) >> 4) * 10) + (v & 0x0F);
+
+ v = moxart_rtc_read_register(dev, GPIO_RTC_MINUTES_R);
+ tm->tm_min = (((v & 0x70) >> 4) * 10) + (v & 0x0F);
+
+ v = moxart_rtc_read_register(dev, GPIO_RTC_HOURS_R);
+ if (v & 0x80) { /* 12-hour mode */
+ tm->tm_hour = (((v & 0x10) >> 4) * 10) + (v & 0x0F);
+ if (v & 0x20) { /* PM mode */
+ tm->tm_hour += 12;
+ if (tm->tm_hour >= 24)
+ tm->tm_hour = 0;
+ }
+ } else { /* 24-hour mode */
+ tm->tm_hour = (((v & 0x30) >> 4) * 10) + (v & 0x0F);
+ }
+
+ v = moxart_rtc_read_register(dev, GPIO_RTC_DATE_R);
+ tm->tm_mday = (((v & 0x30) >> 4) * 10) + (v & 0x0F);
+
+ v = moxart_rtc_read_register(dev, GPIO_RTC_MONTH_R);
+ tm->tm_mon = (((v & 0x10) >> 4) * 10) + (v & 0x0F);
+ tm->tm_mon--;
+
+ v = moxart_rtc_read_register(dev, GPIO_RTC_YEAR_R);
+ tm->tm_year = (((v & 0xF0) >> 4) * 10) + (v & 0x0F);
+ tm->tm_year += 100;
+
+ v = moxart_rtc_read_register(dev, GPIO_RTC_DAY_R);
+ tm->tm_wday = (v & 0x0f) - 1;
+ tm->tm_yday = day_of_year[tm->tm_mon];
+ tm->tm_yday += (tm->tm_mday - 1);
+ if (tm->tm_mon >= 2) {
+ if (!(tm->tm_year % 4) && (tm->tm_year % 100))
+ tm->tm_yday++;
+ }
+
+ tm->tm_isdst = 0;
+
+ spin_unlock_irq(&moxart_rtc->rtc_lock);
+
+ return 0;
+}
+
+static const struct rtc_class_ops moxart_rtc_ops = {
+ .read_time = moxart_rtc_read_time,
+ .set_time = moxart_rtc_set_time,
+};
+
+static int moxart_rtc_probe(struct platform_device *pdev)
+{
+ struct moxart_rtc *moxart_rtc;
+ int ret = 0;
+
+ moxart_rtc = devm_kzalloc(&pdev->dev, sizeof(*moxart_rtc), GFP_KERNEL);
+ if (!moxart_rtc) {
+ dev_err(&pdev->dev, "devm_kzalloc failed\n");
+ return -ENOMEM;
+ }
+
+ moxart_rtc->gpio_data = of_get_named_gpio(pdev->dev.of_node,
+ "gpio-rtc-data", 0);
+ if (!gpio_is_valid(moxart_rtc->gpio_data)) {
+ dev_err(&pdev->dev, "invalid gpio (data): %d\n",
+ moxart_rtc->gpio_data);
+ return moxart_rtc->gpio_data;
+ }
+
+ moxart_rtc->gpio_sclk = of_get_named_gpio(pdev->dev.of_node,
+ "gpio-rtc-sclk", 0);
+ if (!gpio_is_valid(moxart_rtc->gpio_sclk)) {
+ dev_err(&pdev->dev, "invalid gpio (sclk): %d\n",
+ moxart_rtc->gpio_sclk);
+ return moxart_rtc->gpio_sclk;
+ }
+
+ moxart_rtc->gpio_reset = of_get_named_gpio(pdev->dev.of_node,
+ "gpio-rtc-reset", 0);
+ if (!gpio_is_valid(moxart_rtc->gpio_reset)) {
+ dev_err(&pdev->dev, "invalid gpio (reset): %d\n",
+ moxart_rtc->gpio_reset);
+ return moxart_rtc->gpio_reset;
+ }
+
+ spin_lock_init(&moxart_rtc->rtc_lock);
+ platform_set_drvdata(pdev, moxart_rtc);
+
+ ret = devm_gpio_request(&pdev->dev, moxart_rtc->gpio_data, "rtc_data");
+ if (ret) {
+ dev_err(&pdev->dev, "can't get rtc_data gpio\n");
+ return ret;
+ }
+
+ ret = devm_gpio_request_one(&pdev->dev, moxart_rtc->gpio_sclk,
+ GPIOF_DIR_OUT, "rtc_sclk");
+ if (ret) {
+ dev_err(&pdev->dev, "can't get rtc_sclk gpio\n");
+ return ret;
+ }
+
+ ret = devm_gpio_request_one(&pdev->dev, moxart_rtc->gpio_reset,
+ GPIOF_DIR_OUT, "rtc_reset");
+ if (ret) {
+ dev_err(&pdev->dev, "can't get rtc_reset gpio\n");
+ return ret;
+ }
+
+ moxart_rtc->rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
+ &moxart_rtc_ops,
+ THIS_MODULE);
+ if (IS_ERR(moxart_rtc->rtc)) {
+ dev_err(&pdev->dev, "devm_rtc_device_register failed\n");
+ return PTR_ERR(moxart_rtc->rtc);
+ }
+
+ return 0;
+}
+
+static const struct of_device_id moxart_rtc_match[] = {
+ { .compatible = "moxa,moxart-rtc" },
+ { },
+};
+
+static struct platform_driver moxart_rtc_driver = {
+ .probe = moxart_rtc_probe,
+ .driver = {
+ .name = "moxart-rtc",
+ .owner = THIS_MODULE,
+ .of_match_table = moxart_rtc_match,
+ },
+};
+module_platform_driver(moxart_rtc_driver);
+
+MODULE_DESCRIPTION("MOXART RTC driver");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jonas Jensen <jonas.jensen@gmail.com>");
diff --git a/drivers/rtc/rtc-mv.c b/drivers/rtc/rtc-mv.c
index baab802..d536c59 100644
--- a/drivers/rtc/rtc-mv.c
+++ b/drivers/rtc/rtc-mv.c
@@ -221,26 +221,17 @@ static int __init mv_rtc_probe(struct platform_device *pdev)
{
struct resource *res;
struct rtc_plat_data *pdata;
- resource_size_t size;
u32 rtc_time;
int ret = 0;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENODEV;
-
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return -ENOMEM;
- size = resource_size(res);
- if (!devm_request_mem_region(&pdev->dev, res->start, size,
- pdev->name))
- return -EBUSY;
-
- pdata->ioaddr = devm_ioremap(&pdev->dev, res->start, size);
- if (!pdata->ioaddr)
- return -ENOMEM;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ pdata->ioaddr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(pdata->ioaddr))
+ return PTR_ERR(pdata->ioaddr);
pdata->clk = devm_clk_get(&pdev->dev, NULL);
/* Not all SoCs require a clock.*/
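This rtc-mv conversion — and the identical ones in rtc-mxc, rtc-stk17ta8 and rtc-tx4939 below — replaces the manual get/request/ioremap triple with devm_ioremap_resource(), which itself validates the resource, requests the region and maps it. A minimal sketch of the resulting probe idiom (hypothetical driver, illustration only):

	#include <linux/platform_device.h>
	#include <linux/io.h>
	#include <linux/err.h>

	static int example_probe(struct platform_device *pdev)
	{
		struct resource *res;
		void __iomem *base;

		/* a NULL res is handled inside devm_ioremap_resource() */
		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		base = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(base))
			return PTR_ERR(base);
		return 0;
	}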
diff --git a/drivers/rtc/rtc-mxc.c b/drivers/rtc/rtc-mxc.c
index ab87bac..50c5726 100644
--- a/drivers/rtc/rtc-mxc.c
+++ b/drivers/rtc/rtc-mxc.c
@@ -377,22 +377,16 @@ static int mxc_rtc_probe(struct platform_device *pdev)
unsigned long rate;
int ret;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENODEV;
-
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return -ENOMEM;
pdata->devtype = pdev->id_entry->driver_data;
- if (!devm_request_mem_region(&pdev->dev, res->start,
- resource_size(res), pdev->name))
- return -EBUSY;
-
- pdata->ioaddr = devm_ioremap(&pdev->dev, res->start,
- resource_size(res));
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ pdata->ioaddr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(pdata->ioaddr))
+ return PTR_ERR(pdata->ioaddr);
pdata->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(pdata->clk)) {
diff --git a/drivers/rtc/rtc-nuc900.c b/drivers/rtc/rtc-nuc900.c
index 22861c5..248653c 100644
--- a/drivers/rtc/rtc-nuc900.c
+++ b/drivers/rtc/rtc-nuc900.c
@@ -99,7 +99,7 @@ static int *check_rtc_access_enable(struct nuc900_rtc *nuc900_rtc)
if (!timeout)
return ERR_PTR(-EPERM);
- return 0;
+ return NULL;
}
static int nuc900_rtc_bcd2bin(unsigned int timereg,
diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c
index c6ffbae..c7d97ee 100644
--- a/drivers/rtc/rtc-omap.c
+++ b/drivers/rtc/rtc-omap.c
@@ -70,6 +70,8 @@
#define OMAP_RTC_KICK0_REG 0x6c
#define OMAP_RTC_KICK1_REG 0x70
+#define OMAP_RTC_IRQWAKEEN 0x7c
+
/* OMAP_RTC_CTRL_REG bit fields: */
#define OMAP_RTC_CTRL_SPLIT (1<<7)
#define OMAP_RTC_CTRL_DISABLE (1<<6)
@@ -94,12 +96,21 @@
#define OMAP_RTC_INTERRUPTS_IT_ALARM (1<<3)
#define OMAP_RTC_INTERRUPTS_IT_TIMER (1<<2)
+/* OMAP_RTC_IRQWAKEEN bit fields: */
+#define OMAP_RTC_IRQWAKEEN_ALARM_WAKEEN (1<<1)
+
/* OMAP_RTC_KICKER values */
#define KICK0_VALUE 0x83e70b13
#define KICK1_VALUE 0x95a4f1e0
#define OMAP_RTC_HAS_KICKER 0x1
+/*
+ * A few RTC IP revisions have a special WAKE-EN register to enable
+ * wakeup generation for the alarm event.
+ */
+#define OMAP_RTC_HAS_IRQWAKEEN 0x2
+
static void __iomem *rtc_base;
#define rtc_read(addr) readb(rtc_base + (addr))
@@ -299,12 +310,18 @@ static struct rtc_class_ops omap_rtc_ops = {
static int omap_rtc_alarm;
static int omap_rtc_timer;
-#define OMAP_RTC_DATA_DA830_IDX 1
+#define OMAP_RTC_DATA_AM3352_IDX 1
+#define OMAP_RTC_DATA_DA830_IDX 2
static struct platform_device_id omap_rtc_devtype[] = {
{
.name = DRIVER_NAME,
- }, {
+ },
+ [OMAP_RTC_DATA_AM3352_IDX] = {
+ .name = "am3352-rtc",
+ .driver_data = OMAP_RTC_HAS_KICKER | OMAP_RTC_HAS_IRQWAKEEN,
+ },
+ [OMAP_RTC_DATA_DA830_IDX] = {
.name = "da830-rtc",
.driver_data = OMAP_RTC_HAS_KICKER,
},
@@ -316,6 +333,9 @@ static const struct of_device_id omap_rtc_of_match[] = {
{ .compatible = "ti,da830-rtc",
.data = &omap_rtc_devtype[OMAP_RTC_DATA_DA830_IDX],
},
+ { .compatible = "ti,am3352-rtc",
+ .data = &omap_rtc_devtype[OMAP_RTC_DATA_AM3352_IDX],
+ },
{},
};
MODULE_DEVICE_TABLE(of, omap_rtc_of_match);
@@ -464,16 +484,28 @@ static u8 irqstat;
static int omap_rtc_suspend(struct device *dev)
{
+ u8 irqwake_stat;
+ struct platform_device *pdev = to_platform_device(dev);
+ const struct platform_device_id *id_entry =
+ platform_get_device_id(pdev);
+
irqstat = rtc_read(OMAP_RTC_INTERRUPTS_REG);
/* FIXME the RTC alarm is not currently acting as a wakeup event
- * source, and in fact this enable() call is just saving a flag
- * that's never used...
+ * source on some platforms, and in fact this enable() call is just
+ * saving a flag that's never used...
*/
- if (device_may_wakeup(dev))
+ if (device_may_wakeup(dev)) {
enable_irq_wake(omap_rtc_alarm);
- else
+
+ if (id_entry->driver_data & OMAP_RTC_HAS_IRQWAKEEN) {
+ irqwake_stat = rtc_read(OMAP_RTC_IRQWAKEEN);
+ irqwake_stat |= OMAP_RTC_IRQWAKEEN_ALARM_WAKEEN;
+ rtc_write(irqwake_stat, OMAP_RTC_IRQWAKEEN);
+ }
+ } else {
rtc_write(0, OMAP_RTC_INTERRUPTS_REG);
+ }
/* Disable the clock/module */
pm_runtime_put_sync(dev);
@@ -483,13 +515,25 @@ static int omap_rtc_suspend(struct device *dev)
static int omap_rtc_resume(struct device *dev)
{
+ u8 irqwake_stat;
+ struct platform_device *pdev = to_platform_device(dev);
+ const struct platform_device_id *id_entry =
+ platform_get_device_id(pdev);
+
/* Enable the clock/module so that we can access the registers */
pm_runtime_get_sync(dev);
- if (device_may_wakeup(dev))
+ if (device_may_wakeup(dev)) {
disable_irq_wake(omap_rtc_alarm);
- else
+
+ if (id_entry->driver_data & OMAP_RTC_HAS_IRQWAKEEN) {
+ irqwake_stat = rtc_read(OMAP_RTC_IRQWAKEEN);
+ irqwake_stat &= ~OMAP_RTC_IRQWAKEEN_ALARM_WAKEEN;
+ rtc_write(irqwake_stat, OMAP_RTC_IRQWAKEEN);
+ }
+ } else {
rtc_write(irqstat, OMAP_RTC_INTERRUPTS_REG);
+ }
return 0;
}
#endif
diff --git a/drivers/rtc/rtc-palmas.c b/drivers/rtc/rtc-palmas.c
index a1fecc8..fffb7d3 100644
--- a/drivers/rtc/rtc-palmas.c
+++ b/drivers/rtc/rtc-palmas.c
@@ -238,6 +238,15 @@ static int palmas_rtc_probe(struct platform_device *pdev)
struct palmas *palmas = dev_get_drvdata(pdev->dev.parent);
struct palmas_rtc *palmas_rtc = NULL;
int ret;
+ bool enable_bb_charging = false;
+ bool high_bb_charging;
+
+ if (pdev->dev.of_node) {
+ enable_bb_charging = of_property_read_bool(pdev->dev.of_node,
+ "ti,backup-battery-chargeable");
+ high_bb_charging = of_property_read_bool(pdev->dev.of_node,
+ "ti,backup-battery-charge-high-current");
+ }
palmas_rtc = devm_kzalloc(&pdev->dev, sizeof(struct palmas_rtc),
GFP_KERNEL);
@@ -254,6 +263,32 @@ static int palmas_rtc_probe(struct platform_device *pdev)
palmas_rtc->dev = &pdev->dev;
platform_set_drvdata(pdev, palmas_rtc);
+ if (enable_bb_charging) {
+ unsigned reg = PALMAS_BACKUP_BATTERY_CTRL_BBS_BBC_LOW_ICHRG;
+
+ if (high_bb_charging)
+ reg = 0;
+
+ ret = palmas_update_bits(palmas, PALMAS_PMU_CONTROL_BASE,
+ PALMAS_BACKUP_BATTERY_CTRL,
+ PALMAS_BACKUP_BATTERY_CTRL_BBS_BBC_LOW_ICHRG, reg);
+ if (ret < 0) {
+ dev_err(&pdev->dev,
+ "BACKUP_BATTERY_CTRL update failed, %d\n", ret);
+ return ret;
+ }
+
+ ret = palmas_update_bits(palmas, PALMAS_PMU_CONTROL_BASE,
+ PALMAS_BACKUP_BATTERY_CTRL,
+ PALMAS_BACKUP_BATTERY_CTRL_BB_CHG_EN,
+ PALMAS_BACKUP_BATTERY_CTRL_BB_CHG_EN);
+ if (ret < 0) {
+ dev_err(&pdev->dev,
+ "BACKUP_BATTERY_CTRL update failed, %d\n", ret);
+ return ret;
+ }
+ }
+
/* Start RTC */
ret = palmas_update_bits(palmas, PALMAS_RTC_BASE, PALMAS_RTC_CTRL_REG,
PALMAS_RTC_CTRL_REG_STOP_RTC,
diff --git a/drivers/rtc/rtc-pcf2127.c b/drivers/rtc/rtc-pcf2127.c
index 205b9f7..1ee514a 100644
--- a/drivers/rtc/rtc-pcf2127.c
+++ b/drivers/rtc/rtc-pcf2127.c
@@ -203,11 +203,6 @@ static int pcf2127_probe(struct i2c_client *client,
return 0;
}
-static int pcf2127_remove(struct i2c_client *client)
-{
- return 0;
-}
-
static const struct i2c_device_id pcf2127_id[] = {
{ "pcf2127", 0 },
{ }
@@ -229,7 +224,6 @@ static struct i2c_driver pcf2127_driver = {
.of_match_table = of_match_ptr(pcf2127_of_match),
},
.probe = pcf2127_probe,
- .remove = pcf2127_remove,
.id_table = pcf2127_id,
};
diff --git a/drivers/rtc/rtc-sirfsoc.c b/drivers/rtc/rtc-sirfsoc.c
index aa7ed4b..63460cf 100644
--- a/drivers/rtc/rtc-sirfsoc.c
+++ b/drivers/rtc/rtc-sirfsoc.c
@@ -44,6 +44,7 @@ struct sirfsoc_rtc_drv {
struct rtc_device *rtc;
u32 rtc_base;
u32 irq;
+ unsigned irq_wake;
/* Overflow for every 8 years extra time */
u32 overflow_rtc;
#ifdef CONFIG_PM
@@ -355,8 +356,8 @@ static int sirfsoc_rtc_suspend(struct device *dev)
rtcdrv->saved_counter =
sirfsoc_rtc_iobrg_readl(rtcdrv->rtc_base + RTC_CN);
rtcdrv->saved_overflow_rtc = rtcdrv->overflow_rtc;
- if (device_may_wakeup(&pdev->dev))
- enable_irq_wake(rtcdrv->irq);
+ if (device_may_wakeup(&pdev->dev) && !enable_irq_wake(rtcdrv->irq))
+ rtcdrv->irq_wake = 1;
return 0;
}
@@ -423,8 +424,10 @@ static int sirfsoc_rtc_resume(struct device *dev)
struct platform_device *pdev = to_platform_device(dev);
struct sirfsoc_rtc_drv *rtcdrv = platform_get_drvdata(pdev);
sirfsoc_rtc_thaw(dev);
- if (device_may_wakeup(&pdev->dev))
+ if (device_may_wakeup(&pdev->dev) && rtcdrv->irq_wake) {
disable_irq_wake(rtcdrv->irq);
+ rtcdrv->irq_wake = 0;
+ }
return 0;
}
@@ -434,8 +437,10 @@ static int sirfsoc_rtc_restore(struct device *dev)
struct platform_device *pdev = to_platform_device(dev);
struct sirfsoc_rtc_drv *rtcdrv = platform_get_drvdata(pdev);
- if (device_may_wakeup(&pdev->dev))
+ if (device_may_wakeup(&pdev->dev) && rtcdrv->irq_wake) {
disable_irq_wake(rtcdrv->irq);
+ rtcdrv->irq_wake = 0;
+ }
return 0;
}
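The three sirfsoc hunks above pair each disable_irq_wake() with a recorded, successful enable_irq_wake(), since the irq core keeps a wake-enable refcount that must stay balanced. The pattern, reduced to its core (hypothetical drv struct, illustration only):

	/* suspend: arm the wake source only if the irq core accepted it */
	if (device_may_wakeup(dev) && !enable_irq_wake(drv->irq))
		drv->irq_wake = 1;

	/* resume/restore: disarm only what was actually armed */
	if (device_may_wakeup(dev) && drv->irq_wake) {
		disable_irq_wake(drv->irq);
		drv->irq_wake = 0;
	}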
diff --git a/drivers/rtc/rtc-stk17ta8.c b/drivers/rtc/rtc-stk17ta8.c
index af5e97e..a176ba6 100644
--- a/drivers/rtc/rtc-stk17ta8.c
+++ b/drivers/rtc/rtc-stk17ta8.c
@@ -294,19 +294,14 @@ static int stk17ta8_rtc_probe(struct platform_device *pdev)
void __iomem *ioaddr;
int ret = 0;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENODEV;
-
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return -ENOMEM;
- if (!devm_request_mem_region(&pdev->dev, res->start, RTC_REG_SIZE,
- pdev->name))
- return -EBUSY;
- ioaddr = devm_ioremap(&pdev->dev, res->start, RTC_REG_SIZE);
- if (!ioaddr)
- return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ioaddr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ioaddr))
+ return PTR_ERR(ioaddr);
pdata->ioaddr = ioaddr;
pdata->irq = platform_get_irq(pdev, 0);
diff --git a/drivers/rtc/rtc-tx4939.c b/drivers/rtc/rtc-tx4939.c
index f9a0677..4f87234 100644
--- a/drivers/rtc/rtc-tx4939.c
+++ b/drivers/rtc/rtc-tx4939.c
@@ -244,9 +244,6 @@ static int __init tx4939_rtc_probe(struct platform_device *pdev)
struct resource *res;
int irq, ret;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENODEV;
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return -ENODEV;
@@ -255,13 +252,10 @@ static int __init tx4939_rtc_probe(struct platform_device *pdev)
return -ENOMEM;
platform_set_drvdata(pdev, pdata);
- if (!devm_request_mem_region(&pdev->dev, res->start,
- resource_size(res), pdev->name))
- return -EBUSY;
- pdata->rtcreg = devm_ioremap(&pdev->dev, res->start,
- resource_size(res));
- if (!pdata->rtcreg)
- return -EBUSY;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ pdata->rtcreg = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(pdata->rtcreg))
+ return PTR_ERR(pdata->rtcreg);
spin_lock_init(&pdata->lock);
tx4939_rtc_cmd(pdata->rtcreg, TX4939_RTCCTL_COMMAND_NOP);
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index feca317..92bd22c 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -645,7 +645,7 @@ dasd_diag_init(void)
}
ASCEBC(dasd_diag_discipline.ebcname, 4);
- service_subclass_irq_register();
+ irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
register_external_interrupt(0x2603, dasd_ext_handler);
dasd_diag_discipline_pointer = &dasd_diag_discipline;
return 0;
@@ -655,7 +655,7 @@ static void __exit
dasd_diag_cleanup(void)
{
unregister_external_interrupt(0x2603, dasd_ext_handler);
- service_subclass_irq_unregister();
+ irq_subclass_unregister(IRQ_SUBCLASS_SERVICE_SIGNAL);
dasd_diag_discipline_pointer = NULL;
}
diff --git a/drivers/s390/char/fs3270.c b/drivers/s390/char/fs3270.c
index 96e52bf..f93cc32 100644
--- a/drivers/s390/char/fs3270.c
+++ b/drivers/s390/char/fs3270.c
@@ -524,20 +524,20 @@ static const struct file_operations fs3270_fops = {
.llseek = no_llseek,
};
-void fs3270_create_cb(int minor)
+static void fs3270_create_cb(int minor)
{
__register_chrdev(IBM_FS3270_MAJOR, minor, 1, "tub", &fs3270_fops);
device_create(class3270, NULL, MKDEV(IBM_FS3270_MAJOR, minor),
NULL, "3270/tub%d", minor);
}
-void fs3270_destroy_cb(int minor)
+static void fs3270_destroy_cb(int minor)
{
device_destroy(class3270, MKDEV(IBM_FS3270_MAJOR, minor));
__unregister_chrdev(IBM_FS3270_MAJOR, minor, 1, "tub");
}
-struct raw3270_notifier fs3270_notifier =
+static struct raw3270_notifier fs3270_notifier =
{
.create = fs3270_create_cb,
.destroy = fs3270_destroy_cb,
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
index 3e4fb4e..a3aa374 100644
--- a/drivers/s390/char/sclp.c
+++ b/drivers/s390/char/sclp.c
@@ -910,12 +910,12 @@ sclp_check_interface(void)
spin_unlock_irqrestore(&sclp_lock, flags);
/* Enable service-signal interruption - needs to happen
* with IRQs enabled. */
- service_subclass_irq_register();
+ irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
/* Wait for signal from interrupt or timeout */
sclp_sync_wait();
/* Disable service-signal interruption - needs to happen
* with IRQs enabled. */
- service_subclass_irq_unregister();
+ irq_subclass_unregister(IRQ_SUBCLASS_SERVICE_SIGNAL);
spin_lock_irqsave(&sclp_lock, flags);
del_timer(&sclp_request_timer);
if (sclp_init_req.status == SCLP_REQ_DONE &&
@@ -1131,7 +1131,7 @@ sclp_init(void)
spin_unlock_irqrestore(&sclp_lock, flags);
/* Enable service-signal external interruption - needs to happen with
* IRQs enabled. */
- service_subclass_irq_register();
+ irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
sclp_init_mask(1);
return 0;
diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c
index cee69da..a0f47c8 100644
--- a/drivers/s390/char/tty3270.c
+++ b/drivers/s390/char/tty3270.c
@@ -1845,17 +1845,17 @@ static const struct tty_operations tty3270_ops = {
.set_termios = tty3270_set_termios
};
-void tty3270_create_cb(int minor)
+static void tty3270_create_cb(int minor)
{
tty_register_device(tty3270_driver, minor - RAW3270_FIRSTMINOR, NULL);
}
-void tty3270_destroy_cb(int minor)
+static void tty3270_destroy_cb(int minor)
{
tty_unregister_device(tty3270_driver, minor - RAW3270_FIRSTMINOR);
}
-struct raw3270_notifier tty3270_notifier =
+static struct raw3270_notifier tty3270_notifier =
{
.create = tty3270_create_cb,
.destroy = tty3270_destroy_cb,
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
index 9e5e146..794820a 100644
--- a/drivers/s390/char/zcore.c
+++ b/drivers/s390/char/zcore.c
@@ -30,8 +30,8 @@
#define TRACE(x...) debug_sprintf_event(zcore_dbf, 1, x)
-#define TO_USER 0
-#define TO_KERNEL 1
+#define TO_USER 1
+#define TO_KERNEL 0
#define CHUNK_INFO_SIZE 34 /* 2 16-byte char, each followed by blank */
enum arch_id {
@@ -73,7 +73,7 @@ static struct ipl_parameter_block *ipl_block;
* @count: Size of buffer, which should be copied
* @mode: Either TO_KERNEL or TO_USER
*/
-static int memcpy_hsa(void *dest, unsigned long src, size_t count, int mode)
+int memcpy_hsa(void *dest, unsigned long src, size_t count, int mode)
{
int offs, blk_num;
static char buf[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
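The zcore hunks above swap the TO_USER/TO_KERNEL values and drop the static from memcpy_hsa(); the mode flag selects between a user-space and a kernel-space destination. A rough sketch of how such a mode switch is typically dispatched — a hypothetical helper, not the zcore implementation:

	/* hypothetical helper: copy to user or kernel memory by mode */
	static int copy_block(void *dest, const void *src, size_t count, int mode)
	{
		if (mode == TO_USER)
			return copy_to_user((void __user *)dest, src, count) ?
				-EFAULT : 0;
		memcpy(dest, src, count);
		return 0;
	}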
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index d4174b8..02300dc 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -413,7 +413,7 @@ __ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length,
register unsigned long reg2 asm ("2") = (unsigned long) msg;
register unsigned long reg3 asm ("3") = (unsigned long) length;
register unsigned long reg4 asm ("4") = (unsigned int) (psmid >> 32);
- register unsigned long reg5 asm ("5") = (unsigned int) psmid;
+ register unsigned long reg5 asm ("5") = psmid & 0xffffffff;
if (special == 1)
reg0 |= 0x400000UL;
diff --git a/drivers/s390/kvm/kvm_virtio.c b/drivers/s390/kvm/kvm_virtio.c
index 2ea6165..af2166f 100644
--- a/drivers/s390/kvm/kvm_virtio.c
+++ b/drivers/s390/kvm/kvm_virtio.c
@@ -472,7 +472,7 @@ static int __init kvm_devices_init(void)
INIT_WORK(&hotplug_work, hotplug_devices);
- service_subclass_irq_register();
+ irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
register_external_interrupt(0x2603, kvm_extint_handler);
scan_devices();
diff --git a/drivers/scsi/aic7xxx/aic7xxx_pci.c b/drivers/scsi/aic7xxx/aic7xxx_pci.c
index 6917b4f..22d5a94 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_pci.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_pci.c
@@ -692,7 +692,7 @@ ahc_find_pci_device(ahc_dev_softc_t pci)
* ID as valid.
*/
if (ahc_get_pci_function(pci) > 0
- && ahc_9005_subdevinfo_valid(vendor, device, subvendor, subdevice)
+ && ahc_9005_subdevinfo_valid(device, vendor, subdevice, subvendor)
&& SUBID_9005_MFUNCENB(subdevice) == 0)
return (NULL);
diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h
index 08b22a9..d7ca930 100644
--- a/drivers/scsi/bnx2fc/bnx2fc.h
+++ b/drivers/scsi/bnx2fc/bnx2fc.h
@@ -105,7 +105,7 @@
#define BNX2FC_RQ_WQE_SIZE (BNX2FC_RQ_BUF_SZ)
#define BNX2FC_XFERQ_WQE_SIZE (sizeof(struct fcoe_xfrqe))
#define BNX2FC_CONFQ_WQE_SIZE (sizeof(struct fcoe_confqe))
-#define BNX2FC_5771X_DB_PAGE_SIZE 128
+#define BNX2X_DB_SHIFT 3
#define BNX2FC_TASK_SIZE 128
#define BNX2FC_TASKS_PER_PAGE (PAGE_SIZE/BNX2FC_TASK_SIZE)
diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
index c0d035a..46a3765 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
@@ -1421,8 +1421,7 @@ int bnx2fc_map_doorbell(struct bnx2fc_rport *tgt)
reg_base = pci_resource_start(hba->pcidev,
BNX2X_DOORBELL_PCI_BAR);
- reg_off = BNX2FC_5771X_DB_PAGE_SIZE *
- (context_id & 0x1FFFF) + DPM_TRIGER_TYPE;
+ reg_off = (1 << BNX2X_DB_SHIFT) * (context_id & 0x1FFFF);
tgt->ctx_base = ioremap_nocache(reg_base + reg_off, 4);
if (!tgt->ctx_base)
return -ENOMEM;
diff --git a/drivers/scsi/bnx2i/bnx2i.h b/drivers/scsi/bnx2i/bnx2i.h
index 6940f09..c73bbcb 100644
--- a/drivers/scsi/bnx2i/bnx2i.h
+++ b/drivers/scsi/bnx2i/bnx2i.h
@@ -64,7 +64,7 @@
#define MAX_PAGES_PER_CTRL_STRUCT_POOL 8
#define BNX2I_RESERVED_SLOW_PATH_CMD_SLOTS 4
-#define BNX2I_5771X_DBELL_PAGE_SIZE 128
+#define BNX2X_DB_SHIFT 3
/* 5706/08 hardware has limit on maximum buffer size per BD it can handle */
#define MAX_BD_LENGTH 65535
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index af3e675..5be718c 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -2738,8 +2738,7 @@ int bnx2i_map_ep_dbell_regs(struct bnx2i_endpoint *ep)
if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) {
reg_base = pci_resource_start(ep->hba->pcidev,
BNX2X_DOORBELL_PCI_BAR);
- reg_off = BNX2I_5771X_DBELL_PAGE_SIZE * (cid_num & 0x1FFFF) +
- DPM_TRIGER_TYPE;
+ reg_off = (1 << BNX2X_DB_SHIFT) * (cid_num & 0x1FFFF);
ep->qp.ctx_base = ioremap_nocache(reg_base + reg_off, 4);
goto arm_cq;
}
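Both the bnx2fc and bnx2i hunks above derive the 57710 doorbell offset from a shift instead of a hard-coded 128-byte page plus the DPM trigger bias. The arithmetic, worked through with the new define (context_id in bnx2fc, cid_num in bnx2i; illustration only):

	u32 cid = context_id & 0x1FFFF;			/* 17-bit context id */
	u32 reg_off = (1 << BNX2X_DB_SHIFT) * cid;	/* 8 bytes per context */
	/* e.g. cid 0x100 -> reg_off 0x800 from the doorbell BAR */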
diff --git a/drivers/scsi/esas2r/esas2r_flash.c b/drivers/scsi/esas2r/esas2r_flash.c
index 8582929..2ec3c23 100644
--- a/drivers/scsi/esas2r/esas2r_flash.c
+++ b/drivers/scsi/esas2r/esas2r_flash.c
@@ -860,8 +860,13 @@ bool esas2r_process_fs_ioctl(struct esas2r_adapter *a,
return false;
}
+ if (fsc->command >= cmdcnt) {
+ fs->status = ATTO_STS_INV_FUNC;
+ return false;
+ }
+
func = cmd_to_fls_func[fsc->command];
- if (fsc->command >= cmdcnt || func == 0xFF) {
+ if (func == 0xFF) {
fs->status = ATTO_STS_INV_FUNC;
return false;
}
@@ -1355,7 +1360,7 @@ void esas2r_nvram_set_defaults(struct esas2r_adapter *a)
u32 time = jiffies_to_msecs(jiffies);
esas2r_lock_clear_flags(&a->flags, AF_NVR_VALID);
- memcpy(n, &default_sas_nvram, sizeof(struct esas2r_sas_nvram));
+ *n = default_sas_nvram;
n->sas_addr[3] |= 0x0F;
n->sas_addr[4] = HIBYTE(LOWORD(time));
n->sas_addr[5] = LOBYTE(LOWORD(time));
@@ -1373,7 +1378,7 @@ void esas2r_nvram_get_defaults(struct esas2r_adapter *a,
* address out first.
*/
memcpy(&sas_addr[0], a->nvram->sas_addr, 8);
- memcpy(nvram, &default_sas_nvram, sizeof(struct esas2r_sas_nvram));
+ *nvram = default_sas_nvram;
memcpy(&nvram->sas_addr[0], &sas_addr[0], 8);
}
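The first esas2r_flash.c hunk moves the fsc->command range check ahead of the cmd_to_fls_func[] lookup, so an out-of-range command can no longer index the table before being rejected. The general pattern, sketched with a hypothetical table:

	static const u8 table[] = { 0x01, 0x02, 0xFF, 0x03 };

	static bool lookup(unsigned int idx, u8 *out)
	{
		if (idx >= ARRAY_SIZE(table))	/* validate before indexing */
			return false;
		if (table[idx] == 0xFF)		/* reserved slot */
			return false;
		*out = table[idx];
		return true;
	}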
diff --git a/drivers/scsi/esas2r/esas2r_init.c b/drivers/scsi/esas2r/esas2r_init.c
index 3a798e7..da1869d 100644
--- a/drivers/scsi/esas2r/esas2r_init.c
+++ b/drivers/scsi/esas2r/esas2r_init.c
@@ -665,7 +665,7 @@ void esas2r_kill_adapter(int i)
int esas2r_cleanup(struct Scsi_Host *host)
{
- struct esas2r_adapter *a = (struct esas2r_adapter *)host->hostdata;
+ struct esas2r_adapter *a;
int index;
if (host == NULL) {
@@ -678,6 +678,7 @@ int esas2r_cleanup(struct Scsi_Host *host)
}
esas2r_debug("esas2r_cleanup called for host %p", host);
+ a = (struct esas2r_adapter *)host->hostdata;
index = a->index;
esas2r_kill_adapter(index);
return index;
@@ -808,7 +809,7 @@ static void esas2r_init_pci_cfg_space(struct esas2r_adapter *a)
int pcie_cap_reg;
pcie_cap_reg = pci_find_capability(a->pcid, PCI_CAP_ID_EXP);
- if (0xffff && pcie_cap_reg) {
+ if (0xffff & pcie_cap_reg) {
u16 devcontrol;
pci_read_config_word(a->pcid, pcie_cap_reg + PCI_EXP_DEVCTL,
@@ -1550,8 +1551,7 @@ void esas2r_reset_chip(struct esas2r_adapter *a)
* to not overwrite a previous crash that was saved.
*/
if ((a->flags2 & AF2_COREDUMP_AVAIL)
- && !(a->flags2 & AF2_COREDUMP_SAVED)
- && a->fw_coredump_buff) {
+ && !(a->flags2 & AF2_COREDUMP_SAVED)) {
esas2r_read_mem_block(a,
a->fw_coredump_buff,
MW_DATA_ADDR_SRAM + 0x80000,
diff --git a/drivers/scsi/esas2r/esas2r_ioctl.c b/drivers/scsi/esas2r/esas2r_ioctl.c
index f3d0cb8..e5b0902 100644
--- a/drivers/scsi/esas2r/esas2r_ioctl.c
+++ b/drivers/scsi/esas2r/esas2r_ioctl.c
@@ -415,7 +415,7 @@ static int csmi_ioctl_callback(struct esas2r_adapter *a,
lun = tm->lun;
}
- if (path > 0 || tid > ESAS2R_MAX_ID) {
+ if (path > 0) {
rq->func_rsp.ioctl_rsp.csmi.csmi_status = cpu_to_le32(
CSMI_STS_INV_PARAM);
return false;
diff --git a/drivers/scsi/esas2r/esas2r_vda.c b/drivers/scsi/esas2r/esas2r_vda.c
index f8ec6d6..fd13928 100644
--- a/drivers/scsi/esas2r/esas2r_vda.c
+++ b/drivers/scsi/esas2r/esas2r_vda.c
@@ -302,6 +302,7 @@ static void esas2r_complete_vda_ioctl(struct esas2r_adapter *a,
if (vi->cmd.cfg.cfg_func == VDA_CFG_GET_INIT) {
struct atto_ioctl_vda_cfg_cmd *cfg = &vi->cmd.cfg;
struct atto_vda_cfg_rsp *rsp = &rq->func_rsp.cfg_rsp;
+ char buf[sizeof(cfg->data.init.fw_release) + 1];
cfg->data_length =
cpu_to_le32(sizeof(struct atto_vda_cfg_init));
@@ -309,11 +310,13 @@ static void esas2r_complete_vda_ioctl(struct esas2r_adapter *a,
le32_to_cpu(rsp->vda_version);
cfg->data.init.fw_build = rsp->fw_build;
- sprintf((char *)&cfg->data.init.fw_release,
- "%1d.%02d",
+ snprintf(buf, sizeof(buf), "%1d.%02d",
(int)LOBYTE(le16_to_cpu(rsp->fw_release)),
(int)HIBYTE(le16_to_cpu(rsp->fw_release)));
+ memcpy(&cfg->data.init.fw_release, buf,
+ sizeof(cfg->data.init.fw_release));
+
if (LOWORD(LOBYTE(cfg->data.init.fw_build)) == 'A')
cfg->data.init.fw_version =
cfg->data.init.fw_build;
diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h
index c18c681..e4dd3d7 100644
--- a/drivers/scsi/fnic/fnic.h
+++ b/drivers/scsi/fnic/fnic.h
@@ -43,6 +43,8 @@
#define DFX DRV_NAME "%d: "
#define DESC_CLEAN_LOW_WATERMARK 8
+#define FNIC_UCSM_DFLT_THROTTLE_CNT_BLD 16 /* UCSM default throttle count */
+#define FNIC_MIN_IO_REQ 256 /* Min IO throttle count */
#define FNIC_MAX_IO_REQ 2048 /* scsi_cmnd tag map entries */
#define FNIC_IO_LOCKS 64 /* IO locks: power of 2 */
#define FNIC_DFLT_QUEUE_DEPTH 32
@@ -154,6 +156,9 @@ do { \
FNIC_CHECK_LOGGING(FNIC_ISR_LOGGING, \
shost_printk(kern_level, host, fmt, ##args);)
+#define FNIC_MAIN_NOTE(kern_level, host, fmt, args...) \
+ shost_printk(kern_level, host, fmt, ##args)
+
extern const char *fnic_state_str[];
enum fnic_intx_intr_index {
@@ -215,10 +220,12 @@ struct fnic {
struct vnic_stats *stats;
unsigned long stats_time; /* time of stats update */
+ unsigned long stats_reset_time; /* time of stats reset */
struct vnic_nic_cfg *nic_cfg;
char name[IFNAMSIZ];
struct timer_list notify_timer; /* used for MSI interrupts */
+ unsigned int fnic_max_tag_id;
unsigned int err_intr_offset;
unsigned int link_intr_offset;
@@ -359,4 +366,5 @@ fnic_chk_state_flags_locked(struct fnic *fnic, unsigned long st_flags)
return ((fnic->state_flags & st_flags) == st_flags);
}
void __fnic_set_state_flags(struct fnic *, unsigned long, unsigned long);
+void fnic_dump_fchost_stats(struct Scsi_Host *, struct fc_host_statistics *);
#endif /* _FNIC_H_ */
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
index 42e15ee..bbf81ea 100644
--- a/drivers/scsi/fnic/fnic_main.c
+++ b/drivers/scsi/fnic/fnic_main.c
@@ -74,6 +74,10 @@ module_param(fnic_trace_max_pages, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(fnic_trace_max_pages, "Total allocated memory pages "
"for fnic trace buffer");
+static unsigned int fnic_max_qdepth = FNIC_DFLT_QUEUE_DEPTH;
+module_param(fnic_max_qdepth, uint, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(fnic_max_qdepth, "Queue depth to report for each LUN");
+
static struct libfc_function_template fnic_transport_template = {
.frame_send = fnic_send,
.lport_set_port_id = fnic_set_port_id,
@@ -91,7 +95,7 @@ static int fnic_slave_alloc(struct scsi_device *sdev)
if (!rport || fc_remote_port_chkready(rport))
return -ENXIO;
- scsi_activate_tcq(sdev, FNIC_DFLT_QUEUE_DEPTH);
+ scsi_activate_tcq(sdev, fnic_max_qdepth);
return 0;
}
@@ -126,6 +130,7 @@ fnic_set_rport_dev_loss_tmo(struct fc_rport *rport, u32 timeout)
static void fnic_get_host_speed(struct Scsi_Host *shost);
static struct scsi_transport_template *fnic_fc_transport;
static struct fc_host_statistics *fnic_get_stats(struct Scsi_Host *);
+static void fnic_reset_host_stats(struct Scsi_Host *);
static struct fc_function_template fnic_fc_functions = {
@@ -153,6 +158,7 @@ static struct fc_function_template fnic_fc_functions = {
.set_rport_dev_loss_tmo = fnic_set_rport_dev_loss_tmo,
.issue_fc_host_lip = fnic_reset,
.get_fc_host_stats = fnic_get_stats,
+ .reset_fc_host_stats = fnic_reset_host_stats,
.dd_fcrport_size = sizeof(struct fc_rport_libfc_priv),
.terminate_rport_io = fnic_terminate_rport_io,
.bsg_request = fc_lport_bsg_request,
@@ -206,13 +212,116 @@ static struct fc_host_statistics *fnic_get_stats(struct Scsi_Host *host)
stats->error_frames = vs->tx.tx_errors + vs->rx.rx_errors;
stats->dumped_frames = vs->tx.tx_drops + vs->rx.rx_drop;
stats->invalid_crc_count = vs->rx.rx_crc_errors;
- stats->seconds_since_last_reset = (jiffies - lp->boot_time) / HZ;
+ stats->seconds_since_last_reset =
+ (jiffies - fnic->stats_reset_time) / HZ;
stats->fcp_input_megabytes = div_u64(fnic->fcp_input_bytes, 1000000);
stats->fcp_output_megabytes = div_u64(fnic->fcp_output_bytes, 1000000);
return stats;
}
+/*
+ * fnic_dump_fchost_stats
+ * note : dumps fc_host statistics into the system log
+ */
+void fnic_dump_fchost_stats(struct Scsi_Host *host,
+ struct fc_host_statistics *stats)
+{
+ FNIC_MAIN_NOTE(KERN_NOTICE, host,
+ "fnic: seconds since last reset = %llu\n",
+ stats->seconds_since_last_reset);
+ FNIC_MAIN_NOTE(KERN_NOTICE, host,
+ "fnic: tx frames = %llu\n",
+ stats->tx_frames);
+ FNIC_MAIN_NOTE(KERN_NOTICE, host,
+ "fnic: tx words = %llu\n",
+ stats->tx_words);
+ FNIC_MAIN_NOTE(KERN_NOTICE, host,
+ "fnic: rx frames = %llu\n",
+ stats->rx_frames);
+ FNIC_MAIN_NOTE(KERN_NOTICE, host,
+ "fnic: rx words = %llu\n",
+ stats->rx_words);
+ FNIC_MAIN_NOTE(KERN_NOTICE, host,
+ "fnic: lip count = %llu\n",
+ stats->lip_count);
+ FNIC_MAIN_NOTE(KERN_NOTICE, host,
+ "fnic: nos count = %llu\n",
+ stats->nos_count);
+ FNIC_MAIN_NOTE(KERN_NOTICE, host,
+ "fnic: error frames = %llu\n",
+ stats->error_frames);
+ FNIC_MAIN_NOTE(KERN_NOTICE, host,
+ "fnic: dumped frames = %llu\n",
+ stats->dumped_frames);
+ FNIC_MAIN_NOTE(KERN_NOTICE, host,
+ "fnic: link failure count = %llu\n",
+ stats->link_failure_count);
+ FNIC_MAIN_NOTE(KERN_NOTICE, host,
+ "fnic: loss of sync count = %llu\n",
+ stats->loss_of_sync_count);
+ FNIC_MAIN_NOTE(KERN_NOTICE, host,
+ "fnic: loss of signal count = %llu\n",
+ stats->loss_of_signal_count);
+ FNIC_MAIN_NOTE(KERN_NOTICE, host,
+ "fnic: prim seq protocol err count = %llu\n",
+ stats->prim_seq_protocol_err_count);
+ FNIC_MAIN_NOTE(KERN_NOTICE, host,
+ "fnic: invalid tx word count= %llu\n",
+ stats->invalid_tx_word_count);
+ FNIC_MAIN_NOTE(KERN_NOTICE, host,
+ "fnic: invalid crc count = %llu\n",
+ stats->invalid_crc_count);
+ FNIC_MAIN_NOTE(KERN_NOTICE, host,
+ "fnic: fcp input requests = %llu\n",
+ stats->fcp_input_requests);
+ FNIC_MAIN_NOTE(KERN_NOTICE, host,
+ "fnic: fcp output requests = %llu\n",
+ stats->fcp_output_requests);
+ FNIC_MAIN_NOTE(KERN_NOTICE, host,
+ "fnic: fcp control requests = %llu\n",
+ stats->fcp_control_requests);
+ FNIC_MAIN_NOTE(KERN_NOTICE, host,
+ "fnic: fcp input megabytes = %llu\n",
+ stats->fcp_input_megabytes);
+ FNIC_MAIN_NOTE(KERN_NOTICE, host,
+ "fnic: fcp output megabytes = %llu\n",
+ stats->fcp_output_megabytes);
+ return;
+}
+
+/*
+ * fnic_reset_host_stats : clears host stats
+ * note : called when reset_statistics is written under the fc_host
+ * statistics sysfs dir
+ */
+static void fnic_reset_host_stats(struct Scsi_Host *host)
+{
+ int ret;
+ struct fc_lport *lp = shost_priv(host);
+ struct fnic *fnic = lport_priv(lp);
+ struct fc_host_statistics *stats;
+ unsigned long flags;
+
+ /* dump current stats, before clearing them */
+ stats = fnic_get_stats(host);
+ fnic_dump_fchost_stats(host, stats);
+
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ ret = vnic_dev_stats_clear(fnic->vdev);
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+
+ if (ret) {
+ FNIC_MAIN_DBG(KERN_DEBUG, fnic->lport->host,
+ "fnic: Reset vnic stats failed"
+ " 0x%x", ret);
+ return;
+ }
+ fnic->stats_reset_time = jiffies;
+ memset(stats, 0, sizeof(*stats));
+
+ return;
+}
+
void fnic_log_q_error(struct fnic *fnic)
{
unsigned int i;
@@ -447,13 +556,6 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
host->transportt = fnic_fc_transport;
- err = scsi_init_shared_tag_map(host, FNIC_MAX_IO_REQ);
- if (err) {
- shost_printk(KERN_ERR, fnic->lport->host,
- "Unable to alloc shared tag map\n");
- goto err_out_free_hba;
- }
-
/* Setup PCI resources */
pci_set_drvdata(pdev, fnic);
@@ -476,10 +578,10 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_master(pdev);
/* Query PCI controller on system for DMA addressing
- * limitation for the device. Try 40-bit first, and
+ * limitation for the device. Try 64-bit first, and
* fail to 32-bit.
*/
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(40));
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
if (err) {
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (err) {
@@ -496,10 +598,10 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_release_regions;
}
} else {
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40));
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
if (err) {
shost_printk(KERN_ERR, fnic->lport->host,
- "Unable to obtain 40-bit DMA "
+ "Unable to obtain 64-bit DMA "
"for consistent allocations, aborting.\n");
goto err_out_release_regions;
}
@@ -566,6 +668,22 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
"aborting.\n");
goto err_out_dev_close;
}
+
+ /* Configure Maximum Outstanding IO reqs*/
+ if (fnic->config.io_throttle_count != FNIC_UCSM_DFLT_THROTTLE_CNT_BLD) {
+ host->can_queue = min_t(u32, FNIC_MAX_IO_REQ,
+ max_t(u32, FNIC_MIN_IO_REQ,
+ fnic->config.io_throttle_count));
+ }
+ fnic->fnic_max_tag_id = host->can_queue;
+
+ err = scsi_init_shared_tag_map(host, fnic->fnic_max_tag_id);
+ if (err) {
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "Unable to alloc shared tag map\n");
+ goto err_out_dev_close;
+ }
+
host->max_lun = fnic->config.luns_per_tgt;
host->max_id = FNIC_MAX_FCP_TARGET;
host->max_cmd_len = FCOE_MAX_CMD_LEN;
@@ -719,6 +837,7 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
fc_lport_init_stats(lp);
+ fnic->stats_reset_time = jiffies;
fc_lport_config(lp);
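The probe hunk above clamps the firmware-provided io_throttle_count into [FNIC_MIN_IO_REQ, FNIC_MAX_IO_REQ] whenever it differs from the UCSM build default. Worked examples of the min_t/max_t pair, using the values from the defines added in fnic.h:

	/* can_queue = min(2048, max(256, io_throttle_count)) */
	/* io_throttle_count   64 -> can_queue  256 (raised to the floor) */
	/* io_throttle_count 1024 -> can_queue 1024 (passed through)      */
	/* io_throttle_count 4096 -> can_queue 2048 (capped at the max)   */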
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index a97e6e5..d014aae 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -111,6 +111,12 @@ static inline spinlock_t *fnic_io_lock_hash(struct fnic *fnic,
return &fnic->io_req_lock[hash];
}
+static inline spinlock_t *fnic_io_lock_tag(struct fnic *fnic,
+ int tag)
+{
+ return &fnic->io_req_lock[tag & (FNIC_IO_LOCKS - 1)];
+}
+
/*
* Unmap the data buffer and sense buffer for an io_req,
* also unmap and free the device-private scatter/gather list.
@@ -730,7 +736,7 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
fcpio_tag_id_dec(&tag, &id);
icmnd_cmpl = &desc->u.icmnd_cmpl;
- if (id >= FNIC_MAX_IO_REQ) {
+ if (id >= fnic->fnic_max_tag_id) {
shost_printk(KERN_ERR, fnic->lport->host,
"Tag out of range tag %x hdr status = %s\n",
id, fnic_fcpio_status_to_str(hdr_status));
@@ -818,38 +824,6 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
if (icmnd_cmpl->flags & FCPIO_ICMND_CMPL_RESID_UNDER)
xfer_len -= icmnd_cmpl->residual;
- /*
- * If queue_full, then try to reduce queue depth for all
- * LUNS on the target. Todo: this should be accompanied
- * by a periodic queue_depth rampup based on successful
- * IO completion.
- */
- if (icmnd_cmpl->scsi_status == QUEUE_FULL) {
- struct scsi_device *t_sdev;
- int qd = 0;
-
- shost_for_each_device(t_sdev, sc->device->host) {
- if (t_sdev->id != sc->device->id)
- continue;
-
- if (t_sdev->queue_depth > 1) {
- qd = scsi_track_queue_full
- (t_sdev,
- t_sdev->queue_depth - 1);
- if (qd == -1)
- qd = t_sdev->host->cmd_per_lun;
- shost_printk(KERN_INFO,
- fnic->lport->host,
- "scsi[%d:%d:%d:%d"
- "] queue full detected,"
- "new depth = %d\n",
- t_sdev->host->host_no,
- t_sdev->channel,
- t_sdev->id, t_sdev->lun,
- t_sdev->queue_depth);
- }
- }
- }
break;
case FCPIO_TIMEOUT: /* request was timed out */
@@ -939,7 +913,7 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
fcpio_tag_id_dec(&tag, &id);
- if ((id & FNIC_TAG_MASK) >= FNIC_MAX_IO_REQ) {
+ if ((id & FNIC_TAG_MASK) >= fnic->fnic_max_tag_id) {
shost_printk(KERN_ERR, fnic->lport->host,
"Tag out of range tag %x hdr status = %s\n",
id, fnic_fcpio_status_to_str(hdr_status));
@@ -988,9 +962,7 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
spin_unlock_irqrestore(io_lock, flags);
return;
}
- CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;
CMD_ABTS_STATUS(sc) = hdr_status;
-
CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_DONE;
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"abts cmpl recd. id %d status %s\n",
@@ -1148,23 +1120,25 @@ int fnic_wq_copy_cmpl_handler(struct fnic *fnic, int copy_work_to_do)
static void fnic_cleanup_io(struct fnic *fnic, int exclude_id)
{
- unsigned int i;
+ int i;
struct fnic_io_req *io_req;
unsigned long flags = 0;
struct scsi_cmnd *sc;
spinlock_t *io_lock;
unsigned long start_time = 0;
- for (i = 0; i < FNIC_MAX_IO_REQ; i++) {
+ for (i = 0; i < fnic->fnic_max_tag_id; i++) {
if (i == exclude_id)
continue;
+ io_lock = fnic_io_lock_tag(fnic, i);
+ spin_lock_irqsave(io_lock, flags);
sc = scsi_host_find_tag(fnic->lport->host, i);
- if (!sc)
+ if (!sc) {
+ spin_unlock_irqrestore(io_lock, flags);
continue;
+ }
- io_lock = fnic_io_lock_hash(fnic, sc);
- spin_lock_irqsave(io_lock, flags);
io_req = (struct fnic_io_req *)CMD_SP(sc);
if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
!(CMD_FLAGS(sc) & FNIC_DEV_RST_DONE)) {
@@ -1236,7 +1210,7 @@ void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq,
fcpio_tag_id_dec(&desc->hdr.tag, &id);
id &= FNIC_TAG_MASK;
- if (id >= FNIC_MAX_IO_REQ)
+ if (id >= fnic->fnic_max_tag_id)
return;
sc = scsi_host_find_tag(fnic->lport->host, id);
@@ -1340,14 +1314,15 @@ static void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
if (fnic->in_remove)
return;
- for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) {
+ for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) {
abt_tag = tag;
+ io_lock = fnic_io_lock_tag(fnic, tag);
+ spin_lock_irqsave(io_lock, flags);
sc = scsi_host_find_tag(fnic->lport->host, tag);
- if (!sc)
+ if (!sc) {
+ spin_unlock_irqrestore(io_lock, flags);
continue;
-
- io_lock = fnic_io_lock_hash(fnic, sc);
- spin_lock_irqsave(io_lock, flags);
+ }
io_req = (struct fnic_io_req *)CMD_SP(sc);
@@ -1441,12 +1416,29 @@ void fnic_terminate_rport_io(struct fc_rport *rport)
unsigned long flags;
struct scsi_cmnd *sc;
struct scsi_lun fc_lun;
- struct fc_rport_libfc_priv *rdata = rport->dd_data;
- struct fc_lport *lport = rdata->local_port;
- struct fnic *fnic = lport_priv(lport);
+ struct fc_rport_libfc_priv *rdata;
+ struct fc_lport *lport;
+ struct fnic *fnic;
struct fc_rport *cmd_rport;
enum fnic_ioreq_state old_ioreq_state;
+ if (!rport) {
+ printk(KERN_ERR "fnic_terminate_rport_io: rport is NULL\n");
+ return;
+ }
+ rdata = rport->dd_data;
+
+ if (!rdata) {
+ printk(KERN_ERR "fnic_terminate_rport_io: rdata is NULL\n");
+ return;
+ }
+ lport = rdata->local_port;
+
+ if (!lport) {
+ printk(KERN_ERR "fnic_terminate_rport_io: lport is NULL\n");
+ return;
+ }
+ fnic = lport_priv(lport);
FNIC_SCSI_DBG(KERN_DEBUG,
fnic->lport->host, "fnic_terminate_rport_io called"
" wwpn 0x%llx, wwnn0x%llx, rport 0x%p, portid 0x%06x\n",
@@ -1456,18 +1448,21 @@ void fnic_terminate_rport_io(struct fc_rport *rport)
if (fnic->in_remove)
return;
- for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) {
+ for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) {
abt_tag = tag;
+ io_lock = fnic_io_lock_tag(fnic, tag);
+ spin_lock_irqsave(io_lock, flags);
sc = scsi_host_find_tag(fnic->lport->host, tag);
- if (!sc)
+ if (!sc) {
+ spin_unlock_irqrestore(io_lock, flags);
continue;
+ }
cmd_rport = starget_to_rport(scsi_target(sc->device));
- if (rport != cmd_rport)
+ if (rport != cmd_rport) {
+ spin_unlock_irqrestore(io_lock, flags);
continue;
-
- io_lock = fnic_io_lock_hash(fnic, sc);
- spin_lock_irqsave(io_lock, flags);
+ }
io_req = (struct fnic_io_req *)CMD_SP(sc);
@@ -1680,13 +1675,15 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
io_req->abts_done = NULL;
/* fw did not complete abort, timed out */
- if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
+ if (CMD_ABTS_STATUS(sc) == FCPIO_INVALID_CODE) {
spin_unlock_irqrestore(io_lock, flags);
CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_TIMED_OUT;
ret = FAILED;
goto fnic_abort_cmd_end;
}
+ CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;
+
/*
* firmware completed the abort, check the status,
* free the io_req irrespective of failure or success
@@ -1784,17 +1781,18 @@ static int fnic_clean_pending_aborts(struct fnic *fnic,
DECLARE_COMPLETION_ONSTACK(tm_done);
enum fnic_ioreq_state old_ioreq_state;
- for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) {
+ for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) {
+ io_lock = fnic_io_lock_tag(fnic, tag);
+ spin_lock_irqsave(io_lock, flags);
sc = scsi_host_find_tag(fnic->lport->host, tag);
/*
* ignore this lun reset cmd or cmds that do not belong to
* this lun
*/
- if (!sc || sc == lr_sc || sc->device != lun_dev)
+ if (!sc || sc == lr_sc || sc->device != lun_dev) {
+ spin_unlock_irqrestore(io_lock, flags);
continue;
-
- io_lock = fnic_io_lock_hash(fnic, sc);
- spin_lock_irqsave(io_lock, flags);
+ }
io_req = (struct fnic_io_req *)CMD_SP(sc);
@@ -1823,6 +1821,11 @@ static int fnic_clean_pending_aborts(struct fnic *fnic,
spin_unlock_irqrestore(io_lock, flags);
continue;
}
+
+ if (io_req->abts_done)
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "%s: io_req->abts_done is set state is %s\n",
+ __func__, fnic_ioreq_state_to_str(CMD_STATE(sc)));
old_ioreq_state = CMD_STATE(sc);
/*
* Any pending IO issued prior to reset is expected to be
@@ -1833,11 +1836,6 @@ static int fnic_clean_pending_aborts(struct fnic *fnic,
*/
CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
- if (io_req->abts_done)
- shost_printk(KERN_ERR, fnic->lport->host,
- "%s: io_req->abts_done is set state is %s\n",
- __func__, fnic_ioreq_state_to_str(CMD_STATE(sc)));
-
BUG_ON(io_req->abts_done);
abt_tag = tag;
@@ -1890,12 +1888,13 @@ static int fnic_clean_pending_aborts(struct fnic *fnic,
io_req->abts_done = NULL;
/* if abort is still pending with fw, fail */
- if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
+ if (CMD_ABTS_STATUS(sc) == FCPIO_INVALID_CODE) {
spin_unlock_irqrestore(io_lock, flags);
CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_DONE;
ret = 1;
goto clean_pending_aborts_end;
}
+ CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;
CMD_SP(sc) = NULL;
spin_unlock_irqrestore(io_lock, flags);
@@ -2093,8 +2092,8 @@ int fnic_device_reset(struct scsi_cmnd *sc)
spin_unlock_irqrestore(io_lock, flags);
int_to_scsilun(sc->device->lun, &fc_lun);
/*
- * Issue abort and terminate on the device reset request.
- * If q'ing of the abort fails, retry issue it after a delay.
+ * Issue abort and terminate on device reset request.
+ * If q'ing of terminate fails, retry it after a delay.
*/
while (1) {
spin_lock_irqsave(io_lock, flags);
@@ -2405,7 +2404,7 @@ int fnic_is_abts_pending(struct fnic *fnic, struct scsi_cmnd *lr_sc)
lun_dev = lr_sc->device;
/* walk again to check, if IOs are still pending in fw */
- for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) {
+ for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) {
sc = scsi_host_find_tag(fnic->lport->host, tag);
/*
* ignore this lun reset cmd or cmds that do not belong to
diff --git a/drivers/scsi/fnic/vnic_scsi.h b/drivers/scsi/fnic/vnic_scsi.h
index fbb5536..e343e1d 100644
--- a/drivers/scsi/fnic/vnic_scsi.h
+++ b/drivers/scsi/fnic/vnic_scsi.h
@@ -54,8 +54,8 @@
#define VNIC_FNIC_PLOGI_TIMEOUT_MIN 1000
#define VNIC_FNIC_PLOGI_TIMEOUT_MAX 255000
-#define VNIC_FNIC_IO_THROTTLE_COUNT_MIN 256
-#define VNIC_FNIC_IO_THROTTLE_COUNT_MAX 4096
+#define VNIC_FNIC_IO_THROTTLE_COUNT_MIN 1
+#define VNIC_FNIC_IO_THROTTLE_COUNT_MAX 2048
#define VNIC_FNIC_LINK_DOWN_TIMEOUT_MIN 0
#define VNIC_FNIC_LINK_DOWN_TIMEOUT_MAX 240000
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index fac8cf5..891c86b 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -54,7 +54,7 @@
#include "hpsa.h"
/* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' */
-#define HPSA_DRIVER_VERSION "2.0.2-1"
+#define HPSA_DRIVER_VERSION "3.4.0-1"
#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
#define HPSA "hpsa"
@@ -89,13 +89,14 @@ static const struct pci_device_id hpsa_pci_device_id[] = {
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3245},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3247},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249},
- {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324a},
- {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324b},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324A},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324B},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3233},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3350},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3351},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3352},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3353},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x334D},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3354},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3355},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3356},
@@ -107,7 +108,19 @@ static const struct pci_device_id hpsa_pci_device_id[] = {
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1925},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1926},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1928},
- {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x334d},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1929},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BD},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BE},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BF},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C0},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C1},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C2},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C3},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C4},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C5},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C7},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C8},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C9},
{PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
{0,}
@@ -125,24 +138,35 @@ static struct board_type products[] = {
{0x3245103C, "Smart Array P410i", &SA5_access},
{0x3247103C, "Smart Array P411", &SA5_access},
{0x3249103C, "Smart Array P812", &SA5_access},
- {0x324a103C, "Smart Array P712m", &SA5_access},
- {0x324b103C, "Smart Array P711m", &SA5_access},
+ {0x324A103C, "Smart Array P712m", &SA5_access},
+ {0x324B103C, "Smart Array P711m", &SA5_access},
{0x3350103C, "Smart Array P222", &SA5_access},
{0x3351103C, "Smart Array P420", &SA5_access},
{0x3352103C, "Smart Array P421", &SA5_access},
{0x3353103C, "Smart Array P822", &SA5_access},
+ {0x334D103C, "Smart Array P822se", &SA5_access},
{0x3354103C, "Smart Array P420i", &SA5_access},
{0x3355103C, "Smart Array P220i", &SA5_access},
{0x3356103C, "Smart Array P721m", &SA5_access},
- {0x1920103C, "Smart Array", &SA5_access},
- {0x1921103C, "Smart Array", &SA5_access},
- {0x1922103C, "Smart Array", &SA5_access},
- {0x1923103C, "Smart Array", &SA5_access},
- {0x1924103C, "Smart Array", &SA5_access},
- {0x1925103C, "Smart Array", &SA5_access},
- {0x1926103C, "Smart Array", &SA5_access},
- {0x1928103C, "Smart Array", &SA5_access},
- {0x334d103C, "Smart Array P822se", &SA5_access},
+ {0x1921103C, "Smart Array P830i", &SA5_access},
+ {0x1922103C, "Smart Array P430", &SA5_access},
+ {0x1923103C, "Smart Array P431", &SA5_access},
+ {0x1924103C, "Smart Array P830", &SA5_access},
+ {0x1926103C, "Smart Array P731m", &SA5_access},
+ {0x1928103C, "Smart Array P230i", &SA5_access},
+ {0x1929103C, "Smart Array P530", &SA5_access},
+ {0x21BD103C, "Smart Array", &SA5_access},
+ {0x21BE103C, "Smart Array", &SA5_access},
+ {0x21BF103C, "Smart Array", &SA5_access},
+ {0x21C0103C, "Smart Array", &SA5_access},
+ {0x21C1103C, "Smart Array", &SA5_access},
+ {0x21C2103C, "Smart Array", &SA5_access},
+ {0x21C3103C, "Smart Array", &SA5_access},
+ {0x21C4103C, "Smart Array", &SA5_access},
+ {0x21C5103C, "Smart Array", &SA5_access},
+ {0x21C7103C, "Smart Array", &SA5_access},
+ {0x21C8103C, "Smart Array", &SA5_access},
+ {0x21C9103C, "Smart Array", &SA5_access},
{0xFFFF103C, "Unknown Smart Array", &SA5_access},
};
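hpsa resolves a products[] entry from a 32-bit board id built as (PCI subsystem device << 16) | subsystem vendor, which is why the 0x103C/0x21BD pair in the id table reappears above as 0x21BD103C. Sketch of the composition (illustration; the driver's own lookup helper does the equivalent):

	u32 board_id = ((u32)subsystem_device << 16) | subsystem_vendor;
	/* 0x21BD << 16 | 0x103C -> 0x21BD103C */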
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index 4e31caa..23f5ba5 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -2208,7 +2208,10 @@ static int ibmvfc_cancel_all(struct scsi_device *sdev, int type)
if (rsp_rc != 0) {
sdev_printk(KERN_ERR, sdev, "Failed to send cancel event. rc=%d\n", rsp_rc);
- return -EIO;
+ /* If a failure is received, the host adapter is most likely going
+ through a reset; return success so the caller will wait for the
+ command being cancelled to get returned */
+ return 0;
}
sdev_printk(KERN_INFO, sdev, "Cancelling outstanding commands.\n");
@@ -2221,7 +2224,15 @@ static int ibmvfc_cancel_all(struct scsi_device *sdev, int type)
if (status != IBMVFC_MAD_SUCCESS) {
sdev_printk(KERN_WARNING, sdev, "Cancel failed with rc=%x\n", status);
- return -EIO;
+ switch (status) {
+ case IBMVFC_MAD_DRIVER_FAILED:
+ case IBMVFC_MAD_CRQ_ERROR:
+ /* Host adapter is most likely going through a reset; return success
+ so the caller will wait for the command being cancelled to get
+ returned */
+ return 0;
+ default:
+ return -EIO;
+ };
}
sdev_printk(KERN_INFO, sdev, "Successfully cancelled outstanding commands\n");
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index d0fa4b6..fa76440 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -241,7 +241,7 @@ static void gather_partition_info(void)
struct device_node *rootdn;
const char *ppartition_name;
- const unsigned int *p_number_ptr;
+ const __be32 *p_number_ptr;
/* Retrieve information about this partition */
rootdn = of_find_node_by_path("/");
@@ -255,7 +255,7 @@ static void gather_partition_info(void)
sizeof(partition_name));
p_number_ptr = of_get_property(rootdn, "ibm,partition-no", NULL);
if (p_number_ptr)
- partition_number = *p_number_ptr;
+ partition_number = of_read_number(p_number_ptr, 1);
of_node_put(rootdn);
}
@@ -270,10 +270,11 @@ static void set_adapter_info(struct ibmvscsi_host_data *hostdata)
strncpy(hostdata->madapter_info.partition_name, partition_name,
sizeof(hostdata->madapter_info.partition_name));
- hostdata->madapter_info.partition_number = partition_number;
+ hostdata->madapter_info.partition_number =
+ cpu_to_be32(partition_number);
- hostdata->madapter_info.mad_version = 1;
- hostdata->madapter_info.os_type = 2;
+ hostdata->madapter_info.mad_version = cpu_to_be32(1);
+ hostdata->madapter_info.os_type = cpu_to_be32(2);
}
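The ibmvscsi hunks annotate wire-format fields as __be32/__be64 and convert with cpu_to_be*/be*_to_cpu at every access, so sparse can flag a missed byte swap. The convention in miniature (hypothetical struct, illustration only):

	struct wire_hdr {
		__be32 len;			/* big-endian on the wire */
		__be64 addr;
	};

	static void fill_hdr(struct wire_hdr *h, u32 len, u64 addr)
	{
		h->len = cpu_to_be32(len);	/* swap at the boundary */
		h->addr = cpu_to_be64(addr);
	}

	static u32 hdr_len(const struct wire_hdr *h)
	{
		return be32_to_cpu(h->len);	/* back to CPU order */
	}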
/**
@@ -464,9 +465,9 @@ static int initialize_event_pool(struct event_pool *pool,
memset(&evt->crq, 0x00, sizeof(evt->crq));
atomic_set(&evt->free, 1);
evt->crq.valid = 0x80;
- evt->crq.IU_length = sizeof(*evt->xfer_iu);
- evt->crq.IU_data_ptr = pool->iu_token +
- sizeof(*evt->xfer_iu) * i;
+ evt->crq.IU_length = cpu_to_be16(sizeof(*evt->xfer_iu));
+ evt->crq.IU_data_ptr = cpu_to_be64(pool->iu_token +
+ sizeof(*evt->xfer_iu) * i);
evt->xfer_iu = pool->iu_storage + i;
evt->hostdata = hostdata;
evt->ext_list = NULL;
@@ -588,7 +589,7 @@ static void init_event_struct(struct srp_event_struct *evt_struct,
evt_struct->cmnd_done = NULL;
evt_struct->sync_srp = NULL;
evt_struct->crq.format = format;
- evt_struct->crq.timeout = timeout;
+ evt_struct->crq.timeout = cpu_to_be16(timeout);
evt_struct->done = done;
}
@@ -659,8 +660,8 @@ static int map_sg_list(struct scsi_cmnd *cmd, int nseg,
scsi_for_each_sg(cmd, sg, nseg, i) {
struct srp_direct_buf *descr = md + i;
- descr->va = sg_dma_address(sg);
- descr->len = sg_dma_len(sg);
+ descr->va = cpu_to_be64(sg_dma_address(sg));
+ descr->len = cpu_to_be32(sg_dma_len(sg));
descr->key = 0;
total_length += sg_dma_len(sg);
}
@@ -703,13 +704,14 @@ static int map_sg_data(struct scsi_cmnd *cmd,
}
indirect->table_desc.va = 0;
- indirect->table_desc.len = sg_mapped * sizeof(struct srp_direct_buf);
+ indirect->table_desc.len = cpu_to_be32(sg_mapped *
+ sizeof(struct srp_direct_buf));
indirect->table_desc.key = 0;
if (sg_mapped <= MAX_INDIRECT_BUFS) {
total_length = map_sg_list(cmd, sg_mapped,
&indirect->desc_list[0]);
- indirect->len = total_length;
+ indirect->len = cpu_to_be32(total_length);
return 1;
}
@@ -731,9 +733,10 @@ static int map_sg_data(struct scsi_cmnd *cmd,
total_length = map_sg_list(cmd, sg_mapped, evt_struct->ext_list);
- indirect->len = total_length;
- indirect->table_desc.va = evt_struct->ext_list_token;
- indirect->table_desc.len = sg_mapped * sizeof(indirect->desc_list[0]);
+ indirect->len = cpu_to_be32(total_length);
+ indirect->table_desc.va = cpu_to_be64(evt_struct->ext_list_token);
+ indirect->table_desc.len = cpu_to_be32(sg_mapped *
+ sizeof(indirect->desc_list[0]));
memcpy(indirect->desc_list, evt_struct->ext_list,
MAX_INDIRECT_BUFS * sizeof(struct srp_direct_buf));
return 1;
@@ -849,7 +852,7 @@ static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
struct ibmvscsi_host_data *hostdata,
unsigned long timeout)
{
- u64 *crq_as_u64 = (u64 *) &evt_struct->crq;
+ __be64 *crq_as_u64 = (__be64 *)&evt_struct->crq;
int request_status = 0;
int rc;
int srp_req = 0;
@@ -920,8 +923,9 @@ static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
add_timer(&evt_struct->timer);
}
- if ((rc =
- ibmvscsi_send_crq(hostdata, crq_as_u64[0], crq_as_u64[1])) != 0) {
+ rc = ibmvscsi_send_crq(hostdata, be64_to_cpu(crq_as_u64[0]),
+ be64_to_cpu(crq_as_u64[1]));
+ if (rc != 0) {
list_del(&evt_struct->list);
del_timer(&evt_struct->timer);
@@ -987,15 +991,16 @@ static void handle_cmd_rsp(struct srp_event_struct *evt_struct)
if (((cmnd->result >> 1) & 0x1f) == CHECK_CONDITION)
memcpy(cmnd->sense_buffer,
rsp->data,
- rsp->sense_data_len);
+ be32_to_cpu(rsp->sense_data_len));
unmap_cmd_data(&evt_struct->iu.srp.cmd,
evt_struct,
evt_struct->hostdata->dev);
if (rsp->flags & SRP_RSP_FLAG_DOOVER)
- scsi_set_resid(cmnd, rsp->data_out_res_cnt);
+ scsi_set_resid(cmnd,
+ be32_to_cpu(rsp->data_out_res_cnt));
else if (rsp->flags & SRP_RSP_FLAG_DIOVER)
- scsi_set_resid(cmnd, rsp->data_in_res_cnt);
+ scsi_set_resid(cmnd, be32_to_cpu(rsp->data_in_res_cnt));
}
if (evt_struct->cmnd_done)
@@ -1037,7 +1042,7 @@ static int ibmvscsi_queuecommand_lck(struct scsi_cmnd *cmnd,
memset(srp_cmd, 0x00, SRP_MAX_IU_LEN);
srp_cmd->opcode = SRP_CMD;
memcpy(srp_cmd->cdb, cmnd->cmnd, sizeof(srp_cmd->cdb));
- srp_cmd->lun = ((u64) lun) << 48;
+ srp_cmd->lun = cpu_to_be64(((u64)lun) << 48);
if (!map_data_for_srp_cmd(cmnd, evt_struct, srp_cmd, hostdata->dev)) {
if (!firmware_has_feature(FW_FEATURE_CMO))
@@ -1062,9 +1067,10 @@ static int ibmvscsi_queuecommand_lck(struct scsi_cmnd *cmnd,
if ((in_fmt == SRP_DATA_DESC_INDIRECT ||
out_fmt == SRP_DATA_DESC_INDIRECT) &&
indirect->table_desc.va == 0) {
- indirect->table_desc.va = evt_struct->crq.IU_data_ptr +
+ indirect->table_desc.va =
+ cpu_to_be64(be64_to_cpu(evt_struct->crq.IU_data_ptr) +
offsetof(struct srp_cmd, add_data) +
- offsetof(struct srp_indirect_buf, desc_list);
+ offsetof(struct srp_indirect_buf, desc_list));
}
return ibmvscsi_send_srp_event(evt_struct, hostdata, 0);
@@ -1158,7 +1164,7 @@ static void login_rsp(struct srp_event_struct *evt_struct)
* request_limit could have been set to -1 by this client.
*/
atomic_set(&hostdata->request_limit,
- evt_struct->xfer_iu->srp.login_rsp.req_lim_delta);
+ be32_to_cpu(evt_struct->xfer_iu->srp.login_rsp.req_lim_delta));
/* If we had any pending I/Os, kick them */
scsi_unblock_requests(hostdata->host);
@@ -1184,8 +1190,9 @@ static int send_srp_login(struct ibmvscsi_host_data *hostdata)
login = &evt_struct->iu.srp.login_req;
memset(login, 0, sizeof(*login));
login->opcode = SRP_LOGIN_REQ;
- login->req_it_iu_len = sizeof(union srp_iu);
- login->req_buf_fmt = SRP_BUF_FORMAT_DIRECT | SRP_BUF_FORMAT_INDIRECT;
+ login->req_it_iu_len = cpu_to_be32(sizeof(union srp_iu));
+ login->req_buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
+ SRP_BUF_FORMAT_INDIRECT);
spin_lock_irqsave(hostdata->host->host_lock, flags);
/* Start out with a request limit of 0, since this is negotiated in
@@ -1214,12 +1221,13 @@ static void capabilities_rsp(struct srp_event_struct *evt_struct)
dev_err(hostdata->dev, "error 0x%X getting capabilities info\n",
evt_struct->xfer_iu->mad.capabilities.common.status);
} else {
- if (hostdata->caps.migration.common.server_support != SERVER_SUPPORTS_CAP)
+ if (hostdata->caps.migration.common.server_support !=
+ cpu_to_be16(SERVER_SUPPORTS_CAP))
dev_info(hostdata->dev, "Partition migration not supported\n");
if (client_reserve) {
if (hostdata->caps.reserve.common.server_support ==
- SERVER_SUPPORTS_CAP)
+ cpu_to_be16(SERVER_SUPPORTS_CAP))
dev_info(hostdata->dev, "Client reserve enabled\n");
else
dev_info(hostdata->dev, "Client reserve not supported\n");
@@ -1251,9 +1259,9 @@ static void send_mad_capabilities(struct ibmvscsi_host_data *hostdata)
req = &evt_struct->iu.mad.capabilities;
memset(req, 0, sizeof(*req));
- hostdata->caps.flags = CAP_LIST_SUPPORTED;
+ hostdata->caps.flags = cpu_to_be32(CAP_LIST_SUPPORTED);
if (hostdata->client_migrated)
- hostdata->caps.flags |= CLIENT_MIGRATED;
+ hostdata->caps.flags |= cpu_to_be32(CLIENT_MIGRATED);
strncpy(hostdata->caps.name, dev_name(&hostdata->host->shost_gendev),
sizeof(hostdata->caps.name));
@@ -1264,22 +1272,31 @@ static void send_mad_capabilities(struct ibmvscsi_host_data *hostdata)
strncpy(hostdata->caps.loc, location, sizeof(hostdata->caps.loc));
hostdata->caps.loc[sizeof(hostdata->caps.loc) - 1] = '\0';
- req->common.type = VIOSRP_CAPABILITIES_TYPE;
- req->buffer = hostdata->caps_addr;
+ req->common.type = cpu_to_be32(VIOSRP_CAPABILITIES_TYPE);
+ req->buffer = cpu_to_be64(hostdata->caps_addr);
- hostdata->caps.migration.common.cap_type = MIGRATION_CAPABILITIES;
- hostdata->caps.migration.common.length = sizeof(hostdata->caps.migration);
- hostdata->caps.migration.common.server_support = SERVER_SUPPORTS_CAP;
- hostdata->caps.migration.ecl = 1;
+ hostdata->caps.migration.common.cap_type =
+ cpu_to_be32(MIGRATION_CAPABILITIES);
+ hostdata->caps.migration.common.length =
+ cpu_to_be16(sizeof(hostdata->caps.migration));
+ hostdata->caps.migration.common.server_support =
+ cpu_to_be16(SERVER_SUPPORTS_CAP);
+ hostdata->caps.migration.ecl = cpu_to_be32(1);
if (client_reserve) {
- hostdata->caps.reserve.common.cap_type = RESERVATION_CAPABILITIES;
- hostdata->caps.reserve.common.length = sizeof(hostdata->caps.reserve);
- hostdata->caps.reserve.common.server_support = SERVER_SUPPORTS_CAP;
- hostdata->caps.reserve.type = CLIENT_RESERVE_SCSI_2;
- req->common.length = sizeof(hostdata->caps);
+ hostdata->caps.reserve.common.cap_type =
+ cpu_to_be32(RESERVATION_CAPABILITIES);
+ hostdata->caps.reserve.common.length =
+ cpu_to_be16(sizeof(hostdata->caps.reserve));
+ hostdata->caps.reserve.common.server_support =
+ cpu_to_be16(SERVER_SUPPORTS_CAP);
+ hostdata->caps.reserve.type =
+ cpu_to_be32(CLIENT_RESERVE_SCSI_2);
+ req->common.length =
+ cpu_to_be16(sizeof(hostdata->caps));
} else
- req->common.length = sizeof(hostdata->caps) - sizeof(hostdata->caps.reserve);
+ req->common.length = cpu_to_be16(sizeof(hostdata->caps) -
+ sizeof(hostdata->caps.reserve));
spin_lock_irqsave(hostdata->host->host_lock, flags);
if (ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2))
@@ -1297,7 +1314,7 @@ static void send_mad_capabilities(struct ibmvscsi_host_data *hostdata)
static void fast_fail_rsp(struct srp_event_struct *evt_struct)
{
struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;
- u8 status = evt_struct->xfer_iu->mad.fast_fail.common.status;
+ u16 status = be16_to_cpu(evt_struct->xfer_iu->mad.fast_fail.common.status);
if (status == VIOSRP_MAD_NOT_SUPPORTED)
dev_err(hostdata->dev, "fast_fail not supported in server\n");
@@ -1334,8 +1351,8 @@ static int enable_fast_fail(struct ibmvscsi_host_data *hostdata)
fast_fail_mad = &evt_struct->iu.mad.fast_fail;
memset(fast_fail_mad, 0, sizeof(*fast_fail_mad));
- fast_fail_mad->common.type = VIOSRP_ENABLE_FAST_FAIL;
- fast_fail_mad->common.length = sizeof(*fast_fail_mad);
+ fast_fail_mad->common.type = cpu_to_be32(VIOSRP_ENABLE_FAST_FAIL);
+ fast_fail_mad->common.length = cpu_to_be16(sizeof(*fast_fail_mad));
spin_lock_irqsave(hostdata->host->host_lock, flags);
rc = ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2);
@@ -1362,15 +1379,15 @@ static void adapter_info_rsp(struct srp_event_struct *evt_struct)
"host partition %s (%d), OS %d, max io %u\n",
hostdata->madapter_info.srp_version,
hostdata->madapter_info.partition_name,
- hostdata->madapter_info.partition_number,
- hostdata->madapter_info.os_type,
- hostdata->madapter_info.port_max_txu[0]);
+ be32_to_cpu(hostdata->madapter_info.partition_number),
+ be32_to_cpu(hostdata->madapter_info.os_type),
+ be32_to_cpu(hostdata->madapter_info.port_max_txu[0]));
if (hostdata->madapter_info.port_max_txu[0])
hostdata->host->max_sectors =
- hostdata->madapter_info.port_max_txu[0] >> 9;
+ be32_to_cpu(hostdata->madapter_info.port_max_txu[0]) >> 9;
- if (hostdata->madapter_info.os_type == 3 &&
+ if (be32_to_cpu(hostdata->madapter_info.os_type) == 3 &&
strcmp(hostdata->madapter_info.srp_version, "1.6a") <= 0) {
dev_err(hostdata->dev, "host (Ver. %s) doesn't support large transfers\n",
hostdata->madapter_info.srp_version);
@@ -1379,7 +1396,7 @@ static void adapter_info_rsp(struct srp_event_struct *evt_struct)
hostdata->host->sg_tablesize = MAX_INDIRECT_BUFS;
}
- if (hostdata->madapter_info.os_type == 3) {
+ if (be32_to_cpu(hostdata->madapter_info.os_type) == 3) {
enable_fast_fail(hostdata);
return;
}
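The max_sectors computation above converts the adapter-reported maximum transfer size (bytes, big-endian on the wire) into 512-byte sectors with a right shift by 9. A trivial sketch with an assumed example value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Illustrative value only: a host reporting a 1 MiB max transfer. */
	uint32_t port_max_txu = 1024 * 1024;	   /* bytes, after be32_to_cpu() */
	uint32_t max_sectors  = port_max_txu >> 9; /* 512-byte sectors */

	printf("%u bytes -> %u sectors\n", port_max_txu, max_sectors);
	return 0;	/* prints: 1048576 bytes -> 2048 sectors */
}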
@@ -1414,9 +1431,9 @@ static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata)
req = &evt_struct->iu.mad.adapter_info;
memset(req, 0x00, sizeof(*req));
- req->common.type = VIOSRP_ADAPTER_INFO_TYPE;
- req->common.length = sizeof(hostdata->madapter_info);
- req->buffer = hostdata->adapter_info_addr;
+ req->common.type = cpu_to_be32(VIOSRP_ADAPTER_INFO_TYPE);
+ req->common.length = cpu_to_be16(sizeof(hostdata->madapter_info));
+ req->buffer = cpu_to_be64(hostdata->adapter_info_addr);
spin_lock_irqsave(hostdata->host->host_lock, flags);
if (ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2))
@@ -1501,7 +1518,7 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
/* Set up an abort SRP command */
memset(tsk_mgmt, 0x00, sizeof(*tsk_mgmt));
tsk_mgmt->opcode = SRP_TSK_MGMT;
- tsk_mgmt->lun = ((u64) lun) << 48;
+ tsk_mgmt->lun = cpu_to_be64(((u64) lun) << 48);
tsk_mgmt->tsk_mgmt_func = SRP_TSK_ABORT_TASK;
tsk_mgmt->task_tag = (u64) found_evt;
@@ -1624,7 +1641,7 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
/* Set up a lun reset SRP command */
memset(tsk_mgmt, 0x00, sizeof(*tsk_mgmt));
tsk_mgmt->opcode = SRP_TSK_MGMT;
- tsk_mgmt->lun = ((u64) lun) << 48;
+ tsk_mgmt->lun = cpu_to_be64(((u64) lun) << 48);
tsk_mgmt->tsk_mgmt_func = SRP_TSK_LUN_RESET;
evt->sync_srp = &srp_rsp;
@@ -1735,8 +1752,9 @@ static void ibmvscsi_handle_crq(struct viosrp_crq *crq,
{
long rc;
unsigned long flags;
+ /* The hypervisor copies our tag value here so no byteswapping */
struct srp_event_struct *evt_struct =
- (struct srp_event_struct *)crq->IU_data_ptr;
+ (__force struct srp_event_struct *)crq->IU_data_ptr;
switch (crq->valid) {
case 0xC0: /* initialization */
switch (crq->format) {
@@ -1792,18 +1810,18 @@ static void ibmvscsi_handle_crq(struct viosrp_crq *crq,
*/
if (!valid_event_struct(&hostdata->pool, evt_struct)) {
dev_err(hostdata->dev, "returned correlation_token 0x%p is invalid!\n",
- (void *)crq->IU_data_ptr);
+ evt_struct);
return;
}
if (atomic_read(&evt_struct->free)) {
dev_err(hostdata->dev, "received duplicate correlation_token 0x%p!\n",
- (void *)crq->IU_data_ptr);
+ evt_struct);
return;
}
if (crq->format == VIOSRP_SRP_FORMAT)
- atomic_add(evt_struct->xfer_iu->srp.rsp.req_lim_delta,
+ atomic_add(be32_to_cpu(evt_struct->xfer_iu->srp.rsp.req_lim_delta),
&hostdata->request_limit);
del_timer(&evt_struct->timer);
@@ -1856,13 +1874,11 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata,
/* Set up a host config MAD */
memset(host_config, 0x00, sizeof(*host_config));
- host_config->common.type = VIOSRP_HOST_CONFIG_TYPE;
- host_config->common.length = length;
- host_config->buffer = addr = dma_map_single(hostdata->dev, buffer,
- length,
- DMA_BIDIRECTIONAL);
+ host_config->common.type = cpu_to_be32(VIOSRP_HOST_CONFIG_TYPE);
+ host_config->common.length = cpu_to_be16(length);
+ addr = dma_map_single(hostdata->dev, buffer, length, DMA_BIDIRECTIONAL);
- if (dma_mapping_error(hostdata->dev, host_config->buffer)) {
+ if (dma_mapping_error(hostdata->dev, addr)) {
if (!firmware_has_feature(FW_FEATURE_CMO))
dev_err(hostdata->dev,
"dma_mapping error getting host config\n");
@@ -1870,6 +1886,8 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata,
return -1;
}
+ host_config->buffer = cpu_to_be64(addr);
+
init_completion(&evt_struct->comp);
spin_lock_irqsave(hostdata->host->host_lock, flags);
rc = ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2);
diff --git a/drivers/scsi/ibmvscsi/viosrp.h b/drivers/scsi/ibmvscsi/viosrp.h
index 2cd735d..1162430 100644
--- a/drivers/scsi/ibmvscsi/viosrp.h
+++ b/drivers/scsi/ibmvscsi/viosrp.h
@@ -75,9 +75,9 @@ struct viosrp_crq {
u8 format; /* SCSI vs out-of-band */
u8 reserved;
u8 status; /* non-scsi failure? (e.g. DMA failure) */
- u16 timeout; /* in seconds */
- u16 IU_length; /* in bytes */
- u64 IU_data_ptr; /* the TCE for transferring data */
+ __be16 timeout; /* in seconds */
+ __be16 IU_length; /* in bytes */
+ __be64 IU_data_ptr; /* the TCE for transferring data */
};
/* MADs are Management requests above and beyond the IUs defined in the SRP
@@ -124,10 +124,10 @@ enum viosrp_capability_flag {
* Common MAD header
*/
struct mad_common {
- u32 type;
- u16 status;
- u16 length;
- u64 tag;
+ __be32 type;
+ __be16 status;
+ __be16 length;
+ __be64 tag;
};
/*
@@ -139,23 +139,23 @@ struct mad_common {
*/
struct viosrp_empty_iu {
struct mad_common common;
- u64 buffer;
- u32 port;
+ __be64 buffer;
+ __be32 port;
};
struct viosrp_error_log {
struct mad_common common;
- u64 buffer;
+ __be64 buffer;
};
struct viosrp_adapter_info {
struct mad_common common;
- u64 buffer;
+ __be64 buffer;
};
struct viosrp_host_config {
struct mad_common common;
- u64 buffer;
+ __be64 buffer;
};
struct viosrp_fast_fail {
@@ -164,27 +164,27 @@ struct viosrp_fast_fail {
struct viosrp_capabilities {
struct mad_common common;
- u64 buffer;
+ __be64 buffer;
};
struct mad_capability_common {
- u32 cap_type;
- u16 length;
- u16 server_support;
+ __be32 cap_type;
+ __be16 length;
+ __be16 server_support;
};
struct mad_reserve_cap {
struct mad_capability_common common;
- u32 type;
+ __be32 type;
};
struct mad_migration_cap {
struct mad_capability_common common;
- u32 ecl;
+ __be32 ecl;
};
struct capabilities{
- u32 flags;
+ __be32 flags;
char name[SRP_MAX_LOC_LEN];
char loc[SRP_MAX_LOC_LEN];
struct mad_migration_cap migration;
@@ -208,10 +208,10 @@ union viosrp_iu {
struct mad_adapter_info_data {
char srp_version[8];
char partition_name[96];
- u32 partition_number;
- u32 mad_version;
- u32 os_type;
- u32 port_max_txu[8]; /* per-port maximum transfer */
+ __be32 partition_number;
+ __be32 mad_version;
+ __be32 os_type;
+ __be32 port_max_txu[8]; /* per-port maximum transfer */
};
#endif
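The viosrp.h changes above replace plain u16/u32/u64 fields with __be16/__be32/__be64 so that sparse can verify every access goes through an endianness accessor. A portable userspace sketch of the underlying pattern (the 4-byte header layout is hypothetical, not the real viosrp structures):

#include <stdint.h>
#include <stdio.h>

/* Portable stand-ins for the kernel's be16_to_cpu()/cpu_to_be16(): read and
 * write big-endian fields through byte pointers, independent of host order. */
static uint16_t get_be16(const unsigned char *p)
{
	return (uint16_t)(p[0] << 8 | p[1]);
}

static void put_be16(unsigned char *p, uint16_t v)
{
	p[0] = v >> 8;
	p[1] = v & 0xff;
}

int main(void)
{
	/* A toy 4-byte header: be16 type, be16 length. */
	unsigned char hdr[4];

	put_be16(&hdr[0], 0x0102);	/* type */
	put_be16(&hdr[2], 24);		/* length */

	printf("type=0x%04x length=%u raw=%02x %02x %02x %02x\n",
	       get_be16(&hdr[0]), get_be16(&hdr[2]),
	       hdr[0], hdr[1], hdr[2], hdr[3]);
	return 0;
}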
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index df43bfe..4e1b75c 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -708,6 +708,7 @@ struct lpfc_hba {
uint32_t cfg_multi_ring_type;
uint32_t cfg_poll;
uint32_t cfg_poll_tmo;
+ uint32_t cfg_task_mgmt_tmo;
uint32_t cfg_use_msi;
uint32_t cfg_fcp_imax;
uint32_t cfg_fcp_cpu_map;
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 16498e0..00656fc 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -1865,8 +1865,10 @@ lpfc_##attr##_set(struct lpfc_vport *vport, uint val) \
{ \
if (val >= minval && val <= maxval) {\
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, \
- "3053 lpfc_" #attr " changed from %d to %d\n", \
- vport->cfg_##attr, val); \
+ "3053 lpfc_" #attr \
+ " changed from %d (x%x) to %d (x%x)\n", \
+ vport->cfg_##attr, vport->cfg_##attr, \
+ val, val); \
vport->cfg_##attr = val;\
return 0;\
}\
@@ -4011,8 +4013,11 @@ LPFC_ATTR_R(ack0, 0, 0, 1, "Enable ACK0 support");
# For [0], FCP commands are issued to Work Queues in a round robin fashion.
# For [1], FCP commands are issued to a Work Queue associated with the
# current CPU.
+# The driver will set this to 1 if it is able to set up CPU affinity
+# for FCP I/Os through the Work Queue associated with the current CPU.
+# Otherwise, round robin scheduling of FCP I/Os through the WQs is used.
*/
-LPFC_ATTR_RW(fcp_io_sched, 0, 0, 1, "Determine scheduling algrithmn for "
+LPFC_ATTR_RW(fcp_io_sched, 0, 0, 1, "Determine scheduling algorithm for "
"issuing commands [0] - Round Robin, [1] - Current CPU");
/*
@@ -4110,6 +4115,12 @@ LPFC_ATTR_RW(poll_tmo, 10, 1, 255,
"Milliseconds driver will wait between polling FCP ring");
/*
+# lpfc_task_mgmt_tmo: Maximum time to wait for task management commands
+# to complete in seconds. Value range is [5,180], default value is 60.
+*/
+LPFC_ATTR_RW(task_mgmt_tmo, 60, 5, 180,
+ "Maximum time to wait for task management commands to complete");
+/*
# lpfc_use_msi: Use MSI (Message Signaled Interrupts) in systems that
# support this feature
# 0 = MSI disabled
@@ -4295,6 +4306,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_issue_reset,
&dev_attr_lpfc_poll,
&dev_attr_lpfc_poll_tmo,
+ &dev_attr_lpfc_task_mgmt_tmo,
&dev_attr_lpfc_use_msi,
&dev_attr_lpfc_fcp_imax,
&dev_attr_lpfc_fcp_cpu_map,
@@ -5274,6 +5286,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
lpfc_topology_init(phba, lpfc_topology);
lpfc_link_speed_init(phba, lpfc_link_speed);
lpfc_poll_tmo_init(phba, lpfc_poll_tmo);
+ lpfc_task_mgmt_tmo_init(phba, lpfc_task_mgmt_tmo);
lpfc_enable_npiv_init(phba, lpfc_enable_npiv);
lpfc_fcf_failover_policy_init(phba, lpfc_fcf_failover_policy);
lpfc_enable_rrq_init(phba, lpfc_enable_rrq);
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index 79c13c3..b92aec9 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -317,6 +317,11 @@ lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
}
spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+ /* Close the timeout handler abort window */
+ spin_lock_irqsave(&phba->hbalock, flags);
+ cmdiocbq->iocb_flag &= ~LPFC_IO_CMD_OUTSTANDING;
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+
iocb = &dd_data->context_un.iocb;
ndlp = iocb->ndlp;
rmp = iocb->rmp;
@@ -387,6 +392,7 @@ lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
int request_nseg;
int reply_nseg;
struct bsg_job_data *dd_data;
+ unsigned long flags;
uint32_t creg_val;
int rc = 0;
int iocb_stat;
@@ -501,14 +507,24 @@ lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
}
iocb_stat = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
- if (iocb_stat == IOCB_SUCCESS)
+
+ if (iocb_stat == IOCB_SUCCESS) {
+ spin_lock_irqsave(&phba->hbalock, flags);
+ /* make sure the I/O had not been completed yet */
+ if (cmdiocbq->iocb_flag & LPFC_IO_LIBDFC) {
+ /* open up abort window to timeout handler */
+ cmdiocbq->iocb_flag |= LPFC_IO_CMD_OUTSTANDING;
+ }
+ spin_unlock_irqrestore(&phba->hbalock, flags);
return 0; /* done for now */
- else if (iocb_stat == IOCB_BUSY)
+ } else if (iocb_stat == IOCB_BUSY) {
rc = -EAGAIN;
- else
+ } else {
rc = -EIO;
+ }
/* iocb failed so cleanup */
+ job->dd_data = NULL;
free_rmp:
lpfc_free_bsg_buffers(phba, rmp);
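The LPFC_IO_CMD_OUTSTANDING handling above implements an "abort window": the issue path opens it under the lock only if the iocb is still live, the completion handler closes it, and the timeout handler may abort only while it is open. A runnable userspace reduction of that handshake (the flag names and the single pthread mutex are illustrative stand-ins for iocb_flag and hbalock):

#include <pthread.h>
#include <stdio.h>

#define IO_LIBDFC          0x0001
#define IO_CMD_OUTSTANDING 0x2000

static pthread_mutex_t hbalock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int iocb_flag = IO_LIBDFC;

static void issue_success(void)
{
	pthread_mutex_lock(&hbalock);
	/* Only open the abort window if completion has not already run and
	 * released the iocb (LIBDFC still set). */
	if (iocb_flag & IO_LIBDFC)
		iocb_flag |= IO_CMD_OUTSTANDING;
	pthread_mutex_unlock(&hbalock);
}

static void completion(void)
{
	pthread_mutex_lock(&hbalock);
	iocb_flag &= ~IO_CMD_OUTSTANDING;	/* close the window */
	pthread_mutex_unlock(&hbalock);
}

static int timeout_handler(void)
{
	pthread_mutex_lock(&hbalock);
	if (!(iocb_flag & IO_CMD_OUTSTANDING)) {
		pthread_mutex_unlock(&hbalock);
		return -1;	/* window closed: nothing to abort (-EAGAIN) */
	}
	/* ...abort the iocb here, still under the lock... */
	pthread_mutex_unlock(&hbalock);
	return 0;
}

int main(void)
{
	issue_success();
	printf("timeout before completion: %d\n", timeout_handler()); /* 0 */
	completion();
	printf("timeout after completion:  %d\n", timeout_handler()); /* -1 */
	return 0;
}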
@@ -577,6 +593,11 @@ lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
}
spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+ /* Close the timeout handler abort window */
+ spin_lock_irqsave(&phba->hbalock, flags);
+ cmdiocbq->iocb_flag &= ~LPFC_IO_CMD_OUTSTANDING;
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+
rsp = &rspiocbq->iocb;
pcmd = (struct lpfc_dmabuf *)cmdiocbq->context2;
prsp = (struct lpfc_dmabuf *)pcmd->list.next;
@@ -639,6 +660,7 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job)
struct lpfc_iocbq *cmdiocbq;
uint16_t rpi = 0;
struct bsg_job_data *dd_data;
+ unsigned long flags;
uint32_t creg_val;
int rc = 0;
@@ -721,15 +743,25 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job)
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
- if (rc == IOCB_SUCCESS)
+ if (rc == IOCB_SUCCESS) {
+ spin_lock_irqsave(&phba->hbalock, flags);
+ /* make sure the I/O had not been completed/released */
+ if (cmdiocbq->iocb_flag & LPFC_IO_LIBDFC) {
+ /* open up abort window to timeout handler */
+ cmdiocbq->iocb_flag |= LPFC_IO_CMD_OUTSTANDING;
+ }
+ spin_unlock_irqrestore(&phba->hbalock, flags);
return 0; /* done for now */
- else if (rc == IOCB_BUSY)
+ } else if (rc == IOCB_BUSY) {
rc = -EAGAIN;
- else
+ } else {
rc = -EIO;
+ }
-linkdown_err:
+ /* iocb failed so cleanup */
+ job->dd_data = NULL;
+linkdown_err:
cmdiocbq->context1 = ndlp;
lpfc_els_free_iocb(phba, cmdiocbq);
@@ -1249,7 +1281,7 @@ lpfc_bsg_hba_get_event(struct fc_bsg_job *job)
struct lpfc_hba *phba = vport->phba;
struct get_ct_event *event_req;
struct get_ct_event_reply *event_reply;
- struct lpfc_bsg_event *evt;
+ struct lpfc_bsg_event *evt, *evt_next;
struct event_data *evt_dat = NULL;
unsigned long flags;
uint32_t rc = 0;
@@ -1269,7 +1301,7 @@ lpfc_bsg_hba_get_event(struct fc_bsg_job *job)
event_reply = (struct get_ct_event_reply *)
job->reply->reply_data.vendor_reply.vendor_rsp;
spin_lock_irqsave(&phba->ct_ev_lock, flags);
- list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
+ list_for_each_entry_safe(evt, evt_next, &phba->ct_ev_waiters, node) {
if (evt->reg_id == event_req->ev_reg_id) {
if (list_empty(&evt->events_to_get))
break;
@@ -1370,6 +1402,11 @@ lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
}
spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+ /* Close the timeout handler abort window */
+ spin_lock_irqsave(&phba->hbalock, flags);
+ cmdiocbq->iocb_flag &= ~LPFC_IO_CMD_OUTSTANDING;
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+
ndlp = dd_data->context_un.iocb.ndlp;
cmp = cmdiocbq->context2;
bmp = cmdiocbq->context3;
@@ -1433,6 +1470,7 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
int rc = 0;
struct lpfc_nodelist *ndlp = NULL;
struct bsg_job_data *dd_data;
+ unsigned long flags;
uint32_t creg_val;
/* allocate our bsg tracking structure */
@@ -1542,8 +1580,19 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
- if (rc == IOCB_SUCCESS)
+ if (rc == IOCB_SUCCESS) {
+ spin_lock_irqsave(&phba->hbalock, flags);
+ /* make sure the I/O had not been completed/released */
+ if (ctiocb->iocb_flag & LPFC_IO_LIBDFC) {
+ /* open up abort window to timeout handler */
+ ctiocb->iocb_flag |= LPFC_IO_CMD_OUTSTANDING;
+ }
+ spin_unlock_irqrestore(&phba->hbalock, flags);
return 0; /* done for now */
+ }
+
+ /* iocb failed so cleanup */
+ job->dd_data = NULL;
issue_ct_rsp_exit:
lpfc_sli_release_iocbq(phba, ctiocb);
@@ -5284,9 +5333,15 @@ lpfc_bsg_timeout(struct fc_bsg_job *job)
* remove it from the txq queue and call cancel iocbs.
* Otherwise, call abort iotag
*/
-
cmdiocb = dd_data->context_un.iocb.cmdiocbq;
- spin_lock_irq(&phba->hbalock);
+ spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+
+ spin_lock_irqsave(&phba->hbalock, flags);
+ /* make sure the I/O abort window is still open */
+ if (!(cmdiocb->iocb_flag & LPFC_IO_CMD_OUTSTANDING)) {
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+ return -EAGAIN;
+ }
list_for_each_entry_safe(check_iocb, next_iocb, &pring->txq,
list) {
if (check_iocb == cmdiocb) {
@@ -5296,8 +5351,7 @@ lpfc_bsg_timeout(struct fc_bsg_job *job)
}
if (list_empty(&completions))
lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
- spin_unlock_irq(&phba->hbalock);
- spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+ spin_unlock_irqrestore(&phba->hbalock, flags);
if (!list_empty(&completions)) {
lpfc_sli_cancel_iocbs(phba, &completions,
IOSTAT_LOCAL_REJECT,
@@ -5321,9 +5375,10 @@ lpfc_bsg_timeout(struct fc_bsg_job *job)
* remove it from the txq queue and call cancel iocbs.
* Otherwise, call abort iotag.
*/
-
cmdiocb = dd_data->context_un.menlo.cmdiocbq;
- spin_lock_irq(&phba->hbalock);
+ spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+
+ spin_lock_irqsave(&phba->hbalock, flags);
list_for_each_entry_safe(check_iocb, next_iocb, &pring->txq,
list) {
if (check_iocb == cmdiocb) {
@@ -5333,8 +5388,7 @@ lpfc_bsg_timeout(struct fc_bsg_job *job)
}
if (list_empty(&completions))
lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
- spin_unlock_irq(&phba->hbalock);
- spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+ spin_unlock_irqrestore(&phba->hbalock, flags);
if (!list_empty(&completions)) {
lpfc_sli_cancel_iocbs(phba, &completions,
IOSTAT_LOCAL_REJECT,
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 60d6ca2..7801601 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -4437,6 +4437,7 @@ lpfc_nlp_logo_unreg(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
if (!ndlp)
return;
lpfc_issue_els_logo(vport, ndlp, 0);
+ mempool_free(pmb, phba->mbox_mem_pool);
}
/*
@@ -4456,7 +4457,15 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
int rc;
uint16_t rpi;
- if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
+ if (ndlp->nlp_flag & NLP_RPI_REGISTERED ||
+ ndlp->nlp_flag & NLP_REG_LOGIN_SEND) {
+ if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
+ "3366 RPI x%x needs to be "
+ "unregistered nlp_flag x%x "
+ "did x%x\n",
+ ndlp->nlp_rpi, ndlp->nlp_flag,
+ ndlp->nlp_DID);
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (mbox) {
/* SLI4 ports require the physical rpi value. */
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 501147c..647f5bf 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -3031,10 +3031,10 @@ lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba)
phba->sli4_hba.scsi_xri_max);
spin_lock_irq(&phba->scsi_buf_list_get_lock);
- spin_lock_irq(&phba->scsi_buf_list_put_lock);
+ spin_lock(&phba->scsi_buf_list_put_lock);
list_splice_init(&phba->lpfc_scsi_buf_list_get, &scsi_sgl_list);
list_splice(&phba->lpfc_scsi_buf_list_put, &scsi_sgl_list);
- spin_unlock_irq(&phba->scsi_buf_list_put_lock);
+ spin_unlock(&phba->scsi_buf_list_put_lock);
spin_unlock_irq(&phba->scsi_buf_list_get_lock);
if (phba->sli4_hba.scsi_xri_cnt > phba->sli4_hba.scsi_xri_max) {
@@ -3070,10 +3070,10 @@ lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba)
psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
}
spin_lock_irq(&phba->scsi_buf_list_get_lock);
- spin_lock_irq(&phba->scsi_buf_list_put_lock);
+ spin_lock(&phba->scsi_buf_list_put_lock);
list_splice_init(&scsi_sgl_list, &phba->lpfc_scsi_buf_list_get);
INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
- spin_unlock_irq(&phba->scsi_buf_list_put_lock);
+ spin_unlock(&phba->scsi_buf_list_put_lock);
spin_unlock_irq(&phba->scsi_buf_list_get_lock);
return 0;
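The locking fix above replaces a nested spin_lock_irq() with a plain spin_lock(): the outer *_irq() call has already disabled local interrupts, and unlocking a nested *_irq() variant would have re-enabled them while the outer lock was still held. A kernel-style sketch of the corrected nesting (not a standalone program):

/* Outer lock disables local IRQs; the nested lock must not touch IRQ state. */
spin_lock_irq(&phba->scsi_buf_list_get_lock);
spin_lock(&phba->scsi_buf_list_put_lock);

/* ...splice lpfc_scsi_buf_list_put onto lpfc_scsi_buf_list_get... */

spin_unlock(&phba->scsi_buf_list_put_lock);	/* IRQs stay disabled */
spin_unlock_irq(&phba->scsi_buf_list_get_lock);	/* IRQs re-enabled once, here */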
@@ -4859,6 +4859,9 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
struct lpfc_mqe *mqe;
int longs;
+ /* Get all the module params for configuring this host */
+ lpfc_get_cfgparam(phba);
+
/* Before proceed, wait for POST done and device ready */
rc = lpfc_sli4_post_status_check(phba);
if (rc)
@@ -4902,15 +4905,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
sizeof(struct lpfc_mbox_ext_buf_ctx));
INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);
- /*
- * We need to do a READ_CONFIG mailbox command here before
- * calling lpfc_get_cfgparam. For VFs this will report the
- * MAX_XRI, MAX_VPI, MAX_RPI, MAX_IOCB, and MAX_VFI settings.
- * All of the resources allocated
- * for this Port are tied to these values.
- */
- /* Get all the module params for configuring this host */
- lpfc_get_cfgparam(phba);
phba->max_vpi = LPFC_MAX_VPI;
/* This will be set to the correct value after the read_config mbox */
@@ -7141,19 +7135,6 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
phba->sli4_hba.fcp_wq = NULL;
}
- if (phba->pci_bar0_memmap_p) {
- iounmap(phba->pci_bar0_memmap_p);
- phba->pci_bar0_memmap_p = NULL;
- }
- if (phba->pci_bar2_memmap_p) {
- iounmap(phba->pci_bar2_memmap_p);
- phba->pci_bar2_memmap_p = NULL;
- }
- if (phba->pci_bar4_memmap_p) {
- iounmap(phba->pci_bar4_memmap_p);
- phba->pci_bar4_memmap_p = NULL;
- }
-
/* Release FCP CQ mapping array */
if (phba->sli4_hba.fcp_cq_map != NULL) {
kfree(phba->sli4_hba.fcp_cq_map);
@@ -7942,9 +7923,9 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
* particular PCI BARs regions is dependent on the type of
* SLI4 device.
*/
- if (pci_resource_start(pdev, 0)) {
- phba->pci_bar0_map = pci_resource_start(pdev, 0);
- bar0map_len = pci_resource_len(pdev, 0);
+ if (pci_resource_start(pdev, PCI_64BIT_BAR0)) {
+ phba->pci_bar0_map = pci_resource_start(pdev, PCI_64BIT_BAR0);
+ bar0map_len = pci_resource_len(pdev, PCI_64BIT_BAR0);
/*
* Map SLI4 PCI Config Space Register base to a kernel virtual
@@ -7958,6 +7939,7 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
"registers.\n");
goto out;
}
+ phba->pci_bar0_memmap_p = phba->sli4_hba.conf_regs_memmap_p;
/* Set up BAR0 PCI config space register memory map */
lpfc_sli4_bar0_register_memmap(phba, if_type);
} else {
@@ -7980,13 +7962,13 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
}
if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) &&
- (pci_resource_start(pdev, 2))) {
+ (pci_resource_start(pdev, PCI_64BIT_BAR2))) {
/*
* Map SLI4 if type 0 HBA Control Register base to a kernel
* virtual address and setup the registers.
*/
- phba->pci_bar1_map = pci_resource_start(pdev, 2);
- bar1map_len = pci_resource_len(pdev, 2);
+ phba->pci_bar1_map = pci_resource_start(pdev, PCI_64BIT_BAR2);
+ bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
phba->sli4_hba.ctrl_regs_memmap_p =
ioremap(phba->pci_bar1_map, bar1map_len);
if (!phba->sli4_hba.ctrl_regs_memmap_p) {
@@ -7994,17 +7976,18 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
"ioremap failed for SLI4 HBA control registers.\n");
goto out_iounmap_conf;
}
+ phba->pci_bar2_memmap_p = phba->sli4_hba.ctrl_regs_memmap_p;
lpfc_sli4_bar1_register_memmap(phba);
}
if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) &&
- (pci_resource_start(pdev, 4))) {
+ (pci_resource_start(pdev, PCI_64BIT_BAR4))) {
/*
* Map SLI4 if type 0 HBA Doorbell Register base to a kernel
* virtual address and setup the registers.
*/
- phba->pci_bar2_map = pci_resource_start(pdev, 4);
- bar2map_len = pci_resource_len(pdev, 4);
+ phba->pci_bar2_map = pci_resource_start(pdev, PCI_64BIT_BAR4);
+ bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
phba->sli4_hba.drbl_regs_memmap_p =
ioremap(phba->pci_bar2_map, bar2map_len);
if (!phba->sli4_hba.drbl_regs_memmap_p) {
@@ -8012,6 +7995,7 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
"ioremap failed for SLI4 HBA doorbell registers.\n");
goto out_iounmap_ctrl;
}
+ phba->pci_bar4_memmap_p = phba->sli4_hba.drbl_regs_memmap_p;
error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
if (error)
goto out_iounmap_all;
@@ -8405,7 +8389,8 @@ static int
lpfc_sli4_set_affinity(struct lpfc_hba *phba, int vectors)
{
int i, idx, saved_chann, used_chann, cpu, phys_id;
- int max_phys_id, num_io_channel, first_cpu;
+ int max_phys_id, min_phys_id;
+ int num_io_channel, first_cpu, chan;
struct lpfc_vector_map_info *cpup;
#ifdef CONFIG_X86
struct cpuinfo_x86 *cpuinfo;
@@ -8423,6 +8408,7 @@ lpfc_sli4_set_affinity(struct lpfc_hba *phba, int vectors)
phba->sli4_hba.num_present_cpu));
max_phys_id = 0;
+ min_phys_id = 0xff;
phys_id = 0;
num_io_channel = 0;
first_cpu = LPFC_VECTOR_MAP_EMPTY;
@@ -8446,9 +8432,12 @@ lpfc_sli4_set_affinity(struct lpfc_hba *phba, int vectors)
if (cpup->phys_id > max_phys_id)
max_phys_id = cpup->phys_id;
+ if (cpup->phys_id < min_phys_id)
+ min_phys_id = cpup->phys_id;
cpup++;
}
+ phys_id = min_phys_id;
/* Now associate the HBA vectors with specific CPUs */
for (idx = 0; idx < vectors; idx++) {
cpup = phba->sli4_hba.cpu_map;
@@ -8459,13 +8448,25 @@ lpfc_sli4_set_affinity(struct lpfc_hba *phba, int vectors)
for (i = 1; i < max_phys_id; i++) {
phys_id++;
if (phys_id > max_phys_id)
- phys_id = 0;
+ phys_id = min_phys_id;
cpu = lpfc_find_next_cpu(phba, phys_id);
if (cpu == LPFC_VECTOR_MAP_EMPTY)
continue;
goto found;
}
+ /* Use round robin for scheduling */
+ phba->cfg_fcp_io_sched = LPFC_FCP_SCHED_ROUND_ROBIN;
+ chan = 0;
+ cpup = phba->sli4_hba.cpu_map;
+ for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
+ cpup->channel_id = chan;
+ cpup++;
+ chan++;
+ if (chan >= phba->cfg_fcp_io_channel)
+ chan = 0;
+ }
+
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"3329 Cannot set affinity:"
"Error mapping vector %d (%d)\n",
@@ -8503,7 +8504,7 @@ found:
/* Spread vector mapping across multiple physical CPU nodes */
phys_id++;
if (phys_id > max_phys_id)
- phys_id = 0;
+ phys_id = min_phys_id;
}
/*
@@ -8513,7 +8514,7 @@ found:
* Base the assignment of the remaining IO channels on IO channels
* already assigned to other CPUs on the same phys_id.
*/
- for (i = 0; i <= max_phys_id; i++) {
+ for (i = min_phys_id; i <= max_phys_id; i++) {
/*
* If there are no io channels already mapped to
* this phys_id, just round robin thru the io_channels.
@@ -8595,10 +8596,11 @@ out:
if (num_io_channel != phba->sli4_hba.num_present_cpu)
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"3333 Set affinity mismatch:"
- "%d chann != %d cpus: %d vactors\n",
+ "%d chann != %d cpus: %d vectors\n",
num_io_channel, phba->sli4_hba.num_present_cpu,
vectors);
+ /* Enable using cpu affinity for scheduling */
phba->cfg_fcp_io_sched = LPFC_FCP_SCHED_BY_CPU;
return 1;
}
@@ -8689,9 +8691,12 @@ enable_msix_vectors:
cfg_fail_out:
/* free the irq already requested */
- for (--index; index >= 0; index--)
+ for (--index; index >= 0; index--) {
+ irq_set_affinity_hint(phba->sli4_hba.msix_entries[index].
+ vector, NULL);
free_irq(phba->sli4_hba.msix_entries[index].vector,
&phba->sli4_hba.fcp_eq_hdl[index]);
+ }
msi_fail_out:
/* Unconfigure MSI-X capability structure */
@@ -8712,9 +8717,12 @@ lpfc_sli4_disable_msix(struct lpfc_hba *phba)
int index;
/* Free up MSI-X multi-message vectors */
- for (index = 0; index < phba->cfg_fcp_io_channel; index++)
+ for (index = 0; index < phba->cfg_fcp_io_channel; index++) {
+ irq_set_affinity_hint(phba->sli4_hba.msix_entries[index].
+ vector, NULL);
free_irq(phba->sli4_hba.msix_entries[index].vector,
&phba->sli4_hba.fcp_eq_hdl[index]);
+ }
/* Disable MSI-X */
pci_disable_msix(phba->pcidev);
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 1242b6c..c913e8c 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -926,10 +926,10 @@ lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *phba)
/* gather all SCSI buffers that need reposting onto a local list */
spin_lock_irq(&phba->scsi_buf_list_get_lock);
- spin_lock_irq(&phba->scsi_buf_list_put_lock);
+ spin_lock(&phba->scsi_buf_list_put_lock);
list_splice_init(&phba->lpfc_scsi_buf_list_get, &post_sblist);
list_splice(&phba->lpfc_scsi_buf_list_put, &post_sblist);
- spin_unlock_irq(&phba->scsi_buf_list_put_lock);
+ spin_unlock(&phba->scsi_buf_list_put_lock);
spin_unlock_irq(&phba->scsi_buf_list_get_lock);
/* post the list of scsi buffer sgls to port if available */
@@ -1000,9 +1000,12 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
}
memset(psb->data, 0, phba->cfg_sg_dma_buf_size);
- /* Page alignment is CRITICAL, double check to be sure */
- if (((unsigned long)(psb->data) &
- (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0) {
+ /*
+ * 4K Page alignment is CRITICAL to BlockGuard, double check
+ * to be sure.
+ */
+ if (phba->cfg_enable_bg && (((unsigned long)(psb->data) &
+ (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) {
pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
psb->data, psb->dma_handle);
kfree(psb);
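The alignment check being narrowed above is a standard power-of-two mask test: a buffer is 4K-aligned exactly when its low 12 bits are zero. A runnable userspace sketch (SLI4_PAGE_SIZE assumed to be 4096, per the comment):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define SLI4_PAGE_SIZE 4096	/* assumption: 4 KiB, as the comment says */

static int is_page_aligned(const void *p)
{
	/* Aligned iff the low log2(SLI4_PAGE_SIZE) bits are all zero. */
	return ((uintptr_t)p & (SLI4_PAGE_SIZE - 1)) == 0;
}

int main(void)
{
	void *buf = aligned_alloc(SLI4_PAGE_SIZE, SLI4_PAGE_SIZE);

	if (!buf)
		return 1;
	printf("aligned=%d off-by-one=%d\n",
	       is_page_aligned(buf),
	       is_page_aligned((char *)buf + 1));	/* 1, 0 */
	free(buf);
	return 0;
}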
@@ -1134,22 +1137,21 @@ lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
struct lpfc_scsi_buf * lpfc_cmd = NULL;
struct list_head *scsi_buf_list_get = &phba->lpfc_scsi_buf_list_get;
- unsigned long gflag = 0;
- unsigned long pflag = 0;
+ unsigned long iflag = 0;
- spin_lock_irqsave(&phba->scsi_buf_list_get_lock, gflag);
+ spin_lock_irqsave(&phba->scsi_buf_list_get_lock, iflag);
list_remove_head(scsi_buf_list_get, lpfc_cmd, struct lpfc_scsi_buf,
list);
if (!lpfc_cmd) {
- spin_lock_irqsave(&phba->scsi_buf_list_put_lock, pflag);
+ spin_lock(&phba->scsi_buf_list_put_lock);
list_splice(&phba->lpfc_scsi_buf_list_put,
&phba->lpfc_scsi_buf_list_get);
INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
list_remove_head(scsi_buf_list_get, lpfc_cmd,
struct lpfc_scsi_buf, list);
- spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, pflag);
+ spin_unlock(&phba->scsi_buf_list_put_lock);
}
- spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, gflag);
+ spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, iflag);
return lpfc_cmd;
}
/**
@@ -1167,11 +1169,10 @@ static struct lpfc_scsi_buf*
lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
struct lpfc_scsi_buf *lpfc_cmd, *lpfc_cmd_next;
- unsigned long gflag = 0;
- unsigned long pflag = 0;
+ unsigned long iflag = 0;
int found = 0;
- spin_lock_irqsave(&phba->scsi_buf_list_get_lock, gflag);
+ spin_lock_irqsave(&phba->scsi_buf_list_get_lock, iflag);
list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next,
&phba->lpfc_scsi_buf_list_get, list) {
if (lpfc_test_rrq_active(phba, ndlp,
@@ -1182,11 +1183,11 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
break;
}
if (!found) {
- spin_lock_irqsave(&phba->scsi_buf_list_put_lock, pflag);
+ spin_lock(&phba->scsi_buf_list_put_lock);
list_splice(&phba->lpfc_scsi_buf_list_put,
&phba->lpfc_scsi_buf_list_get);
INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
- spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, pflag);
+ spin_unlock(&phba->scsi_buf_list_put_lock);
list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next,
&phba->lpfc_scsi_buf_list_get, list) {
if (lpfc_test_rrq_active(
@@ -1197,7 +1198,7 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
break;
}
}
- spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, gflag);
+ spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, iflag);
if (!found)
return NULL;
return lpfc_cmd;
@@ -3966,11 +3967,11 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
/*
* Check SLI validation that all the transfer was actually done
- * (fcpi_parm should be zero).
+ * (fcpi_parm should be zero). Apply check only to reads.
*/
- } else if (fcpi_parm) {
+ } else if (fcpi_parm && (cmnd->sc_data_direction == DMA_FROM_DEVICE)) {
lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
- "9029 FCP Data Transfer Check Error: "
+ "9029 FCP Read Check Error Data: "
"x%x x%x x%x x%x x%x\n",
be32_to_cpu(fcpcmd->fcpDl),
be32_to_cpu(fcprsp->rspResId),
@@ -4342,6 +4343,7 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
char tag[2];
uint8_t *ptr;
bool sli4;
+ uint32_t fcpdl;
if (!pnode || !NLP_CHK_NODE_ACT(pnode))
return;
@@ -4389,8 +4391,12 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
iocb_cmd->ulpPU = PARM_READ_CHECK;
if (vport->cfg_first_burst_size &&
(pnode->nlp_flag & NLP_FIRSTBURST)) {
- piocbq->iocb.un.fcpi.fcpi_XRdy =
- vport->cfg_first_burst_size;
+ fcpdl = scsi_bufflen(scsi_cmnd);
+ if (fcpdl < vport->cfg_first_burst_size)
+ piocbq->iocb.un.fcpi.fcpi_XRdy = fcpdl;
+ else
+ piocbq->iocb.un.fcpi.fcpi_XRdy =
+ vport->cfg_first_burst_size;
}
fcp_cmnd->fcpCntl3 = WRITE_DATA;
phba->fc4OutputRequests++;
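The first-burst change above clamps the advertised first burst to the actual transfer length, i.e. fcpi_XRdy = min(fcpdl, cfg_first_burst_size). A one-line sketch of the clamp:

#include <stdint.h>
#include <stdio.h>

static uint32_t first_burst(uint32_t fcpdl, uint32_t cfg_first_burst_size)
{
	/* Never promise more first-burst data than the command will move. */
	return fcpdl < cfg_first_burst_size ? fcpdl : cfg_first_burst_size;
}

int main(void)
{
	printf("%u %u\n", first_burst(2048, 4096), first_burst(8192, 4096));
	return 0;	/* prints: 2048 4096 */
}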
@@ -4878,6 +4884,9 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
goto out_unlock;
}
+ /* Indicate the IO is being aborted by the driver. */
+ iocb->iocb_flag |= LPFC_DRIVER_ABORTED;
+
/*
* The scsi command cannot be in the txq, and it is in flight, because the
* pCmd is still pointing at the SCSI command we have to abort. There
@@ -5006,7 +5015,7 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata,
lpfc_cmd = lpfc_get_scsi_buf(phba, rdata->pnode);
if (lpfc_cmd == NULL)
return FAILED;
- lpfc_cmd->timeout = 60;
+ lpfc_cmd->timeout = phba->cfg_task_mgmt_tmo;
lpfc_cmd->rdata = rdata;
status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id,
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 0392e11..612f489 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -9831,6 +9831,13 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
abort_cmd) != 0)
continue;
+ /*
+ * If the iocbq is already being aborted, don't take a second
+ * action, but do count it.
+ */
+ if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED)
+ continue;
+
/* issue ABTS for this IOCB based on iotag */
abtsiocb = lpfc_sli_get_iocbq(phba);
if (abtsiocb == NULL) {
@@ -9838,6 +9845,9 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
continue;
}
+ /* indicate the IO is being aborted by the driver. */
+ iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;
+
cmd = &iocbq->iocb;
abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext;
@@ -9847,7 +9857,7 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
abtsiocb->iocb.ulpLe = 1;
abtsiocb->iocb.ulpClass = cmd->ulpClass;
- abtsiocb->vport = phba->pport;
+ abtsiocb->vport = vport;
/* ABTS WQE must go to the same WQ as the WQE to be aborted */
abtsiocb->fcp_wqidx = iocbq->fcp_wqidx;
@@ -12233,7 +12243,6 @@ static void __iomem *
lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
{
struct pci_dev *pdev;
- unsigned long bar_map, bar_map_len;
if (!phba->pcidev)
return NULL;
@@ -12242,25 +12251,10 @@ lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
switch (pci_barset) {
case WQ_PCI_BAR_0_AND_1:
- if (!phba->pci_bar0_memmap_p) {
- bar_map = pci_resource_start(pdev, PCI_64BIT_BAR0);
- bar_map_len = pci_resource_len(pdev, PCI_64BIT_BAR0);
- phba->pci_bar0_memmap_p = ioremap(bar_map, bar_map_len);
- }
return phba->pci_bar0_memmap_p;
case WQ_PCI_BAR_2_AND_3:
- if (!phba->pci_bar2_memmap_p) {
- bar_map = pci_resource_start(pdev, PCI_64BIT_BAR2);
- bar_map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
- phba->pci_bar2_memmap_p = ioremap(bar_map, bar_map_len);
- }
return phba->pci_bar2_memmap_p;
case WQ_PCI_BAR_4_AND_5:
- if (!phba->pci_bar4_memmap_p) {
- bar_map = pci_resource_start(pdev, PCI_64BIT_BAR4);
- bar_map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
- phba->pci_bar4_memmap_p = ioremap(bar_map, bar_map_len);
- }
return phba->pci_bar4_memmap_p;
default:
break;
@@ -15808,7 +15802,7 @@ lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
void
lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
{
- struct lpfc_fcf_pri *fcf_pri;
+ struct lpfc_fcf_pri *fcf_pri, *fcf_pri_next;
if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
"2762 FCF (x%x) reached driver's book "
@@ -15818,7 +15812,8 @@ lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
}
/* Clear the eligible FCF record index bmask */
spin_lock_irq(&phba->hbalock);
- list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
+ list_for_each_entry_safe(fcf_pri, fcf_pri_next, &phba->fcf.fcf_pri_list,
+ list) {
if (fcf_pri->fcf_rec.fcf_index == fcf_index) {
list_del_init(&fcf_pri->list);
break;
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index 9761799..6b0f247 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -58,7 +58,7 @@ struct lpfc_iocbq {
IOCB_t iocb; /* IOCB cmd */
uint8_t retry; /* retry counter for IOCB cmd - if needed */
- uint16_t iocb_flag;
+ uint32_t iocb_flag;
#define LPFC_IO_LIBDFC 1 /* libdfc iocb */
#define LPFC_IO_WAKE 2 /* Synchronous I/O completed */
#define LPFC_IO_WAKE_TMO LPFC_IO_WAKE /* Synchronous I/O timed out */
@@ -73,11 +73,11 @@ struct lpfc_iocbq {
#define LPFC_IO_DIF_PASS 0x400 /* T10 DIF IO pass-thru prot */
#define LPFC_IO_DIF_STRIP 0x800 /* T10 DIF IO strip prot */
#define LPFC_IO_DIF_INSERT 0x1000 /* T10 DIF IO insert prot */
+#define LPFC_IO_CMD_OUTSTANDING 0x2000 /* timeout handler abort window */
#define LPFC_FIP_ELS_ID_MASK 0xc000 /* ELS_ID range 0-3, non-shifted mask */
#define LPFC_FIP_ELS_ID_SHIFT 14
- uint8_t rsvd2;
uint32_t drvrTimeout; /* driver timeout in seconds */
uint32_t fcp_wqidx; /* index to FCP work queue */
struct lpfc_vport *vport;/* virtual port pointer */
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 5bcc382..85120b7 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -523,7 +523,7 @@ struct lpfc_sli4_hba {
struct lpfc_queue *hdr_rq; /* Slow-path Header Receive queue */
struct lpfc_queue *dat_rq; /* Slow-path Data Receive queue */
- uint8_t fw_func_mode; /* FW function protocol mode */
+ uint32_t fw_func_mode; /* FW function protocol mode */
uint32_t ulp0_mode; /* ULP0 protocol mode */
uint32_t ulp1_mode; /* ULP1 protocol mode */
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 21859d2..f58f183 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "8.3.41"
+#define LPFC_DRIVER_VERSION "8.3.42"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index 04a42a5..0c73ba4 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -33,9 +33,9 @@
/*
* MegaRAID SAS Driver meta data
*/
-#define MEGASAS_VERSION "06.600.18.00-rc1"
-#define MEGASAS_RELDATE "May. 15, 2013"
-#define MEGASAS_EXT_VERSION "Wed. May. 15 17:00:00 PDT 2013"
+#define MEGASAS_VERSION "06.700.06.00-rc1"
+#define MEGASAS_RELDATE "Aug. 31, 2013"
+#define MEGASAS_EXT_VERSION "Sat. Aug. 31 17:00:00 PDT 2013"
/*
* Device IDs
@@ -170,6 +170,7 @@
#define MR_DCMD_CTRL_GET_INFO 0x01010000
#define MR_DCMD_LD_GET_LIST 0x03010000
+#define MR_DCMD_LD_LIST_QUERY 0x03010100
#define MR_DCMD_CTRL_CACHE_FLUSH 0x01101000
#define MR_FLUSH_CTRL_CACHE 0x01
@@ -345,6 +346,15 @@ enum MR_PD_QUERY_TYPE {
MR_PD_QUERY_TYPE_EXPOSED_TO_HOST = 5,
};
+enum MR_LD_QUERY_TYPE {
+ MR_LD_QUERY_TYPE_ALL = 0,
+ MR_LD_QUERY_TYPE_EXPOSED_TO_HOST = 1,
+ MR_LD_QUERY_TYPE_USED_TGT_IDS = 2,
+ MR_LD_QUERY_TYPE_CLUSTER_ACCESS = 3,
+ MR_LD_QUERY_TYPE_CLUSTER_LOCALE = 4,
+};
+
+
#define MR_EVT_CFG_CLEARED 0x0004
#define MR_EVT_LD_STATE_CHANGE 0x0051
#define MR_EVT_PD_INSERTED 0x005b
@@ -435,6 +445,14 @@ struct MR_LD_LIST {
} ldList[MAX_LOGICAL_DRIVES];
} __packed;
+struct MR_LD_TARGETID_LIST {
+ u32 size;
+ u32 count;
+ u8 pad[3];
+ u8 targetId[MAX_LOGICAL_DRIVES];
+};
+
+
/*
* SAS controller properties
*/
@@ -474,21 +492,39 @@ struct megasas_ctrl_prop {
* a bit in the following structure.
*/
struct {
- u32 copyBackDisabled : 1;
- u32 SMARTerEnabled : 1;
- u32 prCorrectUnconfiguredAreas : 1;
- u32 useFdeOnly : 1;
- u32 disableNCQ : 1;
- u32 SSDSMARTerEnabled : 1;
- u32 SSDPatrolReadEnabled : 1;
- u32 enableSpinDownUnconfigured : 1;
- u32 autoEnhancedImport : 1;
- u32 enableSecretKeyControl : 1;
- u32 disableOnlineCtrlReset : 1;
- u32 allowBootWithPinnedCache : 1;
- u32 disableSpinDownHS : 1;
- u32 enableJBOD : 1;
- u32 reserved :18;
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u32 reserved:18;
+ u32 enableJBOD:1;
+ u32 disableSpinDownHS:1;
+ u32 allowBootWithPinnedCache:1;
+ u32 disableOnlineCtrlReset:1;
+ u32 enableSecretKeyControl:1;
+ u32 autoEnhancedImport:1;
+ u32 enableSpinDownUnconfigured:1;
+ u32 SSDPatrolReadEnabled:1;
+ u32 SSDSMARTerEnabled:1;
+ u32 disableNCQ:1;
+ u32 useFdeOnly:1;
+ u32 prCorrectUnconfiguredAreas:1;
+ u32 SMARTerEnabled:1;
+ u32 copyBackDisabled:1;
+#else
+ u32 copyBackDisabled:1;
+ u32 SMARTerEnabled:1;
+ u32 prCorrectUnconfiguredAreas:1;
+ u32 useFdeOnly:1;
+ u32 disableNCQ:1;
+ u32 SSDSMARTerEnabled:1;
+ u32 SSDPatrolReadEnabled:1;
+ u32 enableSpinDownUnconfigured:1;
+ u32 autoEnhancedImport:1;
+ u32 enableSecretKeyControl:1;
+ u32 disableOnlineCtrlReset:1;
+ u32 allowBootWithPinnedCache:1;
+ u32 disableSpinDownHS:1;
+ u32 enableJBOD:1;
+ u32 reserved:18;
+#endif
} OnOffProperties;
u8 autoSnapVDSpace;
u8 viewSpace;
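The #if defined(__BIG_ENDIAN_BITFIELD) blocks above exist because C leaves bit-field allocation order implementation-defined: typical little-endian ABIs allocate from the least-significant bit and big-endian ABIs from the most-significant, so the declaration order must be mirrored to keep the firmware's u32 layout fixed. A small demonstration (the output is ABI-dependent by design):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct props {
	uint32_t copyBackDisabled:1;	/* first-declared field */
	uint32_t rest:31;
};

int main(void)
{
	struct props p;
	uint32_t raw;

	memset(&p, 0, sizeof(p));
	p.copyBackDisabled = 1;
	memcpy(&raw, &p, sizeof(raw));

	/* On common little-endian ABIs this prints 0x00000001 (bit 0); a
	 * big-endian ABI places the first field at the opposite end, which
	 * is exactly what the reversed declaration order compensates for. */
	printf("raw = 0x%08x\n", raw);
	return 0;
}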
@@ -802,6 +838,30 @@ struct megasas_ctrl_info {
u16 cacheMemorySize; /*7A2h */
struct { /*7A4h */
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u32 reserved:11;
+ u32 supportUnevenSpans:1;
+ u32 dedicatedHotSparesLimited:1;
+ u32 headlessMode:1;
+ u32 supportEmulatedDrives:1;
+ u32 supportResetNow:1;
+ u32 realTimeScheduler:1;
+ u32 supportSSDPatrolRead:1;
+ u32 supportPerfTuning:1;
+ u32 disableOnlinePFKChange:1;
+ u32 supportJBOD:1;
+ u32 supportBootTimePFKChange:1;
+ u32 supportSetLinkSpeed:1;
+ u32 supportEmergencySpares:1;
+ u32 supportSuspendResumeBGops:1;
+ u32 blockSSDWriteCacheChange:1;
+ u32 supportShieldState:1;
+ u32 supportLdBBMInfo:1;
+ u32 supportLdPIType3:1;
+ u32 supportLdPIType2:1;
+ u32 supportLdPIType1:1;
+ u32 supportPIcontroller:1;
+#else
u32 supportPIcontroller:1;
u32 supportLdPIType1:1;
u32 supportLdPIType2:1;
@@ -827,6 +887,7 @@ struct megasas_ctrl_info {
u32 supportUnevenSpans:1;
u32 reserved:11;
+#endif
} adapterOperations2;
u8 driverVersion[32]; /*7A8h */
@@ -863,7 +924,7 @@ struct megasas_ctrl_info {
* ===============================
*/
#define MEGASAS_MAX_PD_CHANNELS 2
-#define MEGASAS_MAX_LD_CHANNELS 2
+#define MEGASAS_MAX_LD_CHANNELS 1
#define MEGASAS_MAX_CHANNELS (MEGASAS_MAX_PD_CHANNELS + \
MEGASAS_MAX_LD_CHANNELS)
#define MEGASAS_MAX_DEV_PER_CHANNEL 128
@@ -1051,9 +1112,15 @@ union megasas_sgl_frame {
typedef union _MFI_CAPABILITIES {
struct {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u32 reserved:30;
+ u32 support_additional_msix:1;
+ u32 support_fp_remote_lun:1;
+#else
u32 support_fp_remote_lun:1;
u32 support_additional_msix:1;
u32 reserved:30;
+#endif
} mfi_capabilities;
u32 reg;
} MFI_CAPABILITIES;
@@ -1656,4 +1723,16 @@ struct megasas_mgmt_info {
int max_index;
};
+u8
+MR_BuildRaidContext(struct megasas_instance *instance,
+ struct IO_REQUEST_INFO *io_info,
+ struct RAID_CONTEXT *pRAID_Context,
+ struct MR_FW_RAID_MAP_ALL *map, u8 **raidLUN);
+u16 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_FW_RAID_MAP_ALL *map);
+struct MR_LD_RAID *MR_LdRaidGet(u32 ld, struct MR_FW_RAID_MAP_ALL *map);
+u16 MR_ArPdGet(u32 ar, u32 arm, struct MR_FW_RAID_MAP_ALL *map);
+u16 MR_LdSpanArrayGet(u32 ld, u32 span, struct MR_FW_RAID_MAP_ALL *map);
+u16 MR_PdDevHandleGet(u32 pd, struct MR_FW_RAID_MAP_ALL *map);
+u16 MR_GetLDTgtId(u32 ld, struct MR_FW_RAID_MAP_ALL *map);
+
#endif /*LSI_MEGARAID_SAS_H */
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 1f0ca68..3020921 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -18,7 +18,7 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* FILE: megaraid_sas_base.c
- * Version : 06.600.18.00-rc1
+ * Version : 06.700.06.00-rc1
*
* Authors: LSI Corporation
* Sreenivas Bagalkote
@@ -92,6 +92,8 @@ MODULE_DESCRIPTION("LSI MegaRAID SAS Driver");
int megasas_transition_to_ready(struct megasas_instance *instance, int ocr);
static int megasas_get_pd_list(struct megasas_instance *instance);
+static int megasas_ld_list_query(struct megasas_instance *instance,
+ u8 query_type);
static int megasas_issue_init_mfi(struct megasas_instance *instance);
static int megasas_register_aen(struct megasas_instance *instance,
u32 seq_num, u32 class_locale_word);
@@ -374,13 +376,11 @@ static int
megasas_check_reset_xscale(struct megasas_instance *instance,
struct megasas_register_set __iomem *regs)
{
- u32 consumer;
- consumer = *instance->consumer;
if ((instance->adprecovery != MEGASAS_HBA_OPERATIONAL) &&
- (*instance->consumer == MEGASAS_ADPRESET_INPROG_SIGN)) {
+ (le32_to_cpu(*instance->consumer) ==
+ MEGASAS_ADPRESET_INPROG_SIGN))
return 1;
- }
return 0;
}
@@ -629,9 +629,10 @@ megasas_fire_cmd_skinny(struct megasas_instance *instance,
{
unsigned long flags;
spin_lock_irqsave(&instance->hba_lock, flags);
- writel(0, &(regs)->inbound_high_queue_port);
- writel((frame_phys_addr | (frame_count<<1))|1,
- &(regs)->inbound_low_queue_port);
+ writel(upper_32_bits(frame_phys_addr),
+ &(regs)->inbound_high_queue_port);
+ writel((lower_32_bits(frame_phys_addr) | (frame_count<<1))|1,
+ &(regs)->inbound_low_queue_port);
spin_unlock_irqrestore(&instance->hba_lock, flags);
}
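The fire_cmd change above posts a full 64-bit frame address through two 32-bit queue-port registers using upper_32_bits()/lower_32_bits(), with the frame count and a valid bit folded into the low word. A userspace re-implementation of the split (the example address is arbitrary):

#include <stdint.h>
#include <stdio.h>

/* Userspace equivalents of the kernel's upper_32_bits()/lower_32_bits(). */
static uint32_t upper_32(uint64_t v) { return (uint32_t)(v >> 32); }
static uint32_t lower_32(uint64_t v) { return (uint32_t)v; }

int main(void)
{
	uint64_t frame_phys_addr = 0x0000000123456780ULL;	/* example only */
	uint32_t frame_count = 2;

	uint32_t hi = upper_32(frame_phys_addr);
	/* The low word also carries the frame count and a valid bit, as in
	 * megasas_fire_cmd_skinny() above. */
	uint32_t lo = (lower_32(frame_phys_addr) | (frame_count << 1)) | 1;

	printf("high=0x%08x low=0x%08x\n", hi, lo);
	return 0;
}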
@@ -879,8 +880,8 @@ megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd)
struct megasas_header *frame_hdr = &cmd->frame->hdr;
- frame_hdr->cmd_status = 0xFF;
- frame_hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
+ frame_hdr->cmd_status = MFI_CMD_STATUS_POLL_MODE;
+ frame_hdr->flags |= cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);
/*
* Issue the frame using inbound queue port
@@ -944,10 +945,12 @@ megasas_issue_blocked_abort_cmd(struct megasas_instance *instance,
*/
abort_fr->cmd = MFI_CMD_ABORT;
abort_fr->cmd_status = 0xFF;
- abort_fr->flags = 0;
- abort_fr->abort_context = cmd_to_abort->index;
- abort_fr->abort_mfi_phys_addr_lo = cmd_to_abort->frame_phys_addr;
- abort_fr->abort_mfi_phys_addr_hi = 0;
+ abort_fr->flags = cpu_to_le16(0);
+ abort_fr->abort_context = cpu_to_le32(cmd_to_abort->index);
+ abort_fr->abort_mfi_phys_addr_lo =
+ cpu_to_le32(lower_32_bits(cmd_to_abort->frame_phys_addr));
+ abort_fr->abort_mfi_phys_addr_hi =
+ cpu_to_le32(upper_32_bits(cmd_to_abort->frame_phys_addr));
cmd->sync_cmd = 1;
cmd->cmd_status = 0xFF;
@@ -986,8 +989,8 @@ megasas_make_sgl32(struct megasas_instance *instance, struct scsi_cmnd *scp,
if (sge_count) {
scsi_for_each_sg(scp, os_sgl, sge_count, i) {
- mfi_sgl->sge32[i].length = sg_dma_len(os_sgl);
- mfi_sgl->sge32[i].phys_addr = sg_dma_address(os_sgl);
+ mfi_sgl->sge32[i].length = cpu_to_le32(sg_dma_len(os_sgl));
+ mfi_sgl->sge32[i].phys_addr = cpu_to_le32(sg_dma_address(os_sgl));
}
}
return sge_count;
@@ -1015,8 +1018,8 @@ megasas_make_sgl64(struct megasas_instance *instance, struct scsi_cmnd *scp,
if (sge_count) {
scsi_for_each_sg(scp, os_sgl, sge_count, i) {
- mfi_sgl->sge64[i].length = sg_dma_len(os_sgl);
- mfi_sgl->sge64[i].phys_addr = sg_dma_address(os_sgl);
+ mfi_sgl->sge64[i].length = cpu_to_le32(sg_dma_len(os_sgl));
+ mfi_sgl->sge64[i].phys_addr = cpu_to_le64(sg_dma_address(os_sgl));
}
}
return sge_count;
@@ -1043,10 +1046,11 @@ megasas_make_sgl_skinny(struct megasas_instance *instance,
if (sge_count) {
scsi_for_each_sg(scp, os_sgl, sge_count, i) {
- mfi_sgl->sge_skinny[i].length = sg_dma_len(os_sgl);
+ mfi_sgl->sge_skinny[i].length =
+ cpu_to_le32(sg_dma_len(os_sgl));
mfi_sgl->sge_skinny[i].phys_addr =
- sg_dma_address(os_sgl);
- mfi_sgl->sge_skinny[i].flag = 0;
+ cpu_to_le64(sg_dma_address(os_sgl));
+ mfi_sgl->sge_skinny[i].flag = cpu_to_le32(0);
}
}
return sge_count;
@@ -1155,8 +1159,8 @@ megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp,
pthru->cdb_len = scp->cmd_len;
pthru->timeout = 0;
pthru->pad_0 = 0;
- pthru->flags = flags;
- pthru->data_xfer_len = scsi_bufflen(scp);
+ pthru->flags = cpu_to_le16(flags);
+ pthru->data_xfer_len = cpu_to_le32(scsi_bufflen(scp));
memcpy(pthru->cdb, scp->cmnd, scp->cmd_len);
@@ -1168,18 +1172,18 @@ megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp,
if ((scp->request->timeout / HZ) > 0xFFFF)
pthru->timeout = 0xFFFF;
else
- pthru->timeout = scp->request->timeout / HZ;
+ pthru->timeout = cpu_to_le16(scp->request->timeout / HZ);
}
/*
* Construct SGL
*/
if (instance->flag_ieee == 1) {
- pthru->flags |= MFI_FRAME_SGL64;
+ pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64);
pthru->sge_count = megasas_make_sgl_skinny(instance, scp,
&pthru->sgl);
} else if (IS_DMA64) {
- pthru->flags |= MFI_FRAME_SGL64;
+ pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64);
pthru->sge_count = megasas_make_sgl64(instance, scp,
&pthru->sgl);
} else
@@ -1196,8 +1200,10 @@ megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp,
* Sense info specific
*/
pthru->sense_len = SCSI_SENSE_BUFFERSIZE;
- pthru->sense_buf_phys_addr_hi = 0;
- pthru->sense_buf_phys_addr_lo = cmd->sense_phys_addr;
+ pthru->sense_buf_phys_addr_hi =
+ cpu_to_le32(upper_32_bits(cmd->sense_phys_addr));
+ pthru->sense_buf_phys_addr_lo =
+ cpu_to_le32(lower_32_bits(cmd->sense_phys_addr));
/*
* Compute the total number of frames this command consumes. FW uses
@@ -1248,7 +1254,7 @@ megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp,
ldio->timeout = 0;
ldio->reserved_0 = 0;
ldio->pad_0 = 0;
- ldio->flags = flags;
+ ldio->flags = cpu_to_le16(flags);
ldio->start_lba_hi = 0;
ldio->access_byte = (scp->cmd_len != 6) ? scp->cmnd[1] : 0;
@@ -1256,52 +1262,59 @@ megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp,
* 6-byte READ(0x08) or WRITE(0x0A) cdb
*/
if (scp->cmd_len == 6) {
- ldio->lba_count = (u32) scp->cmnd[4];
- ldio->start_lba_lo = ((u32) scp->cmnd[1] << 16) |
- ((u32) scp->cmnd[2] << 8) | (u32) scp->cmnd[3];
+ ldio->lba_count = cpu_to_le32((u32) scp->cmnd[4]);
+ ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[1] << 16) |
+ ((u32) scp->cmnd[2] << 8) |
+ (u32) scp->cmnd[3]);
- ldio->start_lba_lo &= 0x1FFFFF;
+ ldio->start_lba_lo &= cpu_to_le32(0x1FFFFF);
}
/*
* 10-byte READ(0x28) or WRITE(0x2A) cdb
*/
else if (scp->cmd_len == 10) {
- ldio->lba_count = (u32) scp->cmnd[8] |
- ((u32) scp->cmnd[7] << 8);
- ldio->start_lba_lo = ((u32) scp->cmnd[2] << 24) |
- ((u32) scp->cmnd[3] << 16) |
- ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5];
+ ldio->lba_count = cpu_to_le32((u32) scp->cmnd[8] |
+ ((u32) scp->cmnd[7] << 8));
+ ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
+ ((u32) scp->cmnd[3] << 16) |
+ ((u32) scp->cmnd[4] << 8) |
+ (u32) scp->cmnd[5]);
}
/*
* 12-byte READ(0xA8) or WRITE(0xAA) cdb
*/
else if (scp->cmd_len == 12) {
- ldio->lba_count = ((u32) scp->cmnd[6] << 24) |
- ((u32) scp->cmnd[7] << 16) |
- ((u32) scp->cmnd[8] << 8) | (u32) scp->cmnd[9];
+ ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[6] << 24) |
+ ((u32) scp->cmnd[7] << 16) |
+ ((u32) scp->cmnd[8] << 8) |
+ (u32) scp->cmnd[9]);
- ldio->start_lba_lo = ((u32) scp->cmnd[2] << 24) |
- ((u32) scp->cmnd[3] << 16) |
- ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5];
+ ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
+ ((u32) scp->cmnd[3] << 16) |
+ ((u32) scp->cmnd[4] << 8) |
+ (u32) scp->cmnd[5]);
}
/*
* 16-byte READ(0x88) or WRITE(0x8A) cdb
*/
else if (scp->cmd_len == 16) {
- ldio->lba_count = ((u32) scp->cmnd[10] << 24) |
- ((u32) scp->cmnd[11] << 16) |
- ((u32) scp->cmnd[12] << 8) | (u32) scp->cmnd[13];
+ ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[10] << 24) |
+ ((u32) scp->cmnd[11] << 16) |
+ ((u32) scp->cmnd[12] << 8) |
+ (u32) scp->cmnd[13]);
- ldio->start_lba_lo = ((u32) scp->cmnd[6] << 24) |
- ((u32) scp->cmnd[7] << 16) |
- ((u32) scp->cmnd[8] << 8) | (u32) scp->cmnd[9];
+ ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[6] << 24) |
+ ((u32) scp->cmnd[7] << 16) |
+ ((u32) scp->cmnd[8] << 8) |
+ (u32) scp->cmnd[9]);
- ldio->start_lba_hi = ((u32) scp->cmnd[2] << 24) |
- ((u32) scp->cmnd[3] << 16) |
- ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5];
+ ldio->start_lba_hi = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
+ ((u32) scp->cmnd[3] << 16) |
+ ((u32) scp->cmnd[4] << 8) |
+ (u32) scp->cmnd[5]);
}
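All four CDB branches above assemble big-endian LBA/length fields byte by byte before converting them to the firmware's little-endian format. A runnable sketch of the 10-byte READ/WRITE case (parse_rw10 is a hypothetical helper mirroring the shifts in the hunk):

#include <stdint.h>
#include <stdio.h>

static void parse_rw10(const uint8_t *cdb, uint32_t *lba, uint32_t *count)
{
	/* Bytes 2..5: big-endian LBA; bytes 7..8: big-endian block count. */
	*lba   = ((uint32_t)cdb[2] << 24) | ((uint32_t)cdb[3] << 16) |
		 ((uint32_t)cdb[4] << 8)  |  (uint32_t)cdb[5];
	*count = ((uint32_t)cdb[7] << 8)  |  (uint32_t)cdb[8];
}

int main(void)
{
	/* READ(10) of 16 blocks starting at LBA 0x12345678 (example). */
	uint8_t cdb[10] = { 0x28, 0, 0x12, 0x34, 0x56, 0x78, 0, 0x00, 0x10, 0 };
	uint32_t lba, count;

	parse_rw10(cdb, &lba, &count);
	printf("lba=0x%08x count=%u\n", lba, count); /* lba=0x12345678 count=16 */
	return 0;
}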
@@ -1309,11 +1322,11 @@ megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp,
* Construct SGL
*/
if (instance->flag_ieee) {
- ldio->flags |= MFI_FRAME_SGL64;
+ ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64);
ldio->sge_count = megasas_make_sgl_skinny(instance, scp,
&ldio->sgl);
} else if (IS_DMA64) {
- ldio->flags |= MFI_FRAME_SGL64;
+ ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64);
ldio->sge_count = megasas_make_sgl64(instance, scp, &ldio->sgl);
} else
ldio->sge_count = megasas_make_sgl32(instance, scp, &ldio->sgl);
@@ -1329,7 +1342,7 @@ megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp,
*/
ldio->sense_len = SCSI_SENSE_BUFFERSIZE;
ldio->sense_buf_phys_addr_hi = 0;
- ldio->sense_buf_phys_addr_lo = cmd->sense_phys_addr;
+ ldio->sense_buf_phys_addr_lo = cpu_to_le32(cmd->sense_phys_addr);
/*
* Compute the total number of frames this command consumes. FW uses
@@ -1400,20 +1413,32 @@ megasas_dump_pending_frames(struct megasas_instance *instance)
ldio = (struct megasas_io_frame *)cmd->frame;
mfi_sgl = &ldio->sgl;
sgcount = ldio->sge_count;
- printk(KERN_ERR "megasas[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x, lba lo : 0x%x, lba_hi : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",instance->host->host_no, cmd->frame_count,ldio->cmd,ldio->target_id, ldio->start_lba_lo,ldio->start_lba_hi,ldio->sense_buf_phys_addr_lo,sgcount);
+ printk(KERN_ERR "megasas[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x,"
+ " lba lo : 0x%x, lba_hi : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",
+ instance->host->host_no, cmd->frame_count, ldio->cmd, ldio->target_id,
+ le32_to_cpu(ldio->start_lba_lo), le32_to_cpu(ldio->start_lba_hi),
+ le32_to_cpu(ldio->sense_buf_phys_addr_lo), sgcount);
}
else {
pthru = (struct megasas_pthru_frame *) cmd->frame;
mfi_sgl = &pthru->sgl;
sgcount = pthru->sge_count;
- printk(KERN_ERR "megasas[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x, lun : 0x%x, cdb_len : 0x%x, data xfer len : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",instance->host->host_no,cmd->frame_count,pthru->cmd,pthru->target_id,pthru->lun,pthru->cdb_len , pthru->data_xfer_len,pthru->sense_buf_phys_addr_lo,sgcount);
+ printk(KERN_ERR "megasas[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x, "
+ "lun : 0x%x, cdb_len : 0x%x, data xfer len : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",
+ instance->host->host_no, cmd->frame_count, pthru->cmd, pthru->target_id,
+ pthru->lun, pthru->cdb_len, le32_to_cpu(pthru->data_xfer_len),
+ le32_to_cpu(pthru->sense_buf_phys_addr_lo), sgcount);
}
if(megasas_dbg_lvl & MEGASAS_DBG_LVL){
for (n = 0; n < sgcount; n++){
if (IS_DMA64)
- printk(KERN_ERR "megasas: sgl len : 0x%x, sgl addr : 0x%08lx ",mfi_sgl->sge64[n].length , (unsigned long)mfi_sgl->sge64[n].phys_addr) ;
+ printk(KERN_ERR "megasas: sgl len : 0x%x, sgl addr : 0x%llx ",
+ le32_to_cpu(mfi_sgl->sge64[n].length),
+ le64_to_cpu(mfi_sgl->sge64[n].phys_addr));
else
- printk(KERN_ERR "megasas: sgl len : 0x%x, sgl addr : 0x%x ",mfi_sgl->sge32[n].length , mfi_sgl->sge32[n].phys_addr) ;
+ printk(KERN_ERR "megasas: sgl len : 0x%x, sgl addr : 0x%x ",
+ le32_to_cpu(mfi_sgl->sge32[n].length),
+ le32_to_cpu(mfi_sgl->sge32[n].phys_addr));
}
}
printk(KERN_ERR "\n");
@@ -1674,11 +1699,11 @@ static void megasas_complete_cmd_dpc(unsigned long instance_addr)
spin_lock_irqsave(&instance->completion_lock, flags);
- producer = *instance->producer;
- consumer = *instance->consumer;
+ producer = le32_to_cpu(*instance->producer);
+ consumer = le32_to_cpu(*instance->consumer);
while (consumer != producer) {
- context = instance->reply_queue[consumer];
+ context = le32_to_cpu(instance->reply_queue[consumer]);
if (context >= instance->max_fw_cmds) {
printk(KERN_ERR "Unexpected context value %x\n",
context);
@@ -1695,7 +1720,7 @@ static void megasas_complete_cmd_dpc(unsigned long instance_addr)
}
}
- *instance->consumer = producer;
+ *instance->consumer = cpu_to_le32(producer);
spin_unlock_irqrestore(&instance->completion_lock, flags);
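The queue drained above is shared with the firmware, so the index cells and every ring entry live in little-endian: the DPC snapshots producer and consumer once, converts each entry as it is consumed, and publishes the new consumer index back in LE. A compressed sketch of the same loop (from_le32()/to_le32() are the stand-ins from the CDB sketch; complete_ctx() is an illustrative completion hook, not a kernel primitive):

	void complete_ctx(uint32_t context);	/* illustrative hook */

	static void drain_reply_ring(volatile uint32_t *producer_cell,
				     volatile uint32_t *consumer_cell,
				     const uint32_t *reply_ring,
				     uint32_t ring_entries, uint32_t max_cmds)
	{
		uint32_t prod = from_le32(*producer_cell);
		uint32_t cons = from_le32(*consumer_cell);

		while (cons != prod) {
			uint32_t context = from_le32(reply_ring[cons]);
			if (context < max_cmds)
				complete_ctx(context);
			if (++cons == ring_entries)	/* wrap like the driver */
				cons = 0;
		}
		*consumer_cell = to_le32(prod);		/* publish in LE */
	}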
@@ -1716,7 +1741,7 @@ void megasas_do_ocr(struct megasas_instance *instance)
if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) ||
(instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) ||
(instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR)) {
- *instance->consumer = MEGASAS_ADPRESET_INPROG_SIGN;
+ *instance->consumer = cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN);
}
instance->instancet->disable_intr(instance);
instance->adprecovery = MEGASAS_ADPRESET_SM_INFAULT;
@@ -2186,6 +2211,7 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
struct megasas_header *hdr = &cmd->frame->hdr;
unsigned long flags;
struct fusion_context *fusion = instance->ctrl_context;
+ u32 opcode;
/* flag for the retry reset */
cmd->retry_for_fw_reset = 0;
@@ -2287,9 +2313,10 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
case MFI_CMD_SMP:
case MFI_CMD_STP:
case MFI_CMD_DCMD:
+ opcode = le32_to_cpu(cmd->frame->dcmd.opcode);
/* Check for LD map update */
- if ((cmd->frame->dcmd.opcode == MR_DCMD_LD_MAP_GET_INFO) &&
- (cmd->frame->dcmd.mbox.b[1] == 1)) {
+ if ((opcode == MR_DCMD_LD_MAP_GET_INFO)
+ && (cmd->frame->dcmd.mbox.b[1] == 1)) {
fusion->fast_path_io = 0;
spin_lock_irqsave(instance->host->host_lock, flags);
if (cmd->frame->hdr.cmd_status != 0) {
@@ -2323,8 +2350,8 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
flags);
break;
}
- if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET_INFO ||
- cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET) {
+ if (opcode == MR_DCMD_CTRL_EVENT_GET_INFO ||
+ opcode == MR_DCMD_CTRL_EVENT_GET) {
spin_lock_irqsave(&poll_aen_lock, flags);
megasas_poll_wait_aen = 0;
spin_unlock_irqrestore(&poll_aen_lock, flags);
@@ -2333,7 +2360,7 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
/*
* See if got an event notification
*/
- if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_WAIT)
+ if (opcode == MR_DCMD_CTRL_EVENT_WAIT)
megasas_service_aen(instance, cmd);
else
megasas_complete_int_cmd(instance, cmd);
@@ -2606,7 +2633,7 @@ megasas_deplete_reply_queue(struct megasas_instance *instance,
PCI_DEVICE_ID_LSI_VERDE_ZCR)) {
*instance->consumer =
- MEGASAS_ADPRESET_INPROG_SIGN;
+ cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN);
}
@@ -2983,7 +3010,7 @@ static int megasas_create_frame_pool(struct megasas_instance *instance)
}
memset(cmd->frame, 0, total_sz);
- cmd->frame->io.context = cmd->index;
+ cmd->frame->io.context = cpu_to_le32(cmd->index);
cmd->frame->io.pad_0 = 0;
if ((instance->pdev->device != PCI_DEVICE_ID_LSI_FUSION) &&
(instance->pdev->device != PCI_DEVICE_ID_LSI_INVADER) &&
@@ -3143,13 +3170,13 @@ megasas_get_pd_list(struct megasas_instance *instance)
dcmd->cmd = MFI_CMD_DCMD;
dcmd->cmd_status = 0xFF;
dcmd->sge_count = 1;
- dcmd->flags = MFI_FRAME_DIR_READ;
+ dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
dcmd->timeout = 0;
dcmd->pad_0 = 0;
- dcmd->data_xfer_len = MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST);
- dcmd->opcode = MR_DCMD_PD_LIST_QUERY;
- dcmd->sgl.sge32[0].phys_addr = ci_h;
- dcmd->sgl.sge32[0].length = MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST);
+ dcmd->data_xfer_len = cpu_to_le32(MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST));
+ dcmd->opcode = cpu_to_le32(MR_DCMD_PD_LIST_QUERY);
+ dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
+ dcmd->sgl.sge32[0].length = cpu_to_le32(MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST));
if (!megasas_issue_polled(instance, cmd)) {
ret = 0;
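The DCMD setup above shows which frame members need conversion and which do not: single-byte fields (cmd, cmd_status, sge_count) are byte-order neutral, while the 16-bit flags and the 32-bit opcode, data_xfer_len and SGE address/length are stored little-endian. A sketch of the widths involved (field order and padding here are illustrative, not the driver's real frame layout):

	struct dcmd_hdr_sketch {
		uint8_t  cmd;		/* MFI_CMD_DCMD: byte, no swap */
		uint8_t  cmd_status;	/* byte, no swap */
		uint8_t  sge_count;	/* byte, no swap */
		uint16_t flags;		/* stored via cpu_to_le16() */
		uint32_t opcode;	/* stored via cpu_to_le32() */
		uint32_t data_xfer_len;	/* stored via cpu_to_le32() */
		uint32_t sge_phys_addr;	/* stored via cpu_to_le32() */
		uint32_t sge_length;	/* stored via cpu_to_le32() */
	};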
@@ -3164,16 +3191,16 @@ megasas_get_pd_list(struct megasas_instance *instance)
pd_addr = ci->addr;
if ( ret == 0 &&
- (ci->count <
+ (le32_to_cpu(ci->count) <
(MEGASAS_MAX_PD_CHANNELS * MEGASAS_MAX_DEV_PER_CHANNEL))) {
memset(instance->pd_list, 0,
MEGASAS_MAX_PD * sizeof(struct megasas_pd_list));
- for (pd_index = 0; pd_index < ci->count; pd_index++) {
+ for (pd_index = 0; pd_index < le32_to_cpu(ci->count); pd_index++) {
instance->pd_list[pd_addr->deviceId].tid =
- pd_addr->deviceId;
+ le16_to_cpu(pd_addr->deviceId);
instance->pd_list[pd_addr->deviceId].driveType =
pd_addr->scsiDevType;
instance->pd_list[pd_addr->deviceId].driveState =
@@ -3207,6 +3234,7 @@ megasas_get_ld_list(struct megasas_instance *instance)
struct megasas_dcmd_frame *dcmd;
struct MR_LD_LIST *ci;
dma_addr_t ci_h = 0;
+ u32 ld_count;
cmd = megasas_get_cmd(instance);
@@ -3233,12 +3261,12 @@ megasas_get_ld_list(struct megasas_instance *instance)
dcmd->cmd = MFI_CMD_DCMD;
dcmd->cmd_status = 0xFF;
dcmd->sge_count = 1;
- dcmd->flags = MFI_FRAME_DIR_READ;
+ dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
dcmd->timeout = 0;
- dcmd->data_xfer_len = sizeof(struct MR_LD_LIST);
- dcmd->opcode = MR_DCMD_LD_GET_LIST;
- dcmd->sgl.sge32[0].phys_addr = ci_h;
- dcmd->sgl.sge32[0].length = sizeof(struct MR_LD_LIST);
+ dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_LIST));
+ dcmd->opcode = cpu_to_le32(MR_DCMD_LD_GET_LIST);
+ dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
+ dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_LD_LIST));
dcmd->pad_0 = 0;
if (!megasas_issue_polled(instance, cmd)) {
@@ -3247,12 +3275,14 @@ megasas_get_ld_list(struct megasas_instance *instance)
ret = -1;
}
+ ld_count = le32_to_cpu(ci->ldCount);
+
/* the following function will get the instance PD LIST */
- if ((ret == 0) && (ci->ldCount <= MAX_LOGICAL_DRIVES)) {
+ if ((ret == 0) && (ld_count <= MAX_LOGICAL_DRIVES)) {
memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
- for (ld_index = 0; ld_index < ci->ldCount; ld_index++) {
+ for (ld_index = 0; ld_index < ld_count; ld_index++) {
if (ci->ldList[ld_index].state != 0) {
ids = ci->ldList[ld_index].ref.targetId;
instance->ld_ids[ids] =
@@ -3271,6 +3301,87 @@ megasas_get_ld_list(struct megasas_instance *instance)
}
/**
+ * megasas_ld_list_query - Returns FW's ld_list structure
+ * @instance: Adapter soft state
+ * @query_type: ld list query type
+ *
+ * Issues an internal command (DCMD) to get the FW's LD list
+ * structure. This information is mainly used to find out which
+ * LDs are exposed to the host.
+ */
+static int
+megasas_ld_list_query(struct megasas_instance *instance, u8 query_type)
+{
+ int ret = 0, ld_index = 0, ids = 0;
+ struct megasas_cmd *cmd;
+ struct megasas_dcmd_frame *dcmd;
+ struct MR_LD_TARGETID_LIST *ci;
+ dma_addr_t ci_h = 0;
+ u32 tgtid_count;
+
+ cmd = megasas_get_cmd(instance);
+
+ if (!cmd) {
+ printk(KERN_WARNING
+ "megasas:(megasas_ld_list_query): Failed to get cmd\n");
+ return -ENOMEM;
+ }
+
+ dcmd = &cmd->frame->dcmd;
+
+ ci = pci_alloc_consistent(instance->pdev,
+ sizeof(struct MR_LD_TARGETID_LIST), &ci_h);
+
+ if (!ci) {
+ printk(KERN_WARNING
+ "megasas: Failed to alloc mem for ld_list_query\n");
+ megasas_return_cmd(instance, cmd);
+ return -ENOMEM;
+ }
+
+ memset(ci, 0, sizeof(*ci));
+ memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+ dcmd->mbox.b[0] = query_type;
+
+ dcmd->cmd = MFI_CMD_DCMD;
+ dcmd->cmd_status = 0xFF;
+ dcmd->sge_count = 1;
+ dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
+ dcmd->timeout = 0;
+ dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_TARGETID_LIST));
+ dcmd->opcode = cpu_to_le32(MR_DCMD_LD_LIST_QUERY);
+ dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
+ dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_LD_TARGETID_LIST));
+ dcmd->pad_0 = 0;
+
+ if (!megasas_issue_polled(instance, cmd) && !dcmd->cmd_status) {
+ ret = 0;
+ } else {
+ /* On failure, call older LD list DCMD */
+ ret = 1;
+ }
+
+ tgtid_count = le32_to_cpu(ci->count);
+
+ if ((ret == 0) && (tgtid_count <= (MAX_LOGICAL_DRIVES))) {
+ memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
+ for (ld_index = 0; ld_index < tgtid_count; ld_index++) {
+ ids = ci->targetId[ld_index];
+ instance->ld_ids[ids] = ci->targetId[ld_index];
+ }
+
+ }
+
+ pci_free_consistent(instance->pdev, sizeof(struct MR_LD_TARGETID_LIST),
+ ci, ci_h);
+
+ megasas_return_cmd(instance, cmd);
+
+ return ret;
+}
+
+/**
* megasas_get_controller_info - Returns FW's controller structure
* @instance: Adapter soft state
* @ctrl_info: Controller information structure
@@ -3313,13 +3424,13 @@ megasas_get_ctrl_info(struct megasas_instance *instance,
dcmd->cmd = MFI_CMD_DCMD;
dcmd->cmd_status = 0xFF;
dcmd->sge_count = 1;
- dcmd->flags = MFI_FRAME_DIR_READ;
+ dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
dcmd->timeout = 0;
dcmd->pad_0 = 0;
- dcmd->data_xfer_len = sizeof(struct megasas_ctrl_info);
- dcmd->opcode = MR_DCMD_CTRL_GET_INFO;
- dcmd->sgl.sge32[0].phys_addr = ci_h;
- dcmd->sgl.sge32[0].length = sizeof(struct megasas_ctrl_info);
+ dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_ctrl_info));
+ dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_GET_INFO);
+ dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
+ dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_ctrl_info));
if (!megasas_issue_polled(instance, cmd)) {
ret = 0;
@@ -3375,17 +3486,20 @@ megasas_issue_init_mfi(struct megasas_instance *instance)
memset(initq_info, 0, sizeof(struct megasas_init_queue_info));
init_frame->context = context;
- initq_info->reply_queue_entries = instance->max_fw_cmds + 1;
- initq_info->reply_queue_start_phys_addr_lo = instance->reply_queue_h;
+ initq_info->reply_queue_entries = cpu_to_le32(instance->max_fw_cmds + 1);
+ initq_info->reply_queue_start_phys_addr_lo = cpu_to_le32(instance->reply_queue_h);
- initq_info->producer_index_phys_addr_lo = instance->producer_h;
- initq_info->consumer_index_phys_addr_lo = instance->consumer_h;
+ initq_info->producer_index_phys_addr_lo = cpu_to_le32(instance->producer_h);
+ initq_info->consumer_index_phys_addr_lo = cpu_to_le32(instance->consumer_h);
init_frame->cmd = MFI_CMD_INIT;
init_frame->cmd_status = 0xFF;
- init_frame->queue_info_new_phys_addr_lo = initq_info_h;
+ init_frame->queue_info_new_phys_addr_lo =
+ cpu_to_le32(lower_32_bits(initq_info_h));
+ init_frame->queue_info_new_phys_addr_hi =
+ cpu_to_le32(upper_32_bits(initq_info_h));
- init_frame->data_xfer_len = sizeof(struct megasas_init_queue_info);
+ init_frame->data_xfer_len = cpu_to_le32(sizeof(struct megasas_init_queue_info));
/*
* disable the intr before firing the init frame to FW
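The init frame now carries the full 64-bit DMA handle of the queue-info structure, split with lower_32_bits()/upper_32_bits() and stored little-endian half by half. A sketch of the split (lo32()/hi32() are stand-ins for the kernel helpers; to_le32() is the stand-in defined in the CDB sketch):

	static uint32_t lo32(uint64_t v) { return (uint32_t)v; }
	static uint32_t hi32(uint64_t v) { return (uint32_t)(v >> 32); }

	static void set_qinfo_addr(uint32_t *lo_le, uint32_t *hi_le,
				   uint64_t dma_handle)
	{
		*lo_le = to_le32(lo32(dma_handle));
		*hi_le = to_le32(hi32(dma_handle));
	}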
@@ -3648,7 +3762,9 @@ static int megasas_init_fw(struct megasas_instance *instance)
megasas_get_pd_list(instance);
memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
- megasas_get_ld_list(instance);
+ if (megasas_ld_list_query(instance,
+ MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
+ megasas_get_ld_list(instance);
ctrl_info = kmalloc(sizeof(struct megasas_ctrl_info), GFP_KERNEL);
@@ -3665,8 +3781,8 @@ static int megasas_init_fw(struct megasas_instance *instance)
if (ctrl_info && !megasas_get_ctrl_info(instance, ctrl_info)) {
max_sectors_1 = (1 << ctrl_info->stripe_sz_ops.min) *
- ctrl_info->max_strips_per_io;
- max_sectors_2 = ctrl_info->max_request_size;
+ le16_to_cpu(ctrl_info->max_strips_per_io);
+ max_sectors_2 = le32_to_cpu(ctrl_info->max_request_size);
tmp_sectors = min_t(u32, max_sectors_1 , max_sectors_2);
@@ -3675,14 +3791,18 @@ static int megasas_init_fw(struct megasas_instance *instance)
instance->is_imr = 0;
dev_info(&instance->pdev->dev, "Controller type: MR,"
"Memory size is: %dMB\n",
- ctrl_info->memory_size);
+ le16_to_cpu(ctrl_info->memory_size));
} else {
instance->is_imr = 1;
dev_info(&instance->pdev->dev,
"Controller type: iMR\n");
}
+ /* Convert OnOffProperties to CPU byte order */
+ le32_to_cpus((u32 *)&ctrl_info->properties.OnOffProperties);
instance->disableOnlineCtrlReset =
ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;
+ /* Convert adapterOperations2 to CPU byte order */
+ le32_to_cpus((u32 *)&ctrl_info->adapterOperations2);
instance->UnevenSpanSupport =
ctrl_info->adapterOperations2.supportUnevenSpans;
if (instance->UnevenSpanSupport) {
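OnOffProperties and adapterOperations2 are C bitfields, so they cannot be converted member by member; instead the whole containing 32-bit word is byte-swapped in place with le32_to_cpus() before any bit is read. The idiom in isolation (the one-bit layout is illustrative; from_le32() is the stand-in defined earlier):

	static void swap32_inplace(uint32_t *p)	/* le32_to_cpus() stand-in */
	{
		*p = from_le32(*p);
	}

	struct onoff_sketch {			/* illustrative layout */
		uint32_t disableOnlineCtrlReset:1;
		uint32_t reserved:31;
	};

	static int ocr_disabled(struct onoff_sketch *props)
	{
		swap32_inplace((uint32_t *)props);	/* once, before reading */
		return props->disableOnlineCtrlReset;
	}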
@@ -3696,7 +3816,6 @@ static int megasas_init_fw(struct megasas_instance *instance)
}
}
-
instance->max_sectors_per_req = instance->max_num_sge *
PAGE_SIZE / 512;
if (tmp_sectors && (instance->max_sectors_per_req > tmp_sectors))
@@ -3802,20 +3921,24 @@ megasas_get_seq_num(struct megasas_instance *instance,
dcmd->cmd = MFI_CMD_DCMD;
dcmd->cmd_status = 0x0;
dcmd->sge_count = 1;
- dcmd->flags = MFI_FRAME_DIR_READ;
+ dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
dcmd->timeout = 0;
dcmd->pad_0 = 0;
- dcmd->data_xfer_len = sizeof(struct megasas_evt_log_info);
- dcmd->opcode = MR_DCMD_CTRL_EVENT_GET_INFO;
- dcmd->sgl.sge32[0].phys_addr = el_info_h;
- dcmd->sgl.sge32[0].length = sizeof(struct megasas_evt_log_info);
+ dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_log_info));
+ dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_GET_INFO);
+ dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(el_info_h);
+ dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_evt_log_info));
megasas_issue_blocked_cmd(instance, cmd);
/*
* Copy the data back into callers buffer
*/
- memcpy(eli, el_info, sizeof(struct megasas_evt_log_info));
+ eli->newest_seq_num = le32_to_cpu(el_info->newest_seq_num);
+ eli->oldest_seq_num = le32_to_cpu(el_info->oldest_seq_num);
+ eli->clear_seq_num = le32_to_cpu(el_info->clear_seq_num);
+ eli->shutdown_seq_num = le32_to_cpu(el_info->shutdown_seq_num);
+ eli->boot_seq_num = le32_to_cpu(el_info->boot_seq_num);
pci_free_consistent(instance->pdev, sizeof(struct megasas_evt_log_info),
el_info, el_info_h);
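The blanket memcpy() of the old code is replaced by a field-wise import: every 32-bit sequence number is converted individually, which is the safe way to move a little-endian firmware struct into CPU order. The same idiom as a standalone sketch (the struct mirrors megasas_evt_log_info but is simplified; from_le32() is the earlier stand-in):

	struct evt_log_info_sketch {
		uint32_t newest_seq_num;
		uint32_t oldest_seq_num;
		uint32_t clear_seq_num;
		uint32_t shutdown_seq_num;
		uint32_t boot_seq_num;
	};

	static void import_evt_log_info(struct evt_log_info_sketch *dst,
					const struct evt_log_info_sketch *le)
	{
		dst->newest_seq_num   = from_le32(le->newest_seq_num);
		dst->oldest_seq_num   = from_le32(le->oldest_seq_num);
		dst->clear_seq_num    = from_le32(le->clear_seq_num);
		dst->shutdown_seq_num = from_le32(le->shutdown_seq_num);
		dst->boot_seq_num     = from_le32(le->boot_seq_num);
	}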
@@ -3862,6 +3985,7 @@ megasas_register_aen(struct megasas_instance *instance, u32 seq_num,
if (instance->aen_cmd) {
prev_aen.word = instance->aen_cmd->frame->dcmd.mbox.w[1];
+ prev_aen.members.locale = le16_to_cpu(prev_aen.members.locale);
/*
* A class whose enum value is smaller is inclusive of all
@@ -3874,7 +3998,7 @@ megasas_register_aen(struct megasas_instance *instance, u32 seq_num,
* values
*/
if ((prev_aen.members.class <= curr_aen.members.class) &&
!((prev_aen.members.locale & curr_aen.members.locale) ^
curr_aen.members.locale)) {
/*
* Previously issued event registration includes
@@ -3882,7 +4006,7 @@ megasas_register_aen(struct megasas_instance *instance, u32 seq_num,
*/
return 0;
} else {
curr_aen.members.locale |= prev_aen.members.locale;
if (prev_aen.members.class < curr_aen.members.class)
curr_aen.members.class = prev_aen.members.class;
@@ -3917,16 +4041,16 @@ megasas_register_aen(struct megasas_instance *instance, u32 seq_num,
dcmd->cmd = MFI_CMD_DCMD;
dcmd->cmd_status = 0x0;
dcmd->sge_count = 1;
- dcmd->flags = MFI_FRAME_DIR_READ;
+ dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
dcmd->timeout = 0;
dcmd->pad_0 = 0;
+ dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_detail));
+ dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_WAIT);
+ dcmd->mbox.w[0] = cpu_to_le32(seq_num);
instance->last_seq_num = seq_num;
- dcmd->data_xfer_len = sizeof(struct megasas_evt_detail);
- dcmd->opcode = MR_DCMD_CTRL_EVENT_WAIT;
- dcmd->mbox.w[0] = seq_num;
- dcmd->mbox.w[1] = curr_aen.word;
- dcmd->sgl.sge32[0].phys_addr = (u32) instance->evt_detail_h;
- dcmd->sgl.sge32[0].length = sizeof(struct megasas_evt_detail);
+ dcmd->mbox.w[1] = cpu_to_le32(curr_aen.word);
+ dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(instance->evt_detail_h);
+ dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_evt_detail));
if (instance->aen_cmd != NULL) {
megasas_return_cmd(instance, cmd);
@@ -3972,8 +4096,9 @@ static int megasas_start_aen(struct megasas_instance *instance)
class_locale.members.locale = MR_EVT_LOCALE_ALL;
class_locale.members.class = MR_EVT_CLASS_DEBUG;
- return megasas_register_aen(instance, eli.newest_seq_num + 1,
- class_locale.word);
+ return megasas_register_aen(instance,
+ le32_to_cpu(eli.newest_seq_num) + 1,
+ class_locale.word);
}
/**
@@ -4068,6 +4193,7 @@ megasas_set_dma_mask(struct pci_dev *pdev)
if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)
goto fail_set_dma_mask;
}
+
return 0;
fail_set_dma_mask:
@@ -4386,11 +4512,11 @@ static void megasas_flush_cache(struct megasas_instance *instance)
dcmd->cmd = MFI_CMD_DCMD;
dcmd->cmd_status = 0x0;
dcmd->sge_count = 0;
- dcmd->flags = MFI_FRAME_DIR_NONE;
+ dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE);
dcmd->timeout = 0;
dcmd->pad_0 = 0;
dcmd->data_xfer_len = 0;
- dcmd->opcode = MR_DCMD_CTRL_CACHE_FLUSH;
+ dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_CACHE_FLUSH);
dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
megasas_issue_blocked_cmd(instance, cmd);
@@ -4431,11 +4557,11 @@ static void megasas_shutdown_controller(struct megasas_instance *instance,
dcmd->cmd = MFI_CMD_DCMD;
dcmd->cmd_status = 0x0;
dcmd->sge_count = 0;
- dcmd->flags = MFI_FRAME_DIR_NONE;
+ dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE);
dcmd->timeout = 0;
dcmd->pad_0 = 0;
dcmd->data_xfer_len = 0;
- dcmd->opcode = opcode;
+ dcmd->opcode = cpu_to_le32(opcode);
megasas_issue_blocked_cmd(instance, cmd);
@@ -4850,10 +4976,11 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
* alone separately
*/
memcpy(cmd->frame, ioc->frame.raw, 2 * MEGAMFI_FRAME_SIZE);
- cmd->frame->hdr.context = cmd->index;
+ cmd->frame->hdr.context = cpu_to_le32(cmd->index);
cmd->frame->hdr.pad_0 = 0;
- cmd->frame->hdr.flags &= ~(MFI_FRAME_IEEE | MFI_FRAME_SGL64 |
- MFI_FRAME_SENSE64);
+ cmd->frame->hdr.flags &= cpu_to_le16(~(MFI_FRAME_IEEE |
+ MFI_FRAME_SGL64 |
+ MFI_FRAME_SENSE64));
/*
* The management interface between applications and the fw uses
@@ -4887,8 +5014,8 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
* We don't change the dma_coherent_mask, so
* pci_alloc_consistent only returns 32bit addresses
*/
- kern_sge32[i].phys_addr = (u32) buf_handle;
- kern_sge32[i].length = ioc->sgl[i].iov_len;
+ kern_sge32[i].phys_addr = cpu_to_le32(buf_handle);
+ kern_sge32[i].length = cpu_to_le32(ioc->sgl[i].iov_len);
/*
* We created a kernel buffer corresponding to the
@@ -4911,7 +5038,7 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
sense_ptr =
(unsigned long *) ((unsigned long)cmd->frame + ioc->sense_off);
- *sense_ptr = sense_handle;
+ *sense_ptr = cpu_to_le32(sense_handle);
}
/*
@@ -4971,9 +5098,9 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
for (i = 0; i < ioc->sge_count; i++) {
if (kbuff_arr[i])
dma_free_coherent(&instance->pdev->dev,
- kern_sge32[i].length,
+ le32_to_cpu(kern_sge32[i].length),
kbuff_arr[i],
- kern_sge32[i].phys_addr);
+ le32_to_cpu(kern_sge32[i].phys_addr));
}
megasas_return_cmd(instance, cmd);
@@ -5327,7 +5454,7 @@ megasas_aen_polling(struct work_struct *work)
host = instance->host;
if (instance->evt_detail) {
- switch (instance->evt_detail->code) {
+ switch (le32_to_cpu(instance->evt_detail->code)) {
case MR_EVT_PD_INSERTED:
if (megasas_get_pd_list(instance) == 0) {
for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
@@ -5389,7 +5516,9 @@ megasas_aen_polling(struct work_struct *work)
case MR_EVT_LD_OFFLINE:
case MR_EVT_CFG_CLEARED:
case MR_EVT_LD_DELETED:
- megasas_get_ld_list(instance);
+ if (megasas_ld_list_query(instance,
+ MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
+ megasas_get_ld_list(instance);
for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
for (j = 0;
j < MEGASAS_MAX_DEV_PER_CHANNEL;
@@ -5399,7 +5528,7 @@ megasas_aen_polling(struct work_struct *work)
(i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
sdev1 = scsi_device_lookup(host,
- i + MEGASAS_MAX_LD_CHANNELS,
+ MEGASAS_MAX_PD_CHANNELS + i,
j,
0);
@@ -5418,7 +5547,9 @@ megasas_aen_polling(struct work_struct *work)
doscan = 0;
break;
case MR_EVT_LD_CREATED:
- megasas_get_ld_list(instance);
+ if (megasas_ld_list_query(instance,
+ MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
+ megasas_get_ld_list(instance);
for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
for (j = 0;
j < MEGASAS_MAX_DEV_PER_CHANNEL;
@@ -5427,14 +5558,14 @@ megasas_aen_polling(struct work_struct *work)
(i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
sdev1 = scsi_device_lookup(host,
- i+MEGASAS_MAX_LD_CHANNELS,
+ MEGASAS_MAX_PD_CHANNELS + i,
j, 0);
if (instance->ld_ids[ld_index] !=
0xff) {
if (!sdev1) {
scsi_add_device(host,
- i + 2,
+ MEGASAS_MAX_PD_CHANNELS + i,
j, 0);
}
}
@@ -5483,18 +5614,20 @@ megasas_aen_polling(struct work_struct *work)
}
}
- megasas_get_ld_list(instance);
+ if (megasas_ld_list_query(instance,
+ MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
+ megasas_get_ld_list(instance);
for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
ld_index =
(i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
sdev1 = scsi_device_lookup(host,
- i+MEGASAS_MAX_LD_CHANNELS, j, 0);
+ MEGASAS_MAX_PD_CHANNELS + i, j, 0);
if (instance->ld_ids[ld_index] != 0xff) {
if (!sdev1) {
scsi_add_device(host,
- i+2,
+ MEGASAS_MAX_PD_CHANNELS + i,
j, 0);
} else {
scsi_device_put(sdev1);
@@ -5514,7 +5647,7 @@ megasas_aen_polling(struct work_struct *work)
return ;
}
- seq_num = instance->evt_detail->seq_num + 1;
+ seq_num = le32_to_cpu(instance->evt_detail->seq_num) + 1;
/* Register AEN with FW for latest sequence number plus 1 */
class_locale.members.reserved = 0;
diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c
index 4f401f7..e24b6eb 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fp.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fp.c
@@ -126,17 +126,17 @@ static u8 MR_LdDataArmGet(u32 ld, u32 armIdx, struct MR_FW_RAID_MAP_ALL *map)
return map->raidMap.ldSpanMap[ld].dataArmMap[armIdx];
}
-static u16 MR_ArPdGet(u32 ar, u32 arm, struct MR_FW_RAID_MAP_ALL *map)
+u16 MR_ArPdGet(u32 ar, u32 arm, struct MR_FW_RAID_MAP_ALL *map)
{
- return map->raidMap.arMapInfo[ar].pd[arm];
+ return le16_to_cpu(map->raidMap.arMapInfo[ar].pd[arm]);
}
-static u16 MR_LdSpanArrayGet(u32 ld, u32 span, struct MR_FW_RAID_MAP_ALL *map)
+u16 MR_LdSpanArrayGet(u32 ld, u32 span, struct MR_FW_RAID_MAP_ALL *map)
{
- return map->raidMap.ldSpanMap[ld].spanBlock[span].span.arrayRef;
+ return le16_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].span.arrayRef);
}
-static u16 MR_PdDevHandleGet(u32 pd, struct MR_FW_RAID_MAP_ALL *map)
+u16 MR_PdDevHandleGet(u32 pd, struct MR_FW_RAID_MAP_ALL *map)
{
return map->raidMap.devHndlInfo[pd].curDevHdl;
}
@@ -148,7 +148,7 @@ u16 MR_GetLDTgtId(u32 ld, struct MR_FW_RAID_MAP_ALL *map)
u16 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_FW_RAID_MAP_ALL *map)
{
- return map->raidMap.ldTgtIdToLd[ldTgtId];
+ return le16_to_cpu(map->raidMap.ldTgtIdToLd[ldTgtId]);
}
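Dropping `static` exports these one-line map accessors so megaraid_sas_fusion.c can share them, and it concentrates each le16_to_cpu() in exactly one place: callers stay byte-order agnostic because every read of the firmware RAID map funnels through a converting helper. The idiom in isolation (from_le16() is a 16-bit stand-in analogous to the earlier from_le32(); map_pd_get() mirrors MR_ArPdGet() with simplified types):

	static uint16_t from_le16(uint16_t v)	/* le16_to_cpu() stand-in */
	{
		const union { uint16_t u; uint8_t b; } probe = { .u = 1 };
		return probe.b ? v : (uint16_t)((v << 8) | (v >> 8));
	}

	/* every access converts; the map itself stays little-endian */
	static uint16_t map_pd_get(const uint16_t *pd_le, uint32_t arm)
	{
		return from_le16(pd_le[arm]);
	}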
static struct MR_LD_SPAN *MR_LdSpanPtrGet(u32 ld, u32 span,
@@ -167,18 +167,22 @@ u8 MR_ValidateMapInfo(struct megasas_instance *instance)
struct LD_LOAD_BALANCE_INFO *lbInfo = fusion->load_balance_info;
PLD_SPAN_INFO ldSpanInfo = fusion->log_to_span;
struct MR_FW_RAID_MAP *pFwRaidMap = &map->raidMap;
+ struct MR_LD_RAID *raid;
+ int ldCount, num_lds;
+ u16 ld;
+
- if (pFwRaidMap->totalSize !=
+ if (le32_to_cpu(pFwRaidMap->totalSize) !=
(sizeof(struct MR_FW_RAID_MAP) -sizeof(struct MR_LD_SPAN_MAP) +
- (sizeof(struct MR_LD_SPAN_MAP) *pFwRaidMap->ldCount))) {
+ (sizeof(struct MR_LD_SPAN_MAP) * le32_to_cpu(pFwRaidMap->ldCount)))) {
printk(KERN_ERR "megasas: map info structure size 0x%x is not matching with ld count\n",
(unsigned int)((sizeof(struct MR_FW_RAID_MAP) -
sizeof(struct MR_LD_SPAN_MAP)) +
(sizeof(struct MR_LD_SPAN_MAP) *
- pFwRaidMap->ldCount)));
+ le32_to_cpu(pFwRaidMap->ldCount))));
printk(KERN_ERR "megasas: span map %x, pFwRaidMap->totalSize "
": %x\n", (unsigned int)sizeof(struct MR_LD_SPAN_MAP),
- pFwRaidMap->totalSize);
+ le32_to_cpu(pFwRaidMap->totalSize));
return 0;
}
@@ -187,6 +191,15 @@ u8 MR_ValidateMapInfo(struct megasas_instance *instance)
mr_update_load_balance_params(map, lbInfo);
+ num_lds = le32_to_cpu(map->raidMap.ldCount);
+
+ /* Convert RAID capability values to CPU byte order */
+ for (ldCount = 0; ldCount < num_lds; ldCount++) {
+ ld = MR_TargetIdToLdGet(ldCount, map);
+ raid = MR_LdRaidGet(ld, map);
+ le32_to_cpus((u32 *)&raid->capability);
+ }
+
return 1;
}
@@ -200,23 +213,20 @@ u32 MR_GetSpanBlock(u32 ld, u64 row, u64 *span_blk,
for (span = 0; span < raid->spanDepth; span++, pSpanBlock++) {
- for (j = 0; j < pSpanBlock->block_span_info.noElements; j++) {
+ for (j = 0; j < le32_to_cpu(pSpanBlock->block_span_info.noElements); j++) {
quad = &pSpanBlock->block_span_info.quad[j];
- if (quad->diff == 0)
+ if (le32_to_cpu(quad->diff) == 0)
return SPAN_INVALID;
- if (quad->logStart <= row && row <= quad->logEnd &&
- (mega_mod64(row-quad->logStart, quad->diff)) == 0) {
+ if (le64_to_cpu(quad->logStart) <= row && row <=
+ le64_to_cpu(quad->logEnd) && (mega_mod64(row - le64_to_cpu(quad->logStart),
+ le32_to_cpu(quad->diff))) == 0) {
if (span_blk != NULL) {
u64 blk, debugBlk;
- blk =
- mega_div64_32(
- (row-quad->logStart),
- quad->diff);
+ blk = mega_div64_32((row-le64_to_cpu(quad->logStart)), le32_to_cpu(quad->diff));
debugBlk = blk;
- blk = (blk + quad->offsetInSpan) <<
- raid->stripeShift;
+ blk = (blk + le64_to_cpu(quad->offsetInSpan)) << raid->stripeShift;
*span_blk = blk;
}
return span;
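Each quad describes an arithmetic progression of rows: a row belongs to the quad when logStart <= row <= logEnd and (row - logStart) is a multiple of diff, and the physical block is then ((row - logStart) / diff + offsetInSpan) << stripeShift. The conversions above only make sure that test runs on CPU-order values; the arithmetic itself, as a standalone sketch (mega_mod64()/mega_div64_32() are the driver's 64-bit division helpers, spelled out here as plain operators):

	static int quad_to_block(uint64_t row, uint64_t log_start,
				 uint64_t log_end, uint32_t diff,
				 uint64_t offset_in_span,
				 uint8_t stripe_shift, uint64_t *blk)
	{
		if (diff == 0 || row < log_start || row > log_end)
			return 0;		/* row not in this quad */
		if ((row - log_start) % diff)
			return 0;		/* not on the progression */
		*blk = ((row - log_start) / diff + offset_in_span)
			<< stripe_shift;
		return 1;
	}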
@@ -257,8 +267,8 @@ static int getSpanInfo(struct MR_FW_RAID_MAP_ALL *map, PLD_SPAN_INFO ldSpanInfo)
for (span = 0; span < raid->spanDepth; span++)
dev_dbg(&instance->pdev->dev, "Span=%x,"
" number of quads=%x\n", span,
- map->raidMap.ldSpanMap[ld].spanBlock[span].
- block_span_info.noElements);
+ le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
+ block_span_info.noElements));
for (element = 0; element < MAX_QUAD_DEPTH; element++) {
span_set = &(ldSpanInfo[ld].span_set[element]);
if (span_set->span_row_data_width == 0)
@@ -286,22 +296,22 @@ static int getSpanInfo(struct MR_FW_RAID_MAP_ALL *map, PLD_SPAN_INFO ldSpanInfo)
(long unsigned int)span_set->data_strip_end);
for (span = 0; span < raid->spanDepth; span++) {
- if (map->raidMap.ldSpanMap[ld].spanBlock[span].
- block_span_info.noElements >=
+ if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
+ block_span_info.noElements) >=
element + 1) {
quad = &map->raidMap.ldSpanMap[ld].
spanBlock[span].block_span_info.
quad[element];
dev_dbg(&instance->pdev->dev, "Span=%x,"
"Quad=%x, diff=%x\n", span,
- element, quad->diff);
+ element, le32_to_cpu(quad->diff));
dev_dbg(&instance->pdev->dev,
"offset_in_span=0x%08lx\n",
- (long unsigned int)quad->offsetInSpan);
+ (long unsigned int)le64_to_cpu(quad->offsetInSpan));
dev_dbg(&instance->pdev->dev,
"logical start=0x%08lx, end=0x%08lx\n",
- (long unsigned int)quad->logStart,
- (long unsigned int)quad->logEnd);
+ (long unsigned int)le64_to_cpu(quad->logStart),
+ (long unsigned int)le64_to_cpu(quad->logEnd));
}
}
}
@@ -348,23 +358,23 @@ u32 mr_spanset_get_span_block(struct megasas_instance *instance,
continue;
for (span = 0; span < raid->spanDepth; span++)
- if (map->raidMap.ldSpanMap[ld].spanBlock[span].
- block_span_info.noElements >= info+1) {
+ if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
+ block_span_info.noElements) >= info+1) {
quad = &map->raidMap.ldSpanMap[ld].
spanBlock[span].
block_span_info.quad[info];
- if (quad->diff == 0)
+ if (le32_to_cpu(quad->diff) == 0)
return SPAN_INVALID;
- if (quad->logStart <= row &&
- row <= quad->logEnd &&
- (mega_mod64(row - quad->logStart,
- quad->diff)) == 0) {
+ if (le64_to_cpu(quad->logStart) <= row &&
+ row <= le64_to_cpu(quad->logEnd) &&
+ (mega_mod64(row - le64_to_cpu(quad->logStart),
+ le32_to_cpu(quad->diff))) == 0) {
if (span_blk != NULL) {
u64 blk;
blk = mega_div64_32
- ((row - quad->logStart),
- quad->diff);
- blk = (blk + quad->offsetInSpan)
+ ((row - le64_to_cpu(quad->logStart)),
+ le32_to_cpu(quad->diff));
+ blk = (blk + le64_to_cpu(quad->offsetInSpan))
<< raid->stripeShift;
*span_blk = blk;
}
@@ -415,8 +425,8 @@ static u64 get_row_from_strip(struct megasas_instance *instance,
span_set_Row = mega_div64_32(span_set_Strip,
span_set->span_row_data_width) * span_set->diff;
for (span = 0, span_offset = 0; span < raid->spanDepth; span++)
- if (map->raidMap.ldSpanMap[ld].spanBlock[span].
- block_span_info.noElements >= info+1) {
+ if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
+ block_span_info.noElements) >= info+1) {
if (strip_offset >=
span_set->strip_offset[span])
span_offset++;
@@ -480,18 +490,18 @@ static u64 get_strip_from_row(struct megasas_instance *instance,
continue;
for (span = 0; span < raid->spanDepth; span++)
- if (map->raidMap.ldSpanMap[ld].spanBlock[span].
- block_span_info.noElements >= info+1) {
+ if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
+ block_span_info.noElements) >= info+1) {
quad = &map->raidMap.ldSpanMap[ld].
spanBlock[span].block_span_info.quad[info];
- if (quad->logStart <= row &&
- row <= quad->logEnd &&
- mega_mod64((row - quad->logStart),
- quad->diff) == 0) {
+ if (le64_to_cpu(quad->logStart) <= row &&
+ row <= le64_to_cpu(quad->logEnd) &&
+ mega_mod64((row - le64_to_cpu(quad->logStart)),
+ le32_to_cpu(quad->diff)) == 0) {
strip = mega_div64_32
(((row - span_set->data_row_start)
- - quad->logStart),
- quad->diff);
+ - le64_to_cpu(quad->logStart)),
+ le32_to_cpu(quad->diff));
strip *= span_set->span_row_data_width;
strip += span_set->data_strip_start;
strip += span_set->strip_offset[span];
@@ -543,8 +553,8 @@ static u32 get_arm_from_strip(struct megasas_instance *instance,
span_set->span_row_data_width);
for (span = 0, span_offset = 0; span < raid->spanDepth; span++)
- if (map->raidMap.ldSpanMap[ld].spanBlock[span].
- block_span_info.noElements >= info+1) {
+ if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
+ block_span_info.noElements) >= info+1) {
if (strip_offset >=
span_set->strip_offset[span])
span_offset =
@@ -669,7 +679,7 @@ static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
}
}
- *pdBlock += stripRef + MR_LdSpanPtrGet(ld, span, map)->startBlk;
+ *pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk);
pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) |
physArm;
return retval;
@@ -765,7 +775,7 @@ u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
}
}
- *pdBlock += stripRef + MR_LdSpanPtrGet(ld, span, map)->startBlk;
+ *pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk);
pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) |
physArm;
return retval;
@@ -784,7 +794,7 @@ u8
MR_BuildRaidContext(struct megasas_instance *instance,
struct IO_REQUEST_INFO *io_info,
struct RAID_CONTEXT *pRAID_Context,
- struct MR_FW_RAID_MAP_ALL *map)
+ struct MR_FW_RAID_MAP_ALL *map, u8 **raidLUN)
{
struct MR_LD_RAID *raid;
u32 ld, stripSize, stripe_mask;
@@ -965,7 +975,7 @@ MR_BuildRaidContext(struct megasas_instance *instance,
regSize += stripSize;
}
- pRAID_Context->timeoutValue = map->raidMap.fpPdIoTimeoutSec;
+ pRAID_Context->timeoutValue = cpu_to_le16(map->raidMap.fpPdIoTimeoutSec);
if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
(instance->pdev->device == PCI_DEVICE_ID_LSI_FURY))
pRAID_Context->regLockFlags = (isRead) ?
@@ -974,9 +984,12 @@ MR_BuildRaidContext(struct megasas_instance *instance,
pRAID_Context->regLockFlags = (isRead) ?
REGION_TYPE_SHARED_READ : raid->regTypeReqOnWrite;
pRAID_Context->VirtualDiskTgtId = raid->targetId;
- pRAID_Context->regLockRowLBA = regStart;
- pRAID_Context->regLockLength = regSize;
+ pRAID_Context->regLockRowLBA = cpu_to_le64(regStart);
+ pRAID_Context->regLockLength = cpu_to_le32(regSize);
pRAID_Context->configSeqNum = raid->seqNum;
+ /* save pointer to raid->LUN array */
+ *raidLUN = raid->LUN;
+
/*Get Phy Params only if FP capable, or else leave it to MR firmware
to do the calculation.*/
@@ -1047,8 +1060,8 @@ void mr_update_span_set(struct MR_FW_RAID_MAP_ALL *map,
raid = MR_LdRaidGet(ld, map);
for (element = 0; element < MAX_QUAD_DEPTH; element++) {
for (span = 0; span < raid->spanDepth; span++) {
- if (map->raidMap.ldSpanMap[ld].spanBlock[span].
- block_span_info.noElements <
+ if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
+ block_span_info.noElements) <
element + 1)
continue;
span_set = &(ldSpanInfo[ld].span_set[element]);
@@ -1056,14 +1069,14 @@ void mr_update_span_set(struct MR_FW_RAID_MAP_ALL *map,
spanBlock[span].block_span_info.
quad[element];
- span_set->diff = quad->diff;
+ span_set->diff = le32_to_cpu(quad->diff);
for (count = 0, span_row_width = 0;
count < raid->spanDepth; count++) {
- if (map->raidMap.ldSpanMap[ld].
+ if (le32_to_cpu(map->raidMap.ldSpanMap[ld].
spanBlock[count].
block_span_info.
- noElements >= element + 1) {
+ noElements) >= element + 1) {
span_set->strip_offset[count] =
span_row_width;
span_row_width +=
@@ -1077,9 +1090,9 @@ void mr_update_span_set(struct MR_FW_RAID_MAP_ALL *map,
}
span_set->span_row_data_width = span_row_width;
- span_row = mega_div64_32(((quad->logEnd -
- quad->logStart) + quad->diff),
- quad->diff);
+ span_row = mega_div64_32(((le64_to_cpu(quad->logEnd) -
+ le64_to_cpu(quad->logStart)) + le32_to_cpu(quad->diff)),
+ le32_to_cpu(quad->diff));
if (element == 0) {
span_set->log_start_lba = 0;
@@ -1096,7 +1109,7 @@ void mr_update_span_set(struct MR_FW_RAID_MAP_ALL *map,
span_set->data_row_start = 0;
span_set->data_row_end =
- (span_row * quad->diff) - 1;
+ (span_row * le32_to_cpu(quad->diff)) - 1;
} else {
span_set_prev = &(ldSpanInfo[ld].
span_set[element - 1]);
@@ -1122,7 +1135,7 @@ void mr_update_span_set(struct MR_FW_RAID_MAP_ALL *map,
span_set_prev->data_row_end + 1;
span_set->data_row_end =
span_set->data_row_start +
- (span_row * quad->diff) - 1;
+ (span_row * le32_to_cpu(quad->diff)) - 1;
}
break;
}
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index 417d5f1..f655592 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -72,17 +72,6 @@ megasas_clear_intr_fusion(struct megasas_register_set __iomem *regs);
int
megasas_issue_polled(struct megasas_instance *instance,
struct megasas_cmd *cmd);
-
-u8
-MR_BuildRaidContext(struct megasas_instance *instance,
- struct IO_REQUEST_INFO *io_info,
- struct RAID_CONTEXT *pRAID_Context,
- struct MR_FW_RAID_MAP_ALL *map);
-u16 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_FW_RAID_MAP_ALL *map);
-struct MR_LD_RAID *MR_LdRaidGet(u32 ld, struct MR_FW_RAID_MAP_ALL *map);
-
-u16 MR_GetLDTgtId(u32 ld, struct MR_FW_RAID_MAP_ALL *map);
-
void
megasas_check_and_restore_queue_depth(struct megasas_instance *instance);
@@ -626,23 +615,20 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
IOCInitMessage->Function = MPI2_FUNCTION_IOC_INIT;
IOCInitMessage->WhoInit = MPI2_WHOINIT_HOST_DRIVER;
- IOCInitMessage->MsgVersion = MPI2_VERSION;
- IOCInitMessage->HeaderVersion = MPI2_HEADER_VERSION;
- IOCInitMessage->SystemRequestFrameSize =
- MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE / 4;
-
- IOCInitMessage->ReplyDescriptorPostQueueDepth = fusion->reply_q_depth;
- IOCInitMessage->ReplyDescriptorPostQueueAddress =
- fusion->reply_frames_desc_phys;
- IOCInitMessage->SystemRequestFrameBaseAddress =
- fusion->io_request_frames_phys;
+ IOCInitMessage->MsgVersion = cpu_to_le16(MPI2_VERSION);
+ IOCInitMessage->HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION);
+ IOCInitMessage->SystemRequestFrameSize = cpu_to_le16(MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE / 4);
+
+ IOCInitMessage->ReplyDescriptorPostQueueDepth = cpu_to_le16(fusion->reply_q_depth);
+ IOCInitMessage->ReplyDescriptorPostQueueAddress = cpu_to_le64(fusion->reply_frames_desc_phys);
+ IOCInitMessage->SystemRequestFrameBaseAddress = cpu_to_le64(fusion->io_request_frames_phys);
IOCInitMessage->HostMSIxVectors = instance->msix_vectors;
init_frame = (struct megasas_init_frame *)cmd->frame;
memset(init_frame, 0, MEGAMFI_FRAME_SIZE);
frame_hdr = &cmd->frame->hdr;
frame_hdr->cmd_status = 0xFF;
- frame_hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
+ frame_hdr->flags |= cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);
init_frame->cmd = MFI_CMD_INIT;
init_frame->cmd_status = 0xFF;
@@ -652,17 +638,24 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
(instance->pdev->device == PCI_DEVICE_ID_LSI_FURY))
init_frame->driver_operations.
mfi_capabilities.support_additional_msix = 1;
+ /* driver supports HA / Remote LUN over Fast Path interface */
+ init_frame->driver_operations.mfi_capabilities.support_fp_remote_lun
+ = 1;
+ /* Convert capability to LE32 */
+ cpu_to_le32s((u32 *)&init_frame->driver_operations.mfi_capabilities);
- init_frame->queue_info_new_phys_addr_lo = ioc_init_handle;
- init_frame->data_xfer_len = sizeof(struct MPI2_IOC_INIT_REQUEST);
+ init_frame->queue_info_new_phys_addr_lo = cpu_to_le32((u32)ioc_init_handle);
+ init_frame->data_xfer_len = cpu_to_le32(sizeof(struct MPI2_IOC_INIT_REQUEST));
req_desc =
(union MEGASAS_REQUEST_DESCRIPTOR_UNION *)fusion->req_frames_desc;
- req_desc->Words = cmd->frame_phys_addr;
+ req_desc->Words = 0;
req_desc->MFAIo.RequestFlags =
(MEGASAS_REQ_DESCRIPT_FLAGS_MFA <<
MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+ cpu_to_le32s((u32 *)&req_desc->MFAIo);
+ req_desc->Words |= cpu_to_le64(cmd->frame_phys_addr);
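Words overlays the MFAIo bitfield in a union, which is why the descriptor is now built in three steps: zero the whole 64-bit word, swap the 32-bit flags/address bitfield in place (cpu_to_le32s()), then OR in the frame's bus address as a little-endian u64. This relies on the frame address leaving the low flag bits clear, which the driver's aligned frame allocation is assumed to guarantee. A compressed sketch (to_le64() is the 64-bit analogue of the earlier to_le32() stand-in; REQ_FLAGS_MFA and its value are illustrative):

	static uint64_t to_le64(uint64_t v)	/* cpu_to_le64() stand-in */
	{
		const union { uint16_t u; uint8_t b; } probe = { .u = 1 };
		return probe.b ? v : __builtin_bswap64(v);
	}

	enum { REQ_FLAGS_MFA = 0x01 };		/* illustrative flag value */

	union desc_sketch {			/* mirrors the union's shape */
		uint64_t Words;
		struct { uint32_t flags_lo; uint32_t addr_hi; } MFAIo;
	};

	static uint64_t build_mfa_desc(uint64_t frame_phys_addr)
	{
		union desc_sketch d = { .Words = 0 };
		d.MFAIo.flags_lo = REQ_FLAGS_MFA;		/* low-byte flags */
		d.MFAIo.flags_lo = to_le32(d.MFAIo.flags_lo);	/* in-place swap */
		d.Words |= to_le64(frame_phys_addr);	/* flag bits stay clear */
		return d.Words;
	}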
/*
* disable the intr before firing the init frame
@@ -753,13 +746,13 @@ megasas_get_ld_map_info(struct megasas_instance *instance)
dcmd->cmd = MFI_CMD_DCMD;
dcmd->cmd_status = 0xFF;
dcmd->sge_count = 1;
- dcmd->flags = MFI_FRAME_DIR_READ;
+ dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
dcmd->timeout = 0;
dcmd->pad_0 = 0;
- dcmd->data_xfer_len = size_map_info;
- dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
- dcmd->sgl.sge32[0].phys_addr = ci_h;
- dcmd->sgl.sge32[0].length = size_map_info;
+ dcmd->data_xfer_len = cpu_to_le32(size_map_info);
+ dcmd->opcode = cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO);
+ dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
+ dcmd->sgl.sge32[0].length = cpu_to_le32(size_map_info);
if (!megasas_issue_polled(instance, cmd))
ret = 0;
@@ -828,7 +821,7 @@ megasas_sync_map_info(struct megasas_instance *instance)
map = fusion->ld_map[instance->map_id & 1];
- num_lds = map->raidMap.ldCount;
+ num_lds = le32_to_cpu(map->raidMap.ldCount);
dcmd = &cmd->frame->dcmd;
@@ -856,15 +849,15 @@ megasas_sync_map_info(struct megasas_instance *instance)
dcmd->cmd = MFI_CMD_DCMD;
dcmd->cmd_status = 0xFF;
dcmd->sge_count = 1;
- dcmd->flags = MFI_FRAME_DIR_WRITE;
+ dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_WRITE);
dcmd->timeout = 0;
dcmd->pad_0 = 0;
- dcmd->data_xfer_len = size_map_info;
+ dcmd->data_xfer_len = cpu_to_le32(size_map_info);
dcmd->mbox.b[0] = num_lds;
dcmd->mbox.b[1] = MEGASAS_DCMD_MBOX_PEND_FLAG;
- dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
- dcmd->sgl.sge32[0].phys_addr = ci_h;
- dcmd->sgl.sge32[0].length = size_map_info;
+ dcmd->opcode = cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO);
+ dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
+ dcmd->sgl.sge32[0].length = cpu_to_le32(size_map_info);
instance->map_update_cmd = cmd;
@@ -1067,9 +1060,8 @@ megasas_fire_cmd_fusion(struct megasas_instance *instance,
spin_lock_irqsave(&instance->hba_lock, flags);
- writel(req_desc_lo,
- &(regs)->inbound_low_queue_port);
- writel(req_desc_hi, &(regs)->inbound_high_queue_port);
+ writel(le32_to_cpu(req_desc_lo), &(regs)->inbound_low_queue_port);
+ writel(le32_to_cpu(req_desc_hi), &(regs)->inbound_high_queue_port);
spin_unlock_irqrestore(&instance->hba_lock, flags);
}
@@ -1157,8 +1149,8 @@ megasas_make_sgl_fusion(struct megasas_instance *instance,
return sge_count;
scsi_for_each_sg(scp, os_sgl, sge_count, i) {
- sgl_ptr->Length = sg_dma_len(os_sgl);
- sgl_ptr->Address = sg_dma_address(os_sgl);
+ sgl_ptr->Length = cpu_to_le32(sg_dma_len(os_sgl));
+ sgl_ptr->Address = cpu_to_le64(sg_dma_address(os_sgl));
sgl_ptr->Flags = 0;
if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
(instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) {
@@ -1177,9 +1169,9 @@ megasas_make_sgl_fusion(struct megasas_instance *instance,
PCI_DEVICE_ID_LSI_INVADER) ||
(instance->pdev->device ==
PCI_DEVICE_ID_LSI_FURY)) {
- if ((cmd->io_request->IoFlags &
- MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) !=
- MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH)
+ if ((le16_to_cpu(cmd->io_request->IoFlags) &
+ MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) !=
+ MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH)
cmd->io_request->ChainOffset =
fusion->
chain_offset_io_request;
@@ -1201,9 +1193,8 @@ megasas_make_sgl_fusion(struct megasas_instance *instance,
sg_chain->Flags =
(IEEE_SGE_FLAGS_CHAIN_ELEMENT |
MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR);
- sg_chain->Length = (sizeof(union MPI2_SGE_IO_UNION)
- *(sge_count - sg_processed));
- sg_chain->Address = cmd->sg_frame_phys_addr;
+ sg_chain->Length = cpu_to_le32((sizeof(union MPI2_SGE_IO_UNION) * (sge_count - sg_processed)));
+ sg_chain->Address = cpu_to_le64(cmd->sg_frame_phys_addr);
sgl_ptr =
(struct MPI25_IEEE_SGE_CHAIN64 *)cmd->sg_frame;
@@ -1261,7 +1252,7 @@ megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len,
io_request->CDB.EEDP32.PrimaryReferenceTag =
cpu_to_be32(ref_tag);
io_request->CDB.EEDP32.PrimaryApplicationTagMask = 0xffff;
- io_request->IoFlags = 32; /* Specify 32-byte cdb */
+ io_request->IoFlags = cpu_to_le16(32); /* Specify 32-byte cdb */
/* Transfer length */
cdb[28] = (u8)((num_blocks >> 24) & 0xff);
@@ -1271,19 +1262,19 @@ megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len,
/* set SCSI IO EEDPFlags */
if (scp->sc_data_direction == PCI_DMA_FROMDEVICE) {
- io_request->EEDPFlags =
+ io_request->EEDPFlags = cpu_to_le16(
MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP |
MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG |
- MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
+ MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
} else {
- io_request->EEDPFlags =
+ io_request->EEDPFlags = cpu_to_le16(
MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
- MPI2_SCSIIO_EEDPFLAGS_INSERT_OP;
+ MPI2_SCSIIO_EEDPFLAGS_INSERT_OP);
}
- io_request->Control |= (0x4 << 26);
- io_request->EEDPBlockSize = scp->device->sector_size;
+ io_request->Control |= cpu_to_le32((0x4 << 26));
+ io_request->EEDPBlockSize = cpu_to_le32(scp->device->sector_size);
} else {
/* Some drives don't support 16/12 byte CDB's, convert to 10 */
if (((cdb_len == 12) || (cdb_len == 16)) &&
@@ -1311,7 +1302,7 @@ megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len,
cdb[8] = (u8)(num_blocks & 0xff);
cdb[7] = (u8)((num_blocks >> 8) & 0xff);
- io_request->IoFlags = 10; /* Specify 10-byte cdb */
+ io_request->IoFlags = cpu_to_le16(10); /* Specify 10-byte cdb */
cdb_len = 10;
} else if ((cdb_len < 16) && (start_blk > 0xffffffff)) {
/* Convert to 16 byte CDB for large LBA's */
@@ -1349,7 +1340,7 @@ megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len,
cdb[11] = (u8)((num_blocks >> 16) & 0xff);
cdb[10] = (u8)((num_blocks >> 24) & 0xff);
- io_request->IoFlags = 16; /* Specify 16-byte cdb */
+ io_request->IoFlags = cpu_to_le16(16); /* Specify 16-byte cdb */
cdb_len = 16;
}
@@ -1410,13 +1401,14 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
struct IO_REQUEST_INFO io_info;
struct fusion_context *fusion;
struct MR_FW_RAID_MAP_ALL *local_map_ptr;
+ u8 *raidLUN;
device_id = MEGASAS_DEV_INDEX(instance, scp);
fusion = instance->ctrl_context;
io_request = cmd->io_request;
- io_request->RaidContext.VirtualDiskTgtId = device_id;
+ io_request->RaidContext.VirtualDiskTgtId = cpu_to_le16(device_id);
io_request->RaidContext.status = 0;
io_request->RaidContext.exStatus = 0;
@@ -1480,7 +1472,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
io_info.ldStartBlock = ((u64)start_lba_hi << 32) | start_lba_lo;
io_info.numBlocks = datalength;
io_info.ldTgtId = device_id;
- io_request->DataLength = scsi_bufflen(scp);
+ io_request->DataLength = cpu_to_le32(scsi_bufflen(scp));
if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
io_info.isRead = 1;
@@ -1494,7 +1486,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
} else {
if (MR_BuildRaidContext(instance, &io_info,
&io_request->RaidContext,
- local_map_ptr))
+ local_map_ptr, &raidLUN))
fp_possible = io_info.fpOkForIo;
}
@@ -1520,8 +1512,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
io_request->RaidContext.Type = MPI2_TYPE_CUDA;
io_request->RaidContext.nseg = 0x1;
- io_request->IoFlags |=
- MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH;
+ io_request->IoFlags |= cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH);
io_request->RaidContext.regLockFlags |=
(MR_RL_FLAGS_GRANT_DESTINATION_CUDA |
MR_RL_FLAGS_SEQ_NUM_ENABLE);
@@ -1537,9 +1528,11 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
scp->SCp.Status &= ~MEGASAS_LOAD_BALANCE_FLAG;
cmd->request_desc->SCSIIO.DevHandle = io_info.devHandle;
io_request->DevHandle = io_info.devHandle;
+ /* populate the LUN field */
+ memcpy(io_request->LUN, raidLUN, 8);
} else {
io_request->RaidContext.timeoutValue =
- local_map_ptr->raidMap.fpPdIoTimeoutSec;
+ cpu_to_le16(local_map_ptr->raidMap.fpPdIoTimeoutSec);
cmd->request_desc->SCSIIO.RequestFlags =
(MEGASAS_REQ_DESCRIPT_FLAGS_LD_IO
<< MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
@@ -1557,7 +1550,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
io_request->RaidContext.nseg = 0x1;
}
io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST;
- io_request->DevHandle = device_id;
+ io_request->DevHandle = cpu_to_le16(device_id);
} /* Not FP */
}
@@ -1579,6 +1572,11 @@ megasas_build_dcdb_fusion(struct megasas_instance *instance,
u16 pd_index = 0;
struct MR_FW_RAID_MAP_ALL *local_map_ptr;
struct fusion_context *fusion = instance->ctrl_context;
+ u8 span, physArm;
+ u16 devHandle;
+ u32 ld, arRef, pd;
+ struct MR_LD_RAID *raid;
+ struct RAID_CONTEXT *pRAID_Context;
io_request = cmd->io_request;
device_id = MEGASAS_DEV_INDEX(instance, scmd);
@@ -1586,6 +1584,9 @@ megasas_build_dcdb_fusion(struct megasas_instance *instance,
+scmd->device->id;
local_map_ptr = fusion->ld_map[(instance->map_id & 1)];
+ io_request->DataLength = cpu_to_le32(scsi_bufflen(scmd));
+
/* Check if this is a system PD I/O */
if (scmd->device->channel < MEGASAS_MAX_PD_CHANNELS &&
instance->pd_list[pd_index].driveState == MR_PD_STATE_SYSTEM) {
@@ -1623,15 +1624,62 @@ megasas_build_dcdb_fusion(struct megasas_instance *instance,
scmd->request->timeout / HZ;
}
} else {
+ if (scmd->device->channel < MEGASAS_MAX_PD_CHANNELS)
+ goto NonFastPath;
+
+ ld = MR_TargetIdToLdGet(device_id, local_map_ptr);
+ if ((ld >= MAX_LOGICAL_DRIVES) || (!fusion->fast_path_io))
+ goto NonFastPath;
+
+ raid = MR_LdRaidGet(ld, local_map_ptr);
+
+ /* check if this LD is FP capable */
+ if (!(raid->capability.fpNonRWCapable))
+ /* not FP capable, send as non-FP */
+ goto NonFastPath;
+
+ /* get RAID_Context pointer */
+ pRAID_Context = &io_request->RaidContext;
+
+ /* set RAID context values */
+ pRAID_Context->regLockFlags = REGION_TYPE_SHARED_READ;
+ pRAID_Context->timeoutValue = raid->fpIoTimeoutForLd;
+ pRAID_Context->VirtualDiskTgtId = cpu_to_le16(device_id);
+ pRAID_Context->regLockRowLBA = 0;
+ pRAID_Context->regLockLength = 0;
+ pRAID_Context->configSeqNum = raid->seqNum;
+
+ /* get the DevHandle for the PD (since this is
+ fpNonRWCapable, this is a single disk RAID0) */
+ span = physArm = 0;
+ arRef = MR_LdSpanArrayGet(ld, span, local_map_ptr);
+ pd = MR_ArPdGet(arRef, physArm, local_map_ptr);
+ devHandle = MR_PdDevHandleGet(pd, local_map_ptr);
+
+ /* build request descriptor */
+ cmd->request_desc->SCSIIO.RequestFlags =
+ (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
+ MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+ cmd->request_desc->SCSIIO.DevHandle = devHandle;
+
+ /* populate the LUN field */
+ memcpy(io_request->LUN, raid->LUN, 8);
+
+ /* build the raidScsiIO structure */
+ io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
+ io_request->DevHandle = devHandle;
+
+ return;
+
+NonFastPath:
io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST;
- io_request->DevHandle = device_id;
+ io_request->DevHandle = cpu_to_le16(device_id);
cmd->request_desc->SCSIIO.RequestFlags =
(MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
}
- io_request->RaidContext.VirtualDiskTgtId = device_id;
+ io_request->RaidContext.VirtualDiskTgtId = cpu_to_le16(device_id);
io_request->LUN[1] = scmd->device->lun;
- io_request->DataLength = scsi_bufflen(scmd);
}
/**
@@ -1670,7 +1718,7 @@ megasas_build_io_fusion(struct megasas_instance *instance,
* Just the CDB length,rest of the Flags are zero
* This will be modified for FP in build_ldio_fusion
*/
- io_request->IoFlags = scp->cmd_len;
+ io_request->IoFlags = cpu_to_le16(scp->cmd_len);
if (megasas_is_ldio(scp))
megasas_build_ldio_fusion(instance, scp, cmd);
@@ -1695,17 +1743,17 @@ megasas_build_io_fusion(struct megasas_instance *instance,
io_request->RaidContext.numSGE = sge_count;
- io_request->SGLFlags = MPI2_SGE_FLAGS_64_BIT_ADDRESSING;
+ io_request->SGLFlags = cpu_to_le16(MPI2_SGE_FLAGS_64_BIT_ADDRESSING);
if (scp->sc_data_direction == PCI_DMA_TODEVICE)
- io_request->Control |= MPI2_SCSIIO_CONTROL_WRITE;
+ io_request->Control |= cpu_to_le32(MPI2_SCSIIO_CONTROL_WRITE);
else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
- io_request->Control |= MPI2_SCSIIO_CONTROL_READ;
+ io_request->Control |= cpu_to_le32(MPI2_SCSIIO_CONTROL_READ);
io_request->SGLOffset0 =
offsetof(struct MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4;
- io_request->SenseBufferLowAddress = cmd->sense_phys_addr;
+ io_request->SenseBufferLowAddress = cpu_to_le32(cmd->sense_phys_addr);
io_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE;
cmd->scmd = scp;
@@ -1770,7 +1818,7 @@ megasas_build_and_issue_cmd_fusion(struct megasas_instance *instance,
}
req_desc = cmd->request_desc;
- req_desc->SCSIIO.SMID = index;
+ req_desc->SCSIIO.SMID = cpu_to_le16(index);
if (cmd->io_request->ChainOffset != 0 &&
cmd->io_request->ChainOffset != 0xF)
@@ -1832,7 +1880,7 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex)
num_completed = 0;
while ((d_val.u.low != UINT_MAX) && (d_val.u.high != UINT_MAX)) {
- smid = reply_desc->SMID;
+ smid = le16_to_cpu(reply_desc->SMID);
cmd_fusion = fusion->cmd_list[smid - 1];
@@ -2050,12 +2098,12 @@ build_mpt_mfi_pass_thru(struct megasas_instance *instance,
SGL) / 4;
io_req->ChainOffset = fusion->chain_offset_mfi_pthru;
- mpi25_ieee_chain->Address = mfi_cmd->frame_phys_addr;
+ mpi25_ieee_chain->Address = cpu_to_le64(mfi_cmd->frame_phys_addr);
mpi25_ieee_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT |
MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR;
- mpi25_ieee_chain->Length = MEGASAS_MAX_SZ_CHAIN_FRAME;
+ mpi25_ieee_chain->Length = cpu_to_le32(MEGASAS_MAX_SZ_CHAIN_FRAME);
return 0;
}
@@ -2088,7 +2136,7 @@ build_mpt_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
req_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
- req_desc->SCSIIO.SMID = index;
+ req_desc->SCSIIO.SMID = cpu_to_le16(index);
return req_desc;
}
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h
index 4eb8401..35a5139 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.h
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h
@@ -93,8 +93,13 @@ enum MR_RAID_FLAGS_IO_SUB_TYPE {
*/
struct RAID_CONTEXT {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u8 nseg:4;
+ u8 Type:4;
+#else
u8 Type:4;
u8 nseg:4;
+#endif
u8 resvd0;
u16 timeoutValue;
u8 regLockFlags;
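C leaves bitfield ordering to the ABI, so a structure shared with little-endian firmware has to spell out both orders: under __BIG_ENDIAN_BITFIELD the members are declared in reverse so each field lands on the same wire bits as on a little-endian build. The pattern reduced to two fields (illustrative, mirroring the Type/nseg pair above; u8 comes from linux/types.h):

	#include <linux/types.h>
	#include <asm/byteorder.h>	/* defines the bitfield macros */

	struct nibble_pair_sketch {
	#if defined(__BIG_ENDIAN_BITFIELD)
		u8 high:4;
		u8 low:4;
	#else	/* little-endian bitfield order */
		u8 low:4;
		u8 high:4;
	#endif
	};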
@@ -298,8 +303,13 @@ struct MPI2_RAID_SCSI_IO_REQUEST {
* MPT RAID MFA IO Descriptor.
*/
struct MEGASAS_RAID_MFA_IO_REQUEST_DESCRIPTOR {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u32 MessageAddress1:24; /* bits 31:8*/
+ u32 RequestFlags:8;
+#else
u32 RequestFlags:8;
u32 MessageAddress1:24; /* bits 31:8*/
+#endif
u32 MessageAddress2; /* bits 61:32 */
};
@@ -518,6 +528,19 @@ struct MR_SPAN_BLOCK_INFO {
struct MR_LD_RAID {
struct {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u32 reserved4:7;
+ u32 fpNonRWCapable:1;
+ u32 fpReadAcrossStripe:1;
+ u32 fpWriteAcrossStripe:1;
+ u32 fpReadCapable:1;
+ u32 fpWriteCapable:1;
+ u32 encryptionType:8;
+ u32 pdPiMode:4;
+ u32 ldPiMode:4;
+ u32 reserved5:3;
+ u32 fpCapable:1;
+#else
u32 fpCapable:1;
u32 reserved5:3;
u32 ldPiMode:4;
@@ -527,7 +550,9 @@ struct MR_LD_RAID {
u32 fpReadCapable:1;
u32 fpWriteAcrossStripe:1;
u32 fpReadAcrossStripe:1;
- u32 reserved4:8;
+ u32 fpNonRWCapable:1;
+ u32 reserved4:7;
+#endif
} capability;
u32 reserved6;
u64 size;
@@ -551,7 +576,9 @@ struct MR_LD_RAID {
u32 reserved:31;
} flags;
- u8 reserved3[0x5C];
+ u8 LUN[8]; /* 0x24 8 byte LUN field used for SCSI IO's */
+ u8 fpIoTimeoutForLd;/*0x2C timeout value used by driver in FP IO*/
+ u8 reserved3[0x80-0x2D]; /* 0x2D */
};
struct MR_LD_SPAN_MAP {
diff --git a/drivers/scsi/mpt3sas/Makefile b/drivers/scsi/mpt3sas/Makefile
index 4c1d2e7..efb0c4c 100644
--- a/drivers/scsi/mpt3sas/Makefile
+++ b/drivers/scsi/mpt3sas/Makefile
@@ -1,5 +1,5 @@
# mpt3sas makefile
-obj-m += mpt3sas.o
+obj-$(CONFIG_SCSI_MPT3SAS) += mpt3sas.o
mpt3sas-y += mpt3sas_base.o \
mpt3sas_config.o \
mpt3sas_scsih.o \
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index ff12d46..5964800 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -10,7 +10,7 @@
*
* Forward port and refactoring to modern qla2xxx and target/configfs
*
- * Copyright (C) 2010-2011 Nicholas A. Bellinger <nab@kernel.org>
+ * Copyright (C) 2010-2013 Nicholas A. Bellinger <nab@kernel.org>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index a6da313..f85b9e5 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -2,12 +2,9 @@
* This file contains tcm implementation using v4 configfs fabric infrastructure
* for QLogic target mode HBAs
*
- * (c) Copyright 2010-2011 RisingTide Systems LLC.
+ * (c) Copyright 2010-2013 Datera, Inc.
*
- * Licensed to the Linux Foundation under the General Public License (GPL)
- * version 2.
- *
- * Author: Nicholas A. Bellinger <nab@risingtidesystems.com>
+ * Author: Nicholas A. Bellinger <nab@daterainc.com>
*
* tcm_qla2xxx_parse_wwn() and tcm_qla2xxx_format_wwn() contains code from
* the TCM_FC / Open-FCoE.org fabric module.
@@ -360,6 +357,14 @@ static int tcm_qla2xxx_check_prod_write_protect(struct se_portal_group *se_tpg)
return QLA_TPG_ATTRIB(tpg)->prod_mode_write_protect;
}
+static int tcm_qla2xxx_check_demo_mode_login_only(struct se_portal_group *se_tpg)
+{
+ struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+ struct tcm_qla2xxx_tpg, se_tpg);
+
+ return QLA_TPG_ATTRIB(tpg)->demo_mode_login_only;
+}
+
static struct se_node_acl *tcm_qla2xxx_alloc_fabric_acl(
struct se_portal_group *se_tpg)
{
@@ -489,38 +494,13 @@ static u32 tcm_qla2xxx_sess_get_index(struct se_session *se_sess)
return 0;
}
-/*
- * The LIO target core uses DMA_TO_DEVICE to mean that data is going
- * to the target (eg handling a WRITE) and DMA_FROM_DEVICE to mean
- * that data is coming from the target (eg handling a READ). However,
- * this is just the opposite of what we have to tell the DMA mapping
- * layer -- eg when handling a READ, the HBA will have to DMA the data
- * out of memory so it can send it to the initiator, which means we
- * need to use DMA_TO_DEVICE when we map the data.
- */
-static enum dma_data_direction tcm_qla2xxx_mapping_dir(struct se_cmd *se_cmd)
-{
- if (se_cmd->se_cmd_flags & SCF_BIDI)
- return DMA_BIDIRECTIONAL;
-
- switch (se_cmd->data_direction) {
- case DMA_TO_DEVICE:
- return DMA_FROM_DEVICE;
- case DMA_FROM_DEVICE:
- return DMA_TO_DEVICE;
- case DMA_NONE:
- default:
- return DMA_NONE;
- }
-}
-
static int tcm_qla2xxx_write_pending(struct se_cmd *se_cmd)
{
struct qla_tgt_cmd *cmd = container_of(se_cmd,
struct qla_tgt_cmd, se_cmd);
cmd->bufflen = se_cmd->data_length;
- cmd->dma_data_direction = tcm_qla2xxx_mapping_dir(se_cmd);
+ cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);
cmd->sg_cnt = se_cmd->t_data_nents;
cmd->sg = se_cmd->t_data_sg;
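
The deleted helper's logic now lives in the target core as target_reverse_dma_direction(): as the removed comment explains, the fabric's notion of direction is the inverse of what the DMA mapping layer needs. A sketch of the equivalent logic, assuming the same se_cmd fields the deleted helper used:

/* Sketch of the replacement's behaviour, mirroring the deleted helper. */
static enum dma_data_direction reverse_dma_direction(struct se_cmd *se_cmd)
{
	if (se_cmd->se_cmd_flags & SCF_BIDI)
		return DMA_BIDIRECTIONAL;

	switch (se_cmd->data_direction) {
	case DMA_TO_DEVICE:	/* fabric WRITE: HBA deposits data in memory */
		return DMA_FROM_DEVICE;
	case DMA_FROM_DEVICE:	/* fabric READ: HBA pulls data from memory */
		return DMA_TO_DEVICE;
	default:
		return DMA_NONE;
	}
}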
@@ -656,7 +636,7 @@ static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd)
struct qla_tgt_cmd, se_cmd);
cmd->bufflen = se_cmd->data_length;
- cmd->dma_data_direction = tcm_qla2xxx_mapping_dir(se_cmd);
+ cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);
cmd->aborted = (se_cmd->transport_state & CMD_T_ABORTED);
cmd->sg_cnt = se_cmd->t_data_nents;
@@ -680,7 +660,7 @@ static int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd)
cmd->sg = NULL;
cmd->sg_cnt = 0;
cmd->offset = 0;
- cmd->dma_data_direction = tcm_qla2xxx_mapping_dir(se_cmd);
+ cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);
cmd->aborted = (se_cmd->transport_state & CMD_T_ABORTED);
if (se_cmd->data_direction == DMA_FROM_DEVICE) {
@@ -939,11 +919,19 @@ DEF_QLA_TPG_ATTR_BOOL(prod_mode_write_protect);
DEF_QLA_TPG_ATTRIB(prod_mode_write_protect);
QLA_TPG_ATTR(prod_mode_write_protect, S_IRUGO | S_IWUSR);
+/*
+ * Define tcm_qla2xxx_tpg_attrib_s_demo_mode_login_only
+ */
+DEF_QLA_TPG_ATTR_BOOL(demo_mode_login_only);
+DEF_QLA_TPG_ATTRIB(demo_mode_login_only);
+QLA_TPG_ATTR(demo_mode_login_only, S_IRUGO | S_IWUSR);
+
static struct configfs_attribute *tcm_qla2xxx_tpg_attrib_attrs[] = {
&tcm_qla2xxx_tpg_attrib_generate_node_acls.attr,
&tcm_qla2xxx_tpg_attrib_cache_dynamic_acls.attr,
&tcm_qla2xxx_tpg_attrib_demo_mode_write_protect.attr,
&tcm_qla2xxx_tpg_attrib_prod_mode_write_protect.attr,
+ &tcm_qla2xxx_tpg_attrib_demo_mode_login_only.attr,
NULL,
};
@@ -1042,6 +1030,7 @@ static struct se_portal_group *tcm_qla2xxx_make_tpg(
QLA_TPG_ATTRIB(tpg)->generate_node_acls = 1;
QLA_TPG_ATTRIB(tpg)->demo_mode_write_protect = 1;
QLA_TPG_ATTRIB(tpg)->cache_dynamic_acls = 1;
+ QLA_TPG_ATTRIB(tpg)->demo_mode_login_only = 1;
ret = core_tpg_register(&tcm_qla2xxx_fabric_configfs->tf_ops, wwn,
&tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
@@ -1736,7 +1725,7 @@ static struct target_core_fabric_ops tcm_qla2xxx_ops = {
tcm_qla2xxx_check_demo_write_protect,
.tpg_check_prod_mode_write_protect =
tcm_qla2xxx_check_prod_write_protect,
- .tpg_check_demo_mode_login_only = tcm_qla2xxx_check_true,
+ .tpg_check_demo_mode_login_only = tcm_qla2xxx_check_demo_mode_login_only,
.tpg_alloc_fabric_acl = tcm_qla2xxx_alloc_fabric_acl,
.tpg_release_fabric_acl = tcm_qla2xxx_release_fabric_acl,
.tpg_get_inst_index = tcm_qla2xxx_tpg_get_inst_index,
@@ -1784,7 +1773,7 @@ static struct target_core_fabric_ops tcm_qla2xxx_npiv_ops = {
.tpg_check_demo_mode_cache = tcm_qla2xxx_check_true,
.tpg_check_demo_mode_write_protect = tcm_qla2xxx_check_true,
.tpg_check_prod_mode_write_protect = tcm_qla2xxx_check_false,
- .tpg_check_demo_mode_login_only = tcm_qla2xxx_check_true,
+ .tpg_check_demo_mode_login_only = tcm_qla2xxx_check_demo_mode_login_only,
.tpg_alloc_fabric_acl = tcm_qla2xxx_alloc_fabric_acl,
.tpg_release_fabric_acl = tcm_qla2xxx_release_fabric_acl,
.tpg_get_inst_index = tcm_qla2xxx_tpg_get_inst_index,
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.h b/drivers/scsi/qla2xxx/tcm_qla2xxx.h
index 9ba075f..3293275 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.h
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.h
@@ -29,6 +29,7 @@ struct tcm_qla2xxx_tpg_attrib {
int cache_dynamic_acls;
int demo_mode_write_protect;
int prod_mode_write_protect;
+ int demo_mode_login_only;
};
struct tcm_qla2xxx_tpg {
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index b58e8f8..e62d17d 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -2420,14 +2420,9 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
}
}
- if (modepage == 0x3F) {
- sd_printk(KERN_ERR, sdkp, "No Caching mode page "
- "present\n");
- goto defaults;
- } else if ((buffer[offset] & 0x3f) != modepage) {
- sd_printk(KERN_ERR, sdkp, "Got wrong page\n");
- goto defaults;
- }
+ sd_printk(KERN_ERR, sdkp, "No Caching mode page found\n");
+ goto defaults;
+
Page_found:
if (modepage == 8) {
sdkp->WCE = ((buffer[offset + 2] & 0x04) != 0);
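
For reference, the Page_found decode reads byte 2 of the Caching mode page (page 0x08), where bit 2 is WCE (write cache enable) and bit 0 is RCD (read cache disable). A standalone sketch with hypothetical page bytes:

#include <stdio.h>

int main(void)
{
	/* Hypothetical Caching mode page: code 0x08, length, flags byte. */
	unsigned char page[3] = { 0x08, 0x12, 0x04 };
	int wce = (page[2] & 0x04) != 0;	/* bit 2: write cache enable */
	int rcd = (page[2] & 0x01) != 0;	/* bit 0: read cache disable */

	printf("WCE=%d RCD=%d\n", wce, rcd);	/* WCE=1 RCD=0 */
	return 0;
}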
diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h
index bce09a6..7210500 100644
--- a/drivers/scsi/ufs/ufs.h
+++ b/drivers/scsi/ufs/ufs.h
@@ -177,6 +177,7 @@ enum {
MASK_TASK_RESPONSE = 0xFF00,
MASK_RSP_UPIU_RESULT = 0xFFFF,
MASK_QUERY_DATA_SEG_LEN = 0xFFFF,
+ MASK_RSP_UPIU_DATA_SEG_LEN = 0xFFFF,
MASK_RSP_EXCEPTION_EVENT = 0x10000,
};
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index b36ca9a..04884d6 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -36,9 +36,11 @@
#include <linux/async.h>
#include "ufshcd.h"
+#include "unipro.h"
#define UFSHCD_ENABLE_INTRS (UTP_TRANSFER_REQ_COMPL |\
UTP_TASK_REQ_COMPL |\
+ UIC_POWER_MODE |\
UFSHCD_ERROR_MASK)
/* UIC command timeout, unit: ms */
#define UIC_CMD_TIMEOUT 500
@@ -56,6 +58,9 @@
/* Expose the flag value from utp_upiu_query.value */
#define MASK_QUERY_UPIU_FLAG_LOC 0xFF
+/* Interrupt aggregation default timeout, unit: 40us */
+#define INT_AGGR_DEF_TO 0x02
+
enum {
UFSHCD_MAX_CHANNEL = 0,
UFSHCD_MAX_ID = 1,
@@ -78,12 +83,6 @@ enum {
UFSHCD_INT_CLEAR,
};
-/* Interrupt aggregation options */
-enum {
- INT_AGGR_RESET,
- INT_AGGR_CONFIG,
-};
-
/*
* ufshcd_wait_for_register - wait for register value to change
* @hba - per-adapter interface
@@ -238,6 +237,18 @@ static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
}
/**
+ * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command
+ * @hba: Pointer to adapter instance
+ *
+ * This function reads UIC command argument 3
+ * Returns the value of UIC command argument 3 (the returned attribute value)
+ */
+static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
+{
+ return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3);
+}
+
+/**
* ufshcd_get_req_rsp - returns the TR response transaction type
* @ucd_rsp_ptr: pointer to response UPIU
*/
@@ -260,6 +271,20 @@ ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT;
}
+/*
+ * ufshcd_get_rsp_upiu_data_seg_len - Get the data segment length
+ * from response UPIU
+ * @ucd_rsp_ptr: pointer to response UPIU
+ *
+ * Return the data segment length.
+ */
+static inline unsigned int
+ufshcd_get_rsp_upiu_data_seg_len(struct utp_upiu_rsp *ucd_rsp_ptr)
+{
+ return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
+ MASK_RSP_UPIU_DATA_SEG_LEN;
+}
+
/**
* ufshcd_is_exception_event - Check if the device raised an exception event
* @ucd_rsp_ptr: pointer to response UPIU
@@ -276,30 +301,30 @@ static inline bool ufshcd_is_exception_event(struct utp_upiu_rsp *ucd_rsp_ptr)
}
/**
- * ufshcd_config_int_aggr - Configure interrupt aggregation values.
- * Currently there is no use case where we want to configure
- * interrupt aggregation dynamically. So to configure interrupt
- * aggregation, #define INT_AGGR_COUNTER_THRESHOLD_VALUE and
- * INT_AGGR_TIMEOUT_VALUE are used.
+ * ufshcd_reset_intr_aggr - Reset interrupt aggregation values.
* @hba: per adapter instance
- * @option: Interrupt aggregation option
*/
static inline void
-ufshcd_config_int_aggr(struct ufs_hba *hba, int option)
+ufshcd_reset_intr_aggr(struct ufs_hba *hba)
{
- switch (option) {
- case INT_AGGR_RESET:
- ufshcd_writel(hba, INT_AGGR_ENABLE |
- INT_AGGR_COUNTER_AND_TIMER_RESET,
- REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
- break;
- case INT_AGGR_CONFIG:
- ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
- INT_AGGR_COUNTER_THRESHOLD_VALUE |
- INT_AGGR_TIMEOUT_VALUE,
- REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
- break;
- }
+ ufshcd_writel(hba, INT_AGGR_ENABLE |
+ INT_AGGR_COUNTER_AND_TIMER_RESET,
+ REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
+}
+
+/**
+ * ufshcd_config_intr_aggr - Configure interrupt aggregation values.
+ * @hba: per adapter instance
+ * @cnt: Interrupt aggregation counter threshold
+ * @tmout: Interrupt aggregation timeout value
+ */
+static inline void
+ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout)
+{
+ ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
+ INT_AGGR_COUNTER_THLD_VAL(cnt) |
+ INT_AGGR_TIMEOUT_VAL(tmout),
+ REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}
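
With the parameterized macros, the field value the driver writes by default (cnt = hba->nutrs - 1, e.g. 31 on an assumed 32-slot controller; tmout = INT_AGGR_DEF_TO = 0x02, i.e. 80 us at the 40 us granularity) can be checked standalone. The INT_AGGR_ENABLE and INT_AGGR_PARAM_WRITE control bits are omitted here:

#include <stdio.h>

#define INT_AGGR_COUNTER_THLD_VAL(c)	(((c) & 0x1F) << 8)
#define INT_AGGR_TIMEOUT_VAL(t)		(((t) & 0xFF) << 0)

int main(void)
{
	unsigned int cnt = 31, tmout = 0x02;	/* assumed defaults */
	unsigned int val = INT_AGGR_COUNTER_THLD_VAL(cnt) |
			   INT_AGGR_TIMEOUT_VAL(tmout);

	printf("threshold/timeout fields: 0x%04x\n", val);	/* 0x1f02 */
	return 0;
}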
/**
@@ -355,7 +380,8 @@ void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
{
int len;
- if (lrbp->sense_buffer) {
+ if (lrbp->sense_buffer &&
+ ufshcd_get_rsp_upiu_data_seg_len(lrbp->ucd_rsp_ptr)) {
len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len);
memcpy(lrbp->sense_buffer,
lrbp->ucd_rsp_ptr->sr.sense_data,
@@ -446,6 +472,18 @@ static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
}
/**
+ * ufshcd_get_upmcrs - Get the power mode change request status
+ * @hba: Pointer to adapter instance
+ *
+ * This function gets the UPMCRS field of HCS register
+ * Returns value of UPMCRS field
+ */
+static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba)
+{
+ return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7;
+}
+
+/**
* ufshcd_dispatch_uic_cmd - Dispatch UIC commands to unipro layers
* @hba: per adapter instance
* @uic_cmd: UIC command
@@ -1362,6 +1400,202 @@ static int ufshcd_dme_link_startup(struct ufs_hba *hba)
}
/**
+ * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET
+ * @hba: per adapter instance
+ * @attr_sel: uic command argument1
+ * @attr_set: attribute set type as uic command argument2
+ * @mib_val: setting value as uic command argument3
+ * @peer: indicate whether peer or local
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
+ u8 attr_set, u32 mib_val, u8 peer)
+{
+ struct uic_command uic_cmd = {0};
+ static const char *const action[] = {
+ "dme-set",
+ "dme-peer-set"
+ };
+ const char *set = action[!!peer];
+ int ret;
+
+ uic_cmd.command = peer ?
+ UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET;
+ uic_cmd.argument1 = attr_sel;
+ uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set);
+ uic_cmd.argument3 = mib_val;
+
+ ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
+ if (ret)
+ dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
+ set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ufshcd_dme_set_attr);
+
+/**
+ * ufshcd_dme_get_attr - UIC command for DME_GET, DME_PEER_GET
+ * @hba: per adapter instance
+ * @attr_sel: uic command argument1
+ * @mib_val: the value of the attribute as returned by the UIC command
+ * @peer: indicate whether peer or local
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
+ u32 *mib_val, u8 peer)
+{
+ struct uic_command uic_cmd = {0};
+ static const char *const action[] = {
+ "dme-get",
+ "dme-peer-get"
+ };
+ const char *get = action[!!peer];
+ int ret;
+
+ uic_cmd.command = peer ?
+ UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
+ uic_cmd.argument1 = attr_sel;
+
+ ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
+ if (ret) {
+ dev_err(hba->dev, "%s: attr-id 0x%x error code %d\n",
+ get, UIC_GET_ATTR_ID(attr_sel), ret);
+ goto out;
+ }
+
+ if (mib_val)
+ *mib_val = uic_cmd.argument3;
+out:
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
+
+/**
+ * ufshcd_uic_change_pwr_mode - Perform the UIC power mode change
+ * using DME_SET primitives.
+ * @hba: per adapter instance
+ * @mode: power mode value
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
+{
+ struct uic_command uic_cmd = {0};
+ struct completion pwr_done;
+ unsigned long flags;
+ u8 status;
+ int ret;
+
+ uic_cmd.command = UIC_CMD_DME_SET;
+ uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
+ uic_cmd.argument3 = mode;
+ init_completion(&pwr_done);
+
+ mutex_lock(&hba->uic_cmd_mutex);
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ hba->pwr_done = &pwr_done;
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ ret = __ufshcd_send_uic_cmd(hba, &uic_cmd);
+ if (ret) {
+ dev_err(hba->dev,
+ "pwr mode change with mode 0x%x uic error %d\n",
+ mode, ret);
+ goto out;
+ }
+
+ if (!wait_for_completion_timeout(hba->pwr_done,
+ msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
+ dev_err(hba->dev,
+ "pwr mode change with mode 0x%x completion timeout\n",
+ mode);
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ status = ufshcd_get_upmcrs(hba);
+ if (status != PWR_LOCAL) {
+ dev_err(hba->dev,
+ "pwr mode change failed, host umpcrs:0x%x\n",
+ status);
+ ret = (status != PWR_OK) ? status : -1;
+ }
+out:
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ hba->pwr_done = NULL;
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ mutex_unlock(&hba->uic_cmd_mutex);
+ return ret;
+}
+
+/**
+ * ufshcd_config_max_pwr_mode - Set & Change power mode with
+ * maximum capability attribute information.
+ * @hba: per adapter instance
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+static int ufshcd_config_max_pwr_mode(struct ufs_hba *hba)
+{
+ enum {RX = 0, TX = 1};
+ u32 lanes[] = {1, 1};
+ u32 gear[] = {1, 1};
+ u8 pwr[] = {FASTAUTO_MODE, FASTAUTO_MODE};
+ int ret;
+
+ /* Get the connected lane count */
+ ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES), &lanes[RX]);
+ ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), &lanes[TX]);
+
+ /*
+ * First, get the maximum gears of HS speed.
+ * If a zero value, it means there is no HSGEAR capability.
+ * Then, get the maximum gears of PWM speed.
+ */
+ ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &gear[RX]);
+ if (!gear[RX]) {
+ ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR), &gear[RX]);
+ pwr[RX] = SLOWAUTO_MODE;
+ }
+
+ ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &gear[TX]);
+ if (!gear[TX]) {
+ ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
+ &gear[TX]);
+ pwr[TX] = SLOWAUTO_MODE;
+ }
+
+ /*
+ * Configure attributes for power mode change with below.
+ * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION,
+ * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION,
+ * - PA_HSSERIES
+ */
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), gear[RX]);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES), lanes[RX]);
+ if (pwr[RX] == FASTAUTO_MODE)
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE);
+
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), gear[TX]);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES), lanes[TX]);
+ if (pwr[TX] == FASTAUTO_MODE)
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE);
+
+ if (pwr[RX] == FASTAUTO_MODE || pwr[TX] == FASTAUTO_MODE)
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES), PA_HS_MODE_B);
+
+ ret = ufshcd_uic_change_pwr_mode(hba, pwr[RX] << 4 | pwr[TX]);
+ if (ret)
+ dev_err(hba->dev,
+ "pwr_mode: power mode change failed %d\n", ret);
+
+ return ret;
+}
+
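
ufshcd_uic_change_pwr_mode() takes the RX mode in bits 7:4 and the TX mode in bits 3:0, so with both directions in FASTAUTO_MODE (4) the DME_SET of PA_PWRMODE carries 0x44; an RX in SLOWAUTO_MODE (5) would give 0x54. A quick standalone check of the packing:

#include <stdio.h>

enum { FAST_MODE = 1, SLOW_MODE = 2, FASTAUTO_MODE = 4, SLOWAUTO_MODE = 5 };

int main(void)
{
	unsigned int rx = FASTAUTO_MODE, tx = FASTAUTO_MODE;

	printf("PA_PWRMODE value: 0x%02x\n", rx << 4 | tx);	/* 0x44 */
	return 0;
}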
+/**
* ufshcd_complete_dev_init() - checks device readiness
* hba: per-adapter instance
*
@@ -1442,7 +1676,7 @@ static int ufshcd_make_hba_operational(struct ufs_hba *hba)
ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);
/* Configure interrupt aggregation */
- ufshcd_config_int_aggr(hba, INT_AGGR_CONFIG);
+ ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);
/* Configure UTRL and UTMRL base address registers */
ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
@@ -1788,32 +2022,24 @@ ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
int result = 0;
switch (scsi_status) {
- case SAM_STAT_GOOD:
- result |= DID_OK << 16 |
- COMMAND_COMPLETE << 8 |
- SAM_STAT_GOOD;
- break;
case SAM_STAT_CHECK_CONDITION:
+ ufshcd_copy_sense_data(lrbp);
+ case SAM_STAT_GOOD:
result |= DID_OK << 16 |
COMMAND_COMPLETE << 8 |
- SAM_STAT_CHECK_CONDITION;
- ufshcd_copy_sense_data(lrbp);
- break;
- case SAM_STAT_BUSY:
- result |= SAM_STAT_BUSY;
+ scsi_status;
break;
case SAM_STAT_TASK_SET_FULL:
-
/*
* If a LUN reports SAM_STAT_TASK_SET_FULL, then the LUN queue
* depth needs to be adjusted to the exact number of
* outstanding commands the LUN can handle at any given time.
*/
ufshcd_adjust_lun_qdepth(lrbp->cmd);
- result |= SAM_STAT_TASK_SET_FULL;
- break;
+ case SAM_STAT_BUSY:
case SAM_STAT_TASK_ABORTED:
- result |= SAM_STAT_TASK_ABORTED;
+ ufshcd_copy_sense_data(lrbp);
+ result |= scsi_status;
break;
default:
result |= DID_ERROR << 16;
@@ -1898,14 +2124,20 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
/**
* ufshcd_uic_cmd_compl - handle completion of uic command
* @hba: per adapter instance
+ * @intr_status: interrupt status generated by the controller
*/
-static void ufshcd_uic_cmd_compl(struct ufs_hba *hba)
+static void ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
{
- if (hba->active_uic_cmd) {
+ if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
hba->active_uic_cmd->argument2 |=
ufshcd_get_uic_cmd_result(hba);
+ hba->active_uic_cmd->argument3 =
+ ufshcd_get_dme_attr_val(hba);
complete(&hba->active_uic_cmd->done);
}
+
+ if ((intr_status & UIC_POWER_MODE) && hba->pwr_done)
+ complete(hba->pwr_done);
}
/**
@@ -1960,7 +2192,7 @@ static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
/* Reset interrupt aggregation counters */
if (int_aggr_reset)
- ufshcd_config_int_aggr(hba, INT_AGGR_RESET);
+ ufshcd_reset_intr_aggr(hba);
}
/**
@@ -2251,8 +2483,8 @@ static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
if (hba->errors)
ufshcd_err_handler(hba);
- if (intr_status & UIC_COMMAND_COMPL)
- ufshcd_uic_cmd_compl(hba);
+ if (intr_status & UFSHCD_UIC_MASK)
+ ufshcd_uic_cmd_compl(hba, intr_status);
if (intr_status & UTP_TASK_REQ_COMPL)
ufshcd_tmc_handler(hba);
@@ -2494,6 +2726,8 @@ static void ufshcd_async_scan(void *data, async_cookie_t cookie)
if (ret)
goto out;
+ ufshcd_config_max_pwr_mode(hba);
+
ret = ufshcd_verify_dev_init(hba);
if (ret)
goto out;
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index 59c9c48..577679a 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -175,6 +175,7 @@ struct ufs_dev_cmd {
* @active_uic_cmd: handle of active UIC command
* @uic_cmd_mutex: mutex for uic command
* @ufshcd_tm_wait_queue: wait queue for task management
+ * @pwr_done: completion for power mode change
* @tm_condition: condition variable for task management
* @ufshcd_state: UFSHCD states
* @intr_mask: Interrupt Mask Bits
@@ -219,6 +220,8 @@ struct ufs_hba {
wait_queue_head_t ufshcd_tm_wait_queue;
unsigned long tm_condition;
+ struct completion *pwr_done;
+
u32 ufshcd_state;
u32 intr_mask;
u16 ee_ctrl_mask;
@@ -263,4 +266,55 @@ static inline void check_upiu_size(void)
extern int ufshcd_runtime_suspend(struct ufs_hba *hba);
extern int ufshcd_runtime_resume(struct ufs_hba *hba);
extern int ufshcd_runtime_idle(struct ufs_hba *hba);
+extern int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
+ u8 attr_set, u32 mib_val, u8 peer);
+extern int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
+ u32 *mib_val, u8 peer);
+
+/* UIC command interfaces for DME primitives */
+#define DME_LOCAL 0
+#define DME_PEER 1
+#define ATTR_SET_NOR 0 /* NORMAL */
+#define ATTR_SET_ST 1 /* STATIC */
+
+static inline int ufshcd_dme_set(struct ufs_hba *hba, u32 attr_sel,
+ u32 mib_val)
+{
+ return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_NOR,
+ mib_val, DME_LOCAL);
+}
+
+static inline int ufshcd_dme_st_set(struct ufs_hba *hba, u32 attr_sel,
+ u32 mib_val)
+{
+ return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_ST,
+ mib_val, DME_LOCAL);
+}
+
+static inline int ufshcd_dme_peer_set(struct ufs_hba *hba, u32 attr_sel,
+ u32 mib_val)
+{
+ return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_NOR,
+ mib_val, DME_PEER);
+}
+
+static inline int ufshcd_dme_peer_st_set(struct ufs_hba *hba, u32 attr_sel,
+ u32 mib_val)
+{
+ return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_ST,
+ mib_val, DME_PEER);
+}
+
+static inline int ufshcd_dme_get(struct ufs_hba *hba,
+ u32 attr_sel, u32 *mib_val)
+{
+ return ufshcd_dme_get_attr(hba, attr_sel, mib_val, DME_LOCAL);
+}
+
+static inline int ufshcd_dme_peer_get(struct ufs_hba *hba,
+ u32 attr_sel, u32 *mib_val)
+{
+ return ufshcd_dme_get_attr(hba, attr_sel, mib_val, DME_PEER);
+}
+
#endif /* End of Header */
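
Together these wrappers let callers address local and peer MIB attributes without building a struct uic_command by hand. A hedged usage sketch (example_tune_link is hypothetical; attribute IDs come from the unipro.h added below, and error handling is trimmed):

/* Sketch: read the connected RX lane count, then request RX gear 1. */
static int example_tune_link(struct ufs_hba *hba)
{
	u32 rx_lanes = 0;
	int ret;

	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES),
			     &rx_lanes);
	if (ret)
		return ret;

	return ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), 1);
}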
diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h
index f1e1b74..0475c66 100644
--- a/drivers/scsi/ufs/ufshci.h
+++ b/drivers/scsi/ufs/ufshci.h
@@ -124,6 +124,9 @@ enum {
#define CONTROLLER_FATAL_ERROR UFS_BIT(16)
#define SYSTEM_BUS_FATAL_ERROR UFS_BIT(17)
+#define UFSHCD_UIC_MASK (UIC_COMMAND_COMPL |\
+ UIC_POWER_MODE)
+
#define UFSHCD_ERROR_MASK (UIC_ERROR |\
DEVICE_FATAL_ERROR |\
CONTROLLER_FATAL_ERROR |\
@@ -142,6 +145,15 @@ enum {
#define DEVICE_ERROR_INDICATOR UFS_BIT(5)
#define UIC_POWER_MODE_CHANGE_REQ_STATUS_MASK UFS_MASK(0x7, 8)
+enum {
+ PWR_OK = 0x0,
+ PWR_LOCAL = 0x01,
+ PWR_REMOTE = 0x02,
+ PWR_BUSY = 0x03,
+ PWR_ERROR_CAP = 0x04,
+ PWR_FATAL_ERROR = 0x05,
+};
+
/* HCE - Host Controller Enable 34h */
#define CONTROLLER_ENABLE UFS_BIT(0)
#define CONTROLLER_DISABLE 0x0
@@ -191,6 +203,12 @@ enum {
#define CONFIG_RESULT_CODE_MASK 0xFF
#define GENERIC_ERROR_CODE_MASK 0xFF
+#define UIC_ARG_MIB_SEL(attr, sel) ((((attr) & 0xFFFF) << 16) |\
+ ((sel) & 0xFFFF))
+#define UIC_ARG_MIB(attr) UIC_ARG_MIB_SEL(attr, 0)
+#define UIC_ARG_ATTR_TYPE(t) (((t) & 0xFF) << 16)
+#define UIC_GET_ATTR_ID(v) (((v) >> 16) & 0xFFFF)
+
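
These macros pack the 16-bit attribute ID into the upper half of UIC command argument 1, with an optional selector in the lower half; UIC_GET_ATTR_ID recovers the ID for error messages. A standalone check using PA_PWRMODE (0x1571) from the unipro.h added later in this diff:

#include <stdio.h>

#define UIC_ARG_MIB_SEL(attr, sel)	((((attr) & 0xFFFF) << 16) |\
					 ((sel) & 0xFFFF))
#define UIC_ARG_MIB(attr)		UIC_ARG_MIB_SEL(attr, 0)
#define UIC_GET_ATTR_ID(v)		(((v) >> 16) & 0xFFFF)

int main(void)
{
	unsigned int arg1 = UIC_ARG_MIB(0x1571);	/* PA_PWRMODE */

	printf("argument1 = 0x%08x, id = 0x%04x\n",
	       arg1, UIC_GET_ATTR_ID(arg1));	/* 0x15710000, 0x1571 */
	return 0;
}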
/* UIC Commands */
enum {
UIC_CMD_DME_GET = 0x01,
@@ -226,8 +244,8 @@ enum {
#define MASK_UIC_COMMAND_RESULT 0xFF
-#define INT_AGGR_COUNTER_THRESHOLD_VALUE (0x1F << 8)
-#define INT_AGGR_TIMEOUT_VALUE (0x02)
+#define INT_AGGR_COUNTER_THLD_VAL(c) (((c) & 0x1F) << 8)
+#define INT_AGGR_TIMEOUT_VAL(t) (((t) & 0xFF) << 0)
/* Interrupt disable masks */
enum {
diff --git a/drivers/scsi/ufs/unipro.h b/drivers/scsi/ufs/unipro.h
new file mode 100644
index 0000000..0bb8041
--- /dev/null
+++ b/drivers/scsi/ufs/unipro.h
@@ -0,0 +1,151 @@
+/*
+ * drivers/scsi/ufs/unipro.h
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _UNIPRO_H_
+#define _UNIPRO_H_
+
+/*
+ * PHY Adapter attributes
+ */
+#define PA_ACTIVETXDATALANES 0x1560
+#define PA_ACTIVERXDATALANES 0x1580
+#define PA_TXTRAILINGCLOCKS 0x1564
+#define PA_PHY_TYPE 0x1500
+#define PA_AVAILTXDATALANES 0x1520
+#define PA_AVAILRXDATALANES 0x1540
+#define PA_MINRXTRAILINGCLOCKS 0x1543
+#define PA_TXPWRSTATUS 0x1567
+#define PA_RXPWRSTATUS 0x1582
+#define PA_TXFORCECLOCK 0x1562
+#define PA_TXPWRMODE 0x1563
+#define PA_LEGACYDPHYESCDL 0x1570
+#define PA_MAXTXSPEEDFAST 0x1521
+#define PA_MAXTXSPEEDSLOW 0x1522
+#define PA_MAXRXSPEEDFAST 0x1541
+#define PA_MAXRXSPEEDSLOW 0x1542
+#define PA_TXLINKSTARTUPHS 0x1544
+#define PA_TXSPEEDFAST 0x1565
+#define PA_TXSPEEDSLOW 0x1566
+#define PA_REMOTEVERINFO 0x15A0
+#define PA_TXGEAR 0x1568
+#define PA_TXTERMINATION 0x1569
+#define PA_HSSERIES 0x156A
+#define PA_PWRMODE 0x1571
+#define PA_RXGEAR 0x1583
+#define PA_RXTERMINATION 0x1584
+#define PA_MAXRXPWMGEAR 0x1586
+#define PA_MAXRXHSGEAR 0x1587
+#define PA_RXHSUNTERMCAP 0x15A5
+#define PA_RXLSTERMCAP 0x15A6
+#define PA_PACPREQTIMEOUT 0x1590
+#define PA_PACPREQEOBTIMEOUT 0x1591
+#define PA_HIBERN8TIME 0x15A7
+#define PA_LOCALVERINFO 0x15A9
+#define PA_TACTIVATE 0x15A8
+#define PA_PACPFRAMECOUNT 0x15C0
+#define PA_PACPERRORCOUNT 0x15C1
+#define PA_PHYTESTCONTROL 0x15C2
+#define PA_PWRMODEUSERDATA0 0x15B0
+#define PA_PWRMODEUSERDATA1 0x15B1
+#define PA_PWRMODEUSERDATA2 0x15B2
+#define PA_PWRMODEUSERDATA3 0x15B3
+#define PA_PWRMODEUSERDATA4 0x15B4
+#define PA_PWRMODEUSERDATA5 0x15B5
+#define PA_PWRMODEUSERDATA6 0x15B6
+#define PA_PWRMODEUSERDATA7 0x15B7
+#define PA_PWRMODEUSERDATA8 0x15B8
+#define PA_PWRMODEUSERDATA9 0x15B9
+#define PA_PWRMODEUSERDATA10 0x15BA
+#define PA_PWRMODEUSERDATA11 0x15BB
+#define PA_CONNECTEDTXDATALANES 0x1561
+#define PA_CONNECTEDRXDATALANES 0x1581
+#define PA_LOGICALLANEMAP 0x15A1
+#define PA_SLEEPNOCONFIGTIME 0x15A2
+#define PA_STALLNOCONFIGTIME 0x15A3
+#define PA_SAVECONFIGTIME 0x15A4
+
+/* PA power modes */
+enum {
+ FAST_MODE = 1,
+ SLOW_MODE = 2,
+ FASTAUTO_MODE = 4,
+ SLOWAUTO_MODE = 5,
+ UNCHANGED = 7,
+};
+
+/* PA TX/RX Frequency Series */
+enum {
+ PA_HS_MODE_A = 1,
+ PA_HS_MODE_B = 2,
+};
+
+/*
+ * Data Link Layer Attributes
+ */
+#define DL_TC0TXFCTHRESHOLD 0x2040
+#define DL_FC0PROTTIMEOUTVAL 0x2041
+#define DL_TC0REPLAYTIMEOUTVAL 0x2042
+#define DL_AFC0REQTIMEOUTVAL 0x2043
+#define DL_AFC0CREDITTHRESHOLD 0x2044
+#define DL_TC0OUTACKTHRESHOLD 0x2045
+#define DL_TC1TXFCTHRESHOLD 0x2060
+#define DL_FC1PROTTIMEOUTVAL 0x2061
+#define DL_TC1REPLAYTIMEOUTVAL 0x2062
+#define DL_AFC1REQTIMEOUTVAL 0x2063
+#define DL_AFC1CREDITTHRESHOLD 0x2064
+#define DL_TC1OUTACKTHRESHOLD 0x2065
+#define DL_TXPREEMPTIONCAP 0x2000
+#define DL_TC0TXMAXSDUSIZE 0x2001
+#define DL_TC0RXINITCREDITVAL 0x2002
+#define DL_TC0TXBUFFERSIZE 0x2005
+#define DL_PEERTC0PRESENT 0x2046
+#define DL_PEERTC0RXINITCREVAL 0x2047
+#define DL_TC1TXMAXSDUSIZE 0x2003
+#define DL_TC1RXINITCREDITVAL 0x2004
+#define DL_TC1TXBUFFERSIZE 0x2006
+#define DL_PEERTC1PRESENT 0x2066
+#define DL_PEERTC1RXINITCREVAL 0x2067
+
+/*
+ * Network Layer Attributes
+ */
+#define N_DEVICEID 0x3000
+#define N_DEVICEID_VALID 0x3001
+#define N_TC0TXMAXSDUSIZE 0x3020
+#define N_TC1TXMAXSDUSIZE 0x3021
+
+/*
+ * Transport Layer Attributes
+ */
+#define T_NUMCPORTS 0x4000
+#define T_NUMTESTFEATURES 0x4001
+#define T_CONNECTIONSTATE 0x4020
+#define T_PEERDEVICEID 0x4021
+#define T_PEERCPORTID 0x4022
+#define T_TRAFFICCLASS 0x4023
+#define T_PROTOCOLID 0x4024
+#define T_CPORTFLAGS 0x4025
+#define T_TXTOKENVALUE 0x4026
+#define T_RXTOKENVALUE 0x4027
+#define T_LOCALBUFFERSPACE 0x4028
+#define T_PEERBUFFERSPACE 0x4029
+#define T_CREDITSTOSEND 0x402A
+#define T_CPORTMODE 0x402B
+#define T_TC0TXMAXSDUSIZE 0x4060
+#define T_TC1TXMAXSDUSIZE 0x4061
+
+/* Boolean attribute values */
+enum {
+ FALSE = 0,
+ TRUE,
+};
+
+#endif /* _UNIPRO_H_ */
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 0170d4c..b9c53cc 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -55,7 +55,6 @@ comment "SPI Master Controller Drivers"
config SPI_ALTERA
tristate "Altera SPI Controller"
- depends on GENERIC_HARDIRQS
select SPI_BITBANG
help
This is the driver for the Altera SPI Controller.
@@ -358,7 +357,7 @@ config SPI_PXA2XX_DMA
config SPI_PXA2XX
tristate "PXA2xx SSP SPI master"
- depends on (ARCH_PXA || PCI || ACPI) && GENERIC_HARDIRQS
+ depends on (ARCH_PXA || PCI || ACPI)
select PXA_SSP if ARCH_PXA
help
This enables using a PXA2xx or Sodaville SSP port as a SPI master
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index 21a3f72..8e76ddc 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -341,27 +341,26 @@ out:
/*
* ashmem_shrink - our cache shrinker, called from mm/vmscan.c :: shrink_slab
*
- * 'nr_to_scan' is the number of objects (pages) to prune, or 0 to query how
- * many objects (pages) we have in total.
+ * 'nr_to_scan' is the number of objects to scan for freeing.
*
* 'gfp_mask' is the mask of the allocation that got us into this mess.
*
- * Return value is the number of objects (pages) remaining, or -1 if we cannot
+ * Return value is the number of objects freed or SHRINK_STOP if we cannot
* proceed without risk of deadlock (due to gfp_mask).
*
* We approximate LRU via least-recently-unpinned, jettisoning unpinned partial
* chunks of ashmem regions LRU-wise one-at-a-time until we hit 'nr_to_scan'
* pages freed.
*/
-static int ashmem_shrink(struct shrinker *s, struct shrink_control *sc)
+static unsigned long
+ashmem_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
struct ashmem_range *range, *next;
+ unsigned long freed = 0;
/* We might recurse into filesystem code, so bail out if necessary */
- if (sc->nr_to_scan && !(sc->gfp_mask & __GFP_FS))
- return -1;
- if (!sc->nr_to_scan)
- return lru_count;
+ if (!(sc->gfp_mask & __GFP_FS))
+ return SHRINK_STOP;
mutex_lock(&ashmem_mutex);
list_for_each_entry_safe(range, next, &ashmem_lru_list, lru) {
@@ -374,17 +373,32 @@ static int ashmem_shrink(struct shrinker *s, struct shrink_control *sc)
range->purged = ASHMEM_WAS_PURGED;
lru_del(range);
- sc->nr_to_scan -= range_size(range);
- if (sc->nr_to_scan <= 0)
+ freed += range_size(range);
+ if (--sc->nr_to_scan <= 0)
break;
}
mutex_unlock(&ashmem_mutex);
+ return freed;
+}
+
+static unsigned long
+ashmem_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
+{
+ /*
+ * note that lru_count is count of pages on the lru, not a count of
+ * objects on the list. This means the scan function needs to return the
+ * number of pages freed, not the number of objects scanned.
+ */
return lru_count;
}
static struct shrinker ashmem_shrinker = {
- .shrink = ashmem_shrink,
+ .count_objects = ashmem_shrink_count,
+ .scan_objects = ashmem_shrink_scan,
+ /*
+ * XXX (dchinner): I wish people would comment on why they need such
+ * significant changes to the default value here
+ */
.seeks = DEFAULT_SEEKS * 4,
};
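
This is the shape of the new core shrinker contract: count_objects() returns an estimate of freeable objects, and scan_objects() frees up to sc->nr_to_scan of them, returning the number actually freed or SHRINK_STOP when it cannot make progress. A minimal sketch against a hypothetical cache:

#include <linux/gfp.h>
#include <linux/shrinker.h>

unsigned long my_cache_nr_freeable(void);		/* hypothetical */
unsigned long my_cache_free(unsigned long nr);		/* hypothetical */

static unsigned long my_cache_count(struct shrinker *s,
				    struct shrink_control *sc)
{
	return my_cache_nr_freeable();		/* estimate only, no freeing */
}

static unsigned long my_cache_scan(struct shrinker *s,
				   struct shrink_control *sc)
{
	if (!(sc->gfp_mask & __GFP_FS))
		return SHRINK_STOP;		/* cannot recurse into the FS */

	return my_cache_free(sc->nr_to_scan);	/* number actually freed */
}

static struct shrinker my_shrinker = {
	.count_objects	= my_cache_count,
	.scan_objects	= my_cache_scan,
	.seeks		= DEFAULT_SEEKS,
};
/* register_shrinker(&my_shrinker) at init, unregister_shrinker() on exit. */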
@@ -690,11 +704,11 @@ static long ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
if (capable(CAP_SYS_ADMIN)) {
struct shrink_control sc = {
.gfp_mask = GFP_KERNEL,
- .nr_to_scan = 0,
+ .nr_to_scan = LONG_MAX,
};
- ret = ashmem_shrink(&ashmem_shrinker, &sc);
- sc.nr_to_scan = ret;
- ashmem_shrink(&ashmem_shrinker, &sc);
+
+ nodes_setall(sc.nodes_to_scan);
+ ashmem_shrink_scan(&ashmem_shrinker, &sc);
}
break;
}
diff --git a/drivers/staging/android/logger.c b/drivers/staging/android/logger.c
index a8c3444..d42f578 100644
--- a/drivers/staging/android/logger.c
+++ b/drivers/staging/android/logger.c
@@ -481,7 +481,7 @@ static ssize_t logger_aio_write(struct kiocb *iocb, const struct iovec *iov,
header.sec = now.tv_sec;
header.nsec = now.tv_nsec;
header.euid = current_euid();
- header.len = min_t(size_t, iocb->ki_left, LOGGER_ENTRY_MAX_PAYLOAD);
+ header.len = min_t(size_t, iocb->ki_nbytes, LOGGER_ENTRY_MAX_PAYLOAD);
header.hdr_size = sizeof(struct logger_entry);
/* null writes succeed, return zero */
diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c
index fe74494..6f094b3 100644
--- a/drivers/staging/android/lowmemorykiller.c
+++ b/drivers/staging/android/lowmemorykiller.c
@@ -66,11 +66,20 @@ static unsigned long lowmem_deathpending_timeout;
pr_info(x); \
} while (0)
-static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc)
+static unsigned long lowmem_count(struct shrinker *s,
+ struct shrink_control *sc)
+{
+ return global_page_state(NR_ACTIVE_ANON) +
+ global_page_state(NR_ACTIVE_FILE) +
+ global_page_state(NR_INACTIVE_ANON) +
+ global_page_state(NR_INACTIVE_FILE);
+}
+
+static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc)
{
struct task_struct *tsk;
struct task_struct *selected = NULL;
- int rem = 0;
+ unsigned long rem = 0;
int tasksize;
int i;
short min_score_adj = OOM_SCORE_ADJ_MAX + 1;
@@ -92,19 +101,17 @@ static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc)
break;
}
}
- if (sc->nr_to_scan > 0)
- lowmem_print(3, "lowmem_shrink %lu, %x, ofree %d %d, ma %hd\n",
- sc->nr_to_scan, sc->gfp_mask, other_free,
- other_file, min_score_adj);
- rem = global_page_state(NR_ACTIVE_ANON) +
- global_page_state(NR_ACTIVE_FILE) +
- global_page_state(NR_INACTIVE_ANON) +
- global_page_state(NR_INACTIVE_FILE);
- if (sc->nr_to_scan <= 0 || min_score_adj == OOM_SCORE_ADJ_MAX + 1) {
- lowmem_print(5, "lowmem_shrink %lu, %x, return %d\n",
- sc->nr_to_scan, sc->gfp_mask, rem);
- return rem;
+
+ lowmem_print(3, "lowmem_scan %lu, %x, ofree %d %d, ma %hd\n",
+ sc->nr_to_scan, sc->gfp_mask, other_free,
+ other_file, min_score_adj);
+
+ if (min_score_adj == OOM_SCORE_ADJ_MAX + 1) {
+ lowmem_print(5, "lowmem_scan %lu, %x, return 0\n",
+ sc->nr_to_scan, sc->gfp_mask);
+ return 0;
}
+
selected_oom_score_adj = min_score_adj;
rcu_read_lock();
@@ -154,16 +161,18 @@ static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc)
lowmem_deathpending_timeout = jiffies + HZ;
send_sig(SIGKILL, selected, 0);
set_tsk_thread_flag(selected, TIF_MEMDIE);
- rem -= selected_tasksize;
+ rem += selected_tasksize;
}
- lowmem_print(4, "lowmem_shrink %lu, %x, return %d\n",
+
+ lowmem_print(4, "lowmem_scan %lu, %x, return %lu\n",
sc->nr_to_scan, sc->gfp_mask, rem);
rcu_read_unlock();
return rem;
}
static struct shrinker lowmem_shrinker = {
- .shrink = lowmem_shrink,
+ .scan_objects = lowmem_scan,
+ .count_objects = lowmem_count,
.seeks = DEFAULT_SEEKS * 16
};
diff --git a/drivers/staging/lustre/include/linux/libcfs/linux/linux-mem.h b/drivers/staging/lustre/include/linux/libcfs/linux/linux-mem.h
index 63efb7b..2af15d4 100644
--- a/drivers/staging/lustre/include/linux/libcfs/linux/linux-mem.h
+++ b/drivers/staging/lustre/include/linux/libcfs/linux/linux-mem.h
@@ -79,42 +79,4 @@
do { __oldfs = get_fs(); set_fs(get_ds());} while(0)
#define MMSPACE_CLOSE set_fs(__oldfs)
-/*
- * Shrinker
- */
-
-# define SHRINKER_ARGS(sc, nr_to_scan, gfp_mask) \
- struct shrinker *shrinker, \
- struct shrink_control *sc
-# define shrink_param(sc, var) ((sc)->var)
-
-typedef int (*shrinker_t)(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask));
-
-static inline
-struct shrinker *set_shrinker(int seek, shrinker_t func)
-{
- struct shrinker *s;
-
- s = kmalloc(sizeof(*s), GFP_KERNEL);
- if (s == NULL)
- return (NULL);
-
- s->shrink = func;
- s->seeks = seek;
-
- register_shrinker(s);
-
- return s;
-}
-
-static inline
-void remove_shrinker(struct shrinker *shrinker)
-{
- if (shrinker == NULL)
- return;
-
- unregister_shrinker(shrinker);
- kfree(shrinker);
-}
-
#endif /* __LINUX_CFS_MEM_H__ */
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c b/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
index 454027d..0025ee6 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
@@ -521,7 +521,7 @@ static int ldlm_cli_pool_shrink(struct ldlm_pool *pl,
int nr, unsigned int gfp_mask)
{
struct ldlm_namespace *ns;
- int canceled = 0, unused;
+ int unused;
ns = ldlm_pl2ns(pl);
@@ -540,14 +540,10 @@ static int ldlm_cli_pool_shrink(struct ldlm_pool *pl,
unused = ns->ns_nr_unused;
spin_unlock(&ns->ns_lock);
- if (nr) {
- canceled = ldlm_cancel_lru(ns, nr, LCF_ASYNC,
- LDLM_CANCEL_SHRINK);
- }
- /*
- * Return the number of potentially reclaimable locks.
- */
- return ((unused - canceled) / 100) * sysctl_vfs_cache_pressure;
+ if (nr == 0)
+ return (unused / 100) * sysctl_vfs_cache_pressure;
+ else
+ return ldlm_cancel_lru(ns, nr, LCF_ASYNC, LDLM_CANCEL_SHRINK);
}
struct ldlm_pool_ops ldlm_srv_pool_ops = {
@@ -601,9 +597,10 @@ int ldlm_pool_recalc(struct ldlm_pool *pl)
return recalc_interval_sec;
}
-/**
+/*
* Pool shrink wrapper. Will call either client or server pool recalc callback
- * depending what pool \a pl is used.
+ * depending what pool pl is used. When nr == 0, just return the number of
+ * freeable locks. Otherwise, return the number of canceled locks.
*/
int ldlm_pool_shrink(struct ldlm_pool *pl, int nr,
unsigned int gfp_mask)
@@ -1017,29 +1014,24 @@ static int ldlm_pool_granted(struct ldlm_pool *pl)
}
static struct ptlrpc_thread *ldlm_pools_thread;
-static struct shrinker *ldlm_pools_srv_shrinker;
-static struct shrinker *ldlm_pools_cli_shrinker;
static struct completion ldlm_pools_comp;
/*
- * Cancel \a nr locks from all namespaces (if possible). Returns number of
- * cached locks after shrink is finished. All namespaces are asked to
- * cancel approximately equal amount of locks to keep balancing.
+ * Count locks from all namespaces (if possible). Returns number of
+ * cached locks.
*/
-static int ldlm_pools_shrink(ldlm_side_t client, int nr,
- unsigned int gfp_mask)
+static unsigned long ldlm_pools_count(ldlm_side_t client, unsigned int gfp_mask)
{
- int total = 0, cached = 0, nr_ns;
+ int total = 0, nr_ns;
struct ldlm_namespace *ns;
struct ldlm_namespace *ns_old = NULL; /* loop detection */
void *cookie;
- if (client == LDLM_NAMESPACE_CLIENT && nr != 0 &&
- !(gfp_mask & __GFP_FS))
- return -1;
+ if (client == LDLM_NAMESPACE_CLIENT && !(gfp_mask & __GFP_FS))
+ return 0;
- CDEBUG(D_DLMTRACE, "Request to shrink %d %s locks from all pools\n",
- nr, client == LDLM_NAMESPACE_CLIENT ? "client" : "server");
+ CDEBUG(D_DLMTRACE, "Request to count %s locks from all pools\n",
+ client == LDLM_NAMESPACE_CLIENT ? "client" : "server");
cookie = cl_env_reenter();
@@ -1047,8 +1039,7 @@ static int ldlm_pools_shrink(ldlm_side_t client, int nr,
* Find out how many resources we may release.
*/
for (nr_ns = ldlm_namespace_nr_read(client);
- nr_ns > 0; nr_ns--)
- {
+ nr_ns > 0; nr_ns--) {
mutex_lock(ldlm_namespace_lock(client));
if (list_empty(ldlm_namespace_list(client))) {
mutex_unlock(ldlm_namespace_lock(client));
@@ -1078,17 +1069,27 @@ static int ldlm_pools_shrink(ldlm_side_t client, int nr,
ldlm_namespace_put(ns);
}
- if (nr == 0 || total == 0) {
- cl_env_reexit(cookie);
- return total;
- }
+ cl_env_reexit(cookie);
+ return total;
+}
+
+static unsigned long ldlm_pools_scan(ldlm_side_t client, int nr, unsigned int gfp_mask)
+{
+ unsigned long freed = 0;
+ int tmp, nr_ns;
+ struct ldlm_namespace *ns;
+ void *cookie;
+
+ if (client == LDLM_NAMESPACE_CLIENT && !(gfp_mask & __GFP_FS))
+ return -1;
+
+ cookie = cl_env_reenter();
/*
- * Shrink at least ldlm_namespace_nr(client) namespaces.
+ * Shrink at least ldlm_namespace_nr_read(client) namespaces.
*/
- for (nr_ns = ldlm_namespace_nr_read(client) - nr_ns;
- nr_ns > 0; nr_ns--)
- {
+ for (tmp = nr_ns = ldlm_namespace_nr_read(client);
+ tmp > 0; tmp--) {
int cancel, nr_locks;
/*
@@ -1097,12 +1098,6 @@ static int ldlm_pools_shrink(ldlm_side_t client, int nr,
mutex_lock(ldlm_namespace_lock(client));
if (list_empty(ldlm_namespace_list(client))) {
mutex_unlock(ldlm_namespace_lock(client));
- /*
- * If list is empty, we can't return any @cached > 0,
- * that probably would cause needless shrinker
- * call.
- */
- cached = 0;
break;
}
ns = ldlm_namespace_first_locked(client);
@@ -1111,29 +1106,42 @@ static int ldlm_pools_shrink(ldlm_side_t client, int nr,
mutex_unlock(ldlm_namespace_lock(client));
nr_locks = ldlm_pool_granted(&ns->ns_pool);
- cancel = 1 + nr_locks * nr / total;
- ldlm_pool_shrink(&ns->ns_pool, cancel, gfp_mask);
- cached += ldlm_pool_granted(&ns->ns_pool);
+ /*
+ * We used to shrink proportionally, but with the new shrinker API
+ * we have lost the total number of freeable locks.
+ */
+ cancel = 1 + min_t(int, nr_locks, nr / nr_ns);
+ freed += ldlm_pool_shrink(&ns->ns_pool, cancel, gfp_mask);
ldlm_namespace_put(ns);
}
cl_env_reexit(cookie);
- /* we only decrease the SLV in server pools shrinker, return -1 to
- * kernel to avoid needless loop. LU-1128 */
- return (client == LDLM_NAMESPACE_SERVER) ? -1 : cached;
+ /*
+ * we only decrease the SLV in server pools shrinker, return
+ * SHRINK_STOP to kernel to avoid needless loop. LU-1128
+ */
+ return (client == LDLM_NAMESPACE_SERVER) ? SHRINK_STOP : freed;
+}
+
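
The per-namespace quota is now 1 + min(nr_locks, nr / nr_ns) instead of the old 1 + nr_locks * nr / total, because the scan callback no longer knows the global freeable total up front. As a worked example, a request of nr = 128 spread over nr_ns = 4 namespaces asks a namespace holding 1000 granted locks to cancel 1 + min(1000, 32) = 33 of them.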
+static unsigned long ldlm_pools_srv_count(struct shrinker *s, struct shrink_control *sc)
+{
+ return ldlm_pools_count(LDLM_NAMESPACE_SERVER, sc->gfp_mask);
}
-static int ldlm_pools_srv_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask))
+static unsigned long ldlm_pools_srv_scan(struct shrinker *s, struct shrink_control *sc)
{
- return ldlm_pools_shrink(LDLM_NAMESPACE_SERVER,
- shrink_param(sc, nr_to_scan),
- shrink_param(sc, gfp_mask));
+ return ldlm_pools_scan(LDLM_NAMESPACE_SERVER, sc->nr_to_scan,
+ sc->gfp_mask);
}
-static int ldlm_pools_cli_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask))
+static unsigned long ldlm_pools_cli_count(struct shrinker *s, struct shrink_control *sc)
{
- return ldlm_pools_shrink(LDLM_NAMESPACE_CLIENT,
- shrink_param(sc, nr_to_scan),
- shrink_param(sc, gfp_mask));
+ return ldlm_pools_count(LDLM_NAMESPACE_CLIENT, sc->gfp_mask);
+}
+
+static unsigned long ldlm_pools_cli_scan(struct shrinker *s, struct shrink_control *sc)
+{
+ return ldlm_pools_scan(LDLM_NAMESPACE_CLIENT, sc->nr_to_scan,
+ sc->gfp_mask);
}
int ldlm_pools_recalc(ldlm_side_t client)
@@ -1216,7 +1224,7 @@ int ldlm_pools_recalc(ldlm_side_t client)
}
/*
- * Recalc at least ldlm_namespace_nr(client) namespaces.
+ * Recalc at least ldlm_namespace_nr_read(client) namespaces.
*/
for (nr = ldlm_namespace_nr_read(client); nr > 0; nr--) {
int skip;
@@ -1383,18 +1391,26 @@ static void ldlm_pools_thread_stop(void)
ldlm_pools_thread = NULL;
}
+static struct shrinker ldlm_pools_srv_shrinker = {
+ .count_objects = ldlm_pools_srv_count,
+ .scan_objects = ldlm_pools_srv_scan,
+ .seeks = DEFAULT_SEEKS,
+};
+
+static struct shrinker ldlm_pools_cli_shrinker = {
+ .count_objects = ldlm_pools_cli_count,
+ .scan_objects = ldlm_pools_cli_scan,
+ .seeks = DEFAULT_SEEKS,
+};
+
int ldlm_pools_init(void)
{
int rc;
rc = ldlm_pools_thread_start();
if (rc == 0) {
- ldlm_pools_srv_shrinker =
- set_shrinker(DEFAULT_SEEKS,
- ldlm_pools_srv_shrink);
- ldlm_pools_cli_shrinker =
- set_shrinker(DEFAULT_SEEKS,
- ldlm_pools_cli_shrink);
+ register_shrinker(&ldlm_pools_srv_shrinker);
+ register_shrinker(&ldlm_pools_cli_shrinker);
}
return rc;
}
@@ -1402,14 +1418,8 @@ EXPORT_SYMBOL(ldlm_pools_init);
void ldlm_pools_fini(void)
{
- if (ldlm_pools_srv_shrinker != NULL) {
- remove_shrinker(ldlm_pools_srv_shrinker);
- ldlm_pools_srv_shrinker = NULL;
- }
- if (ldlm_pools_cli_shrinker != NULL) {
- remove_shrinker(ldlm_pools_cli_shrinker);
- ldlm_pools_cli_shrinker = NULL;
- }
+ unregister_shrinker(&ldlm_pools_srv_shrinker);
+ unregister_shrinker(&ldlm_pools_cli_shrinker);
ldlm_pools_thread_stop();
}
EXPORT_SYMBOL(ldlm_pools_fini);
diff --git a/drivers/staging/lustre/lustre/llite/file.c b/drivers/staging/lustre/lustre/llite/file.c
index 253f026..bc534db 100644
--- a/drivers/staging/lustre/lustre/llite/file.c
+++ b/drivers/staging/lustre/lustre/llite/file.c
@@ -1009,7 +1009,7 @@ static ssize_t ll_file_read(struct file *file, char *buf, size_t count,
local_iov->iov_len = count;
init_sync_kiocb(kiocb, file);
kiocb->ki_pos = *ppos;
- kiocb->ki_left = count;
+ kiocb->ki_nbytes = count;
result = ll_file_aio_read(kiocb, local_iov, 1, kiocb->ki_pos);
*ppos = kiocb->ki_pos;
@@ -1068,7 +1068,7 @@ static ssize_t ll_file_write(struct file *file, const char *buf, size_t count,
local_iov->iov_len = count;
init_sync_kiocb(kiocb, file);
kiocb->ki_pos = *ppos;
- kiocb->ki_left = count;
+ kiocb->ki_nbytes = count;
result = ll_file_aio_write(kiocb, local_iov, 1, kiocb->ki_pos);
*ppos = kiocb->ki_pos;
diff --git a/drivers/staging/lustre/lustre/obdclass/lu_object.c b/drivers/staging/lustre/lustre/obdclass/lu_object.c
index c29ac1c..3a3d5bc 100644
--- a/drivers/staging/lustre/lustre/obdclass/lu_object.c
+++ b/drivers/staging/lustre/lustre/obdclass/lu_object.c
@@ -1779,7 +1779,6 @@ int lu_env_refill_by_tags(struct lu_env *env, __u32 ctags,
}
EXPORT_SYMBOL(lu_env_refill_by_tags);
-static struct shrinker *lu_site_shrinker = NULL;
typedef struct lu_site_stats{
unsigned lss_populated;
@@ -1835,61 +1834,68 @@ static void lu_site_stats_get(cfs_hash_t *hs,
* objects without taking the lu_sites_guard lock, but this is not
* possible in the current implementation.
*/
-static int lu_cache_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask))
+static unsigned long lu_cache_shrink_count(struct shrinker *sk,
+ struct shrink_control *sc)
{
lu_site_stats_t stats;
struct lu_site *s;
struct lu_site *tmp;
- int cached = 0;
- int remain = shrink_param(sc, nr_to_scan);
- LIST_HEAD(splice);
-
- if (!(shrink_param(sc, gfp_mask) & __GFP_FS)) {
- if (remain != 0)
- return -1;
- else
- /* We must not take the lu_sites_guard lock when
- * __GFP_FS is *not* set because of the deadlock
- * possibility detailed above. Additionally,
- * since we cannot determine the number of
- * objects in the cache without taking this
- * lock, we're in a particularly tough spot. As
- * a result, we'll just lie and say our cache is
- * empty. This _should_ be ok, as we can't
- * reclaim objects when __GFP_FS is *not* set
- * anyways.
- */
- return 0;
- }
+ unsigned long cached = 0;
- CDEBUG(D_INODE, "Shrink %d objects\n", remain);
+ if (!(sc->gfp_mask & __GFP_FS))
+ return 0;
mutex_lock(&lu_sites_guard);
list_for_each_entry_safe(s, tmp, &lu_sites, ls_linkage) {
- if (shrink_param(sc, nr_to_scan) != 0) {
- remain = lu_site_purge(&lu_shrink_env, s, remain);
- /*
- * Move just shrunk site to the tail of site list to
- * assure shrinking fairness.
- */
- list_move_tail(&s->ls_linkage, &splice);
- }
-
memset(&stats, 0, sizeof(stats));
lu_site_stats_get(s->ls_obj_hash, &stats, 0);
cached += stats.lss_total - stats.lss_busy;
- if (shrink_param(sc, nr_to_scan) && remain <= 0)
- break;
}
- list_splice(&splice, lu_sites.prev);
mutex_unlock(&lu_sites_guard);
cached = (cached / 100) * sysctl_vfs_cache_pressure;
- if (shrink_param(sc, nr_to_scan) == 0)
- CDEBUG(D_INODE, "%d objects cached\n", cached);
+ CDEBUG(D_INODE, "%ld objects cached\n", cached);
return cached;
}
+static unsigned long lu_cache_shrink_scan(struct shrinker *sk,
+ struct shrink_control *sc)
+{
+ struct lu_site *s;
+ struct lu_site *tmp;
+ unsigned long remain = sc->nr_to_scan, freed = 0;
+ LIST_HEAD(splice);
+
+ if (!(sc->gfp_mask & __GFP_FS))
+ /* We must not take the lu_sites_guard lock when
+ * __GFP_FS is *not* set because of the deadlock
+ * possibility detailed above. Additionally,
+ * since we cannot determine the number of
+ * objects in the cache without taking this
+ * lock, we're in a particularly tough spot. As
+ * a result, we'll just lie and say our cache is
+ * empty. This _should_ be ok, as we can't
+ * reclaim objects when __GFP_FS is *not* set
+ * anyways.
+ */
+ return SHRINK_STOP;
+
+ mutex_lock(&lu_sites_guard);
+ list_for_each_entry_safe(s, tmp, &lu_sites, ls_linkage) {
+ freed = lu_site_purge(&lu_shrink_env, s, remain);
+ remain -= freed;
+ /*
+ * Move just shrunk site to the tail of site list to
+ * assure shrinking fairness.
+ */
+ list_move_tail(&s->ls_linkage, &splice);
+ }
+ list_splice(&splice, lu_sites.prev);
+ mutex_unlock(&lu_sites_guard);
+
+ return sc->nr_to_scan - remain;
+}
+
/*
* Debugging stuff.
*/
@@ -1913,6 +1919,12 @@ int lu_printk_printer(const struct lu_env *env,
return 0;
}
+static struct shrinker lu_site_shrinker = {
+ .count_objects = lu_cache_shrink_count,
+ .scan_objects = lu_cache_shrink_scan,
+ .seeks = DEFAULT_SEEKS,
+};
+
/**
* Initialization of global lu_* data.
*/
@@ -1947,9 +1959,7 @@ int lu_global_init(void)
* inode, one for ea. Unfortunately setting this high value results in
* lu_object/inode cache consuming all the memory.
*/
- lu_site_shrinker = set_shrinker(DEFAULT_SEEKS, lu_cache_shrink);
- if (lu_site_shrinker == NULL)
- return -ENOMEM;
+ register_shrinker(&lu_site_shrinker);
return result;
}
@@ -1959,11 +1969,7 @@ int lu_global_init(void)
*/
void lu_global_fini(void)
{
- if (lu_site_shrinker != NULL) {
- remove_shrinker(lu_site_shrinker);
- lu_site_shrinker = NULL;
- }
-
+ unregister_shrinker(&lu_site_shrinker);
lu_context_key_degister(&lu_global_key);
/*
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c b/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
index 9013745..e90c8fb 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
@@ -121,13 +121,6 @@ static struct ptlrpc_enc_page_pool {
} page_pools;
/*
- * memory shrinker
- */
-const int pools_shrinker_seeks = DEFAULT_SEEKS;
-static struct shrinker *pools_shrinker = NULL;
-
-
-/*
* /proc/fs/lustre/sptlrpc/encrypt_page_pools
*/
int sptlrpc_proc_enc_pool_seq_show(struct seq_file *m, void *v)
@@ -226,30 +219,46 @@ static void enc_pools_release_free_pages(long npages)
}
/*
- * could be called frequently for query (@nr_to_scan == 0).
* we try to keep at least PTLRPC_MAX_BRW_PAGES pages in the pool.
*/
-static int enc_pools_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask))
+static unsigned long enc_pools_shrink_count(struct shrinker *s,
+ struct shrink_control *sc)
{
- if (unlikely(shrink_param(sc, nr_to_scan) != 0)) {
+ /*
+ * if no pool access for a long time, we consider it's fully idle.
+ * a little race here is fine.
+ */
+ if (unlikely(cfs_time_current_sec() - page_pools.epp_last_access >
+ CACHE_QUIESCENT_PERIOD)) {
spin_lock(&page_pools.epp_lock);
- shrink_param(sc, nr_to_scan) = min_t(unsigned long,
- shrink_param(sc, nr_to_scan),
- page_pools.epp_free_pages -
- PTLRPC_MAX_BRW_PAGES);
- if (shrink_param(sc, nr_to_scan) > 0) {
- enc_pools_release_free_pages(shrink_param(sc,
- nr_to_scan));
- CDEBUG(D_SEC, "released %ld pages, %ld left\n",
- (long)shrink_param(sc, nr_to_scan),
- page_pools.epp_free_pages);
-
- page_pools.epp_st_shrinks++;
- page_pools.epp_last_shrink = cfs_time_current_sec();
- }
+ page_pools.epp_idle_idx = IDLE_IDX_MAX;
spin_unlock(&page_pools.epp_lock);
}
+ LASSERT(page_pools.epp_idle_idx <= IDLE_IDX_MAX);
+ return max((int)page_pools.epp_free_pages - PTLRPC_MAX_BRW_PAGES, 0) *
+ (IDLE_IDX_MAX - page_pools.epp_idle_idx) / IDLE_IDX_MAX;
+}
+
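
The count is the page surplus above PTLRPC_MAX_BRW_PAGES scaled by (IDLE_IDX_MAX - epp_idle_idx) / IDLE_IDX_MAX. With hypothetical values epp_free_pages = 2048, PTLRPC_MAX_BRW_PAGES = 256 and epp_idle_idx = IDLE_IDX_MAX / 2, the shrinker is told about (2048 - 256) * 1/2 = 896 freeable pages.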
+/*
+ * we try to keep at least PTLRPC_MAX_BRW_PAGES pages in the pool.
+ */
+static unsigned long enc_pools_shrink_scan(struct shrinker *s,
+ struct shrink_control *sc)
+{
+ spin_lock(&page_pools.epp_lock);
+ sc->nr_to_scan = min_t(unsigned long, sc->nr_to_scan,
+ page_pools.epp_free_pages - PTLRPC_MAX_BRW_PAGES);
+ if (sc->nr_to_scan > 0) {
+ enc_pools_release_free_pages(sc->nr_to_scan);
+ CDEBUG(D_SEC, "released %ld pages, %ld left\n",
+ (long)sc->nr_to_scan, page_pools.epp_free_pages);
+
+ page_pools.epp_st_shrinks++;
+ page_pools.epp_last_shrink = cfs_time_current_sec();
+ }
+ spin_unlock(&page_pools.epp_lock);
+
/*
* if no pool access for a long time, we consider it's fully idle.
* a little race here is fine.
@@ -262,8 +271,7 @@ static int enc_pools_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask))
}
LASSERT(page_pools.epp_idle_idx <= IDLE_IDX_MAX);
- return max((int)page_pools.epp_free_pages - PTLRPC_MAX_BRW_PAGES, 0) *
- (IDLE_IDX_MAX - page_pools.epp_idle_idx) / IDLE_IDX_MAX;
+ return sc->nr_to_scan;
}
static inline
@@ -699,6 +707,12 @@ static inline void enc_pools_free(void)
sizeof(*page_pools.epp_pools));
}
+static struct shrinker pools_shrinker = {
+ .count_objects = enc_pools_shrink_count,
+ .scan_objects = enc_pools_shrink_scan,
+ .seeks = DEFAULT_SEEKS,
+};
+
int sptlrpc_enc_pool_init(void)
{
/*
@@ -736,12 +750,7 @@ int sptlrpc_enc_pool_init(void)
if (page_pools.epp_pools == NULL)
return -ENOMEM;
- pools_shrinker = set_shrinker(pools_shrinker_seeks,
- enc_pools_shrink);
- if (pools_shrinker == NULL) {
- enc_pools_free();
- return -ENOMEM;
- }
+ register_shrinker(&pools_shrinker);
return 0;
}
@@ -750,11 +759,10 @@ void sptlrpc_enc_pool_fini(void)
{
unsigned long cleaned, npools;
- LASSERT(pools_shrinker);
LASSERT(page_pools.epp_pools);
LASSERT(page_pools.epp_total_pages == page_pools.epp_free_pages);
- remove_shrinker(pools_shrinker);
+ unregister_shrinker(&pools_shrinker);
npools = npages_to_npools(page_pools.epp_total_pages);
cleaned = enc_pools_cleanup(page_pools.epp_pools, npools);
diff --git a/drivers/staging/octeon/ethernet-mem.c b/drivers/staging/octeon/ethernet-mem.c
index 78b6cb7..199059d 100644
--- a/drivers/staging/octeon/ethernet-mem.c
+++ b/drivers/staging/octeon/ethernet-mem.c
@@ -48,13 +48,8 @@ static int cvm_oct_fill_hw_skbuff(int pool, int size, int elements)
while (freed) {
struct sk_buff *skb = dev_alloc_skb(size + 256);
- if (unlikely(skb == NULL)) {
- pr_warning
- ("Failed to allocate skb for hardware pool %d\n",
- pool);
+ if (unlikely(skb == NULL))
break;
- }
-
skb_reserve(skb, 256 - (((unsigned long)skb->data) & 0x7f));
*(struct sk_buff **)(skb->data - sizeof(void *)) = skb;
cvmx_fpa_free(skb->data, pool, DONT_WRITEBACK(size / 128));
diff --git a/drivers/staging/octeon/ethernet-rgmii.c b/drivers/staging/octeon/ethernet-rgmii.c
index d8f5f69..ea53af3 100644
--- a/drivers/staging/octeon/ethernet-rgmii.c
+++ b/drivers/staging/octeon/ethernet-rgmii.c
@@ -373,9 +373,7 @@ int cvm_oct_rgmii_init(struct net_device *dev)
* Enable interrupts on inband status changes
* for this port.
*/
- gmx_rx_int_en.u64 =
- cvmx_read_csr(CVMX_GMXX_RXX_INT_EN
- (index, interface));
+ gmx_rx_int_en.u64 = 0;
gmx_rx_int_en.s.phy_dupx = 1;
gmx_rx_int_en.s.phy_link = 1;
gmx_rx_int_en.s.phy_spd = 1;
diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
index 34afc16..e14a1bb 100644
--- a/drivers/staging/octeon/ethernet-rx.c
+++ b/drivers/staging/octeon/ethernet-rx.c
@@ -303,6 +303,7 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
if (backlog > budget * cores_in_use && napi != NULL)
cvm_oct_enable_one_cpu();
}
+ rx_count++;
skb_in_hw = USE_SKBUFFS_IN_HW && work->word2.s.bufs == 1;
if (likely(skb_in_hw)) {
@@ -336,9 +337,6 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
*/
skb = dev_alloc_skb(work->len);
if (!skb) {
- printk_ratelimited("Port %d failed to allocate "
- "skbuff, packet dropped\n",
- work->ipprt);
cvm_oct_free_work(work);
continue;
}
@@ -429,7 +427,6 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
#endif
}
netif_receive_skb(skb);
- rx_count++;
} else {
/* Drop any packet received for a device that isn't up */
/*
diff --git a/drivers/target/Makefile b/drivers/target/Makefile
index 9fdcb56..85b012d 100644
--- a/drivers/target/Makefile
+++ b/drivers/target/Makefile
@@ -13,7 +13,8 @@ target_core_mod-y := target_core_configfs.o \
target_core_spc.o \
target_core_ua.o \
target_core_rd.o \
- target_core_stat.o
+ target_core_stat.o \
+ target_core_xcopy.o
obj-$(CONFIG_TARGET_CORE) += target_core_mod.o
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 3a17930..35b61f7 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -1,9 +1,7 @@
/*******************************************************************************
* This file contains main functions related to the iSCSI Target Core Driver.
*
- * © Copyright 2007-2011 RisingTide Systems LLC.
- *
- * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ * (c) Copyright 2007-2013 Datera, Inc.
*
* Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
*
@@ -63,7 +61,6 @@ spinlock_t sess_idr_lock;
struct iscsit_global *iscsit_global;
-struct kmem_cache *lio_cmd_cache;
struct kmem_cache *lio_qr_cache;
struct kmem_cache *lio_dr_cache;
struct kmem_cache *lio_ooo_cache;
@@ -220,11 +217,6 @@ int iscsit_access_np(struct iscsi_np *np, struct iscsi_portal_group *tpg)
spin_unlock_bh(&np->np_thread_lock);
return -1;
}
- if (np->np_login_tpg) {
- pr_err("np->np_login_tpg() is not NULL!\n");
- spin_unlock_bh(&np->np_thread_lock);
- return -1;
- }
spin_unlock_bh(&np->np_thread_lock);
/*
* Determine if the portal group is accepting storage traffic.
@@ -239,26 +231,38 @@ int iscsit_access_np(struct iscsi_np *np, struct iscsi_portal_group *tpg)
/*
* Here we serialize access across the TIQN+TPG Tuple.
*/
- ret = mutex_lock_interruptible(&tpg->np_login_lock);
+ ret = down_interruptible(&tpg->np_login_sem);
if ((ret != 0) || signal_pending(current))
return -1;
- spin_lock_bh(&np->np_thread_lock);
- np->np_login_tpg = tpg;
- spin_unlock_bh(&np->np_thread_lock);
+ spin_lock_bh(&tpg->tpg_state_lock);
+ if (tpg->tpg_state != TPG_STATE_ACTIVE) {
+ spin_unlock_bh(&tpg->tpg_state_lock);
+ up(&tpg->np_login_sem);
+ return -1;
+ }
+ spin_unlock_bh(&tpg->tpg_state_lock);
return 0;
}
-int iscsit_deaccess_np(struct iscsi_np *np, struct iscsi_portal_group *tpg)
+void iscsit_login_kref_put(struct kref *kref)
+{
+ struct iscsi_tpg_np *tpg_np = container_of(kref,
+ struct iscsi_tpg_np, tpg_np_kref);
+
+ complete(&tpg_np->tpg_np_comp);
+}
+
+int iscsit_deaccess_np(struct iscsi_np *np, struct iscsi_portal_group *tpg,
+ struct iscsi_tpg_np *tpg_np)
{
struct iscsi_tiqn *tiqn = tpg->tpg_tiqn;
- spin_lock_bh(&np->np_thread_lock);
- np->np_login_tpg = NULL;
- spin_unlock_bh(&np->np_thread_lock);
+ up(&tpg->np_login_sem);
- mutex_unlock(&tpg->np_login_lock);
+ if (tpg_np)
+ kref_put(&tpg_np->tpg_np_kref, iscsit_login_kref_put);
if (tiqn)
iscsit_put_tiqn_for_login(tiqn);
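
Note the lock conversion: np_login_lock was a mutex, which must be unlocked by the task that locked it, while np_login_sem is a plain semaphore with no owner. That fits the asynchronous login rework later in this patch, where the serialization taken in iscsit_access_np() can be released by iscsit_deaccess_np() from a delayed-work thread rather than the original login thread. The pairing, as assumed from the hunks:

    /* sketch of the per-TIQN+TPG login serialization */
    if (down_interruptible(&tpg->np_login_sem))  /* login-thread context */
        return -1;
    /* ... negotiation may now continue from login_work ... */
    up(&tpg->np_login_sem);                      /* possibly a workqueue thread */
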
@@ -410,20 +414,10 @@ struct iscsi_np *iscsit_add_np(
int iscsit_reset_np_thread(
struct iscsi_np *np,
struct iscsi_tpg_np *tpg_np,
- struct iscsi_portal_group *tpg)
+ struct iscsi_portal_group *tpg,
+ bool shutdown)
{
spin_lock_bh(&np->np_thread_lock);
- if (tpg && tpg_np) {
- /*
- * The reset operation need only be performed when the
- * passed struct iscsi_portal_group has a login in progress
- * to one of the network portals.
- */
- if (tpg_np->tpg_np->np_login_tpg != tpg) {
- spin_unlock_bh(&np->np_thread_lock);
- return 0;
- }
- }
if (np->np_thread_state == ISCSI_NP_THREAD_INACTIVE) {
spin_unlock_bh(&np->np_thread_lock);
return 0;
@@ -438,6 +432,12 @@ int iscsit_reset_np_thread(
}
spin_unlock_bh(&np->np_thread_lock);
+ if (tpg_np && shutdown) {
+ kref_put(&tpg_np->tpg_np_kref, iscsit_login_kref_put);
+
+ wait_for_completion(&tpg_np->tpg_np_comp);
+ }
+
return 0;
}
@@ -497,7 +497,6 @@ static struct iscsit_transport iscsi_target_transport = {
.iscsit_setup_np = iscsit_setup_np,
.iscsit_accept_np = iscsit_accept_np,
.iscsit_free_np = iscsit_free_np,
- .iscsit_alloc_cmd = iscsit_alloc_cmd,
.iscsit_get_login_rx = iscsit_get_login_rx,
.iscsit_put_login_tx = iscsit_put_login_tx,
.iscsit_get_dataout = iscsit_build_r2ts_for_cmd,
@@ -538,22 +537,13 @@ static int __init iscsi_target_init_module(void)
goto ts_out1;
}
- lio_cmd_cache = kmem_cache_create("lio_cmd_cache",
- sizeof(struct iscsi_cmd), __alignof__(struct iscsi_cmd),
- 0, NULL);
- if (!lio_cmd_cache) {
- pr_err("Unable to kmem_cache_create() for"
- " lio_cmd_cache\n");
- goto ts_out2;
- }
-
lio_qr_cache = kmem_cache_create("lio_qr_cache",
sizeof(struct iscsi_queue_req),
__alignof__(struct iscsi_queue_req), 0, NULL);
if (!lio_qr_cache) {
pr_err("nable to kmem_cache_create() for"
" lio_qr_cache\n");
- goto cmd_out;
+ goto ts_out2;
}
lio_dr_cache = kmem_cache_create("lio_dr_cache",
@@ -597,8 +587,6 @@ dr_out:
kmem_cache_destroy(lio_dr_cache);
qr_out:
kmem_cache_destroy(lio_qr_cache);
-cmd_out:
- kmem_cache_destroy(lio_cmd_cache);
ts_out2:
iscsi_deallocate_thread_sets();
ts_out1:
@@ -616,7 +604,6 @@ static void __exit iscsi_target_cleanup_module(void)
iscsi_thread_set_free();
iscsit_release_discovery_tpg();
iscsit_unregister_transport(&iscsi_target_transport);
- kmem_cache_destroy(lio_cmd_cache);
kmem_cache_destroy(lio_qr_cache);
kmem_cache_destroy(lio_dr_cache);
kmem_cache_destroy(lio_ooo_cache);
@@ -3447,12 +3434,10 @@ static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd)
bool inaddr_any = iscsit_check_inaddr_any(np);
len = sprintf(buf, "TargetAddress="
- "%s%s%s:%hu,%hu",
- (np->np_sockaddr.ss_family == AF_INET6) ?
- "[" : "", (inaddr_any == false) ?
+ "%s:%hu,%hu",
+ (inaddr_any == false) ?
np->np_ip : conn->local_ip,
- (np->np_sockaddr.ss_family == AF_INET6) ?
- "]" : "", (inaddr_any == false) ?
+ (inaddr_any == false) ?
np->np_port : conn->local_port,
tpg->tpgt);
len += 1;
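
The SendTargets builder stops adding IPv6 brackets itself; judging from the iscsit_accept_np() hunk further down, conn->login_ip and conn->local_ip are now stored pre-bracketed at accept time, so one "%s:%hu,%hu" format covers both address families. A sketch of the assumed convention (variable names here are illustrative):

    /* address strings carry their own brackets from accept time */
    snprintf(ip, sizeof(ip), "[%pI6c]", &sin6->sin6_addr);  /* native IPv6 */
    snprintf(ip, sizeof(ip), "%pI4", &v4mapped);            /* IPv4-mapped */
    len = sprintf(buf, "TargetAddress=%s:%hu,%hu", ip, port, tpgt);
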
diff --git a/drivers/target/iscsi/iscsi_target.h b/drivers/target/iscsi/iscsi_target.h
index 2c437cb..e936d56 100644
--- a/drivers/target/iscsi/iscsi_target.h
+++ b/drivers/target/iscsi/iscsi_target.h
@@ -7,13 +7,15 @@ extern void iscsit_put_tiqn_for_login(struct iscsi_tiqn *);
extern struct iscsi_tiqn *iscsit_add_tiqn(unsigned char *);
extern void iscsit_del_tiqn(struct iscsi_tiqn *);
extern int iscsit_access_np(struct iscsi_np *, struct iscsi_portal_group *);
-extern int iscsit_deaccess_np(struct iscsi_np *, struct iscsi_portal_group *);
+extern void iscsit_login_kref_put(struct kref *);
+extern int iscsit_deaccess_np(struct iscsi_np *, struct iscsi_portal_group *,
+ struct iscsi_tpg_np *);
extern bool iscsit_check_np_match(struct __kernel_sockaddr_storage *,
struct iscsi_np *, int);
extern struct iscsi_np *iscsit_add_np(struct __kernel_sockaddr_storage *,
char *, int);
extern int iscsit_reset_np_thread(struct iscsi_np *, struct iscsi_tpg_np *,
- struct iscsi_portal_group *);
+ struct iscsi_portal_group *, bool);
extern int iscsit_del_np(struct iscsi_np *);
extern int iscsit_reject_cmd(struct iscsi_cmd *cmd, u8, unsigned char *);
extern void iscsit_set_unsoliticed_dataout(struct iscsi_cmd *);
@@ -37,7 +39,6 @@ extern struct target_fabric_configfs *lio_target_fabric_configfs;
extern struct kmem_cache *lio_dr_cache;
extern struct kmem_cache *lio_ooo_cache;
-extern struct kmem_cache *lio_cmd_cache;
extern struct kmem_cache *lio_qr_cache;
extern struct kmem_cache *lio_r2t_cache;
diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c
index cee1754..7505fdd 100644
--- a/drivers/target/iscsi/iscsi_target_auth.c
+++ b/drivers/target/iscsi/iscsi_target_auth.c
@@ -1,9 +1,7 @@
/*******************************************************************************
* This file houses the main functions for the iSCSI CHAP support
*
- * © Copyright 2007-2011 RisingTide Systems LLC.
- *
- * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ * (c) Copyright 2007-2013 Datera, Inc.
*
* Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
*
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index bbfd288..fd14525 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -2,9 +2,7 @@
* This file contains the configfs implementation for iSCSI Target mode
* from the LIO-Target Project.
*
- * © Copyright 2007-2011 RisingTide Systems LLC.
- *
- * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ * (c) Copyright 2007-2013 Datera, Inc.
*
* Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
*
@@ -265,9 +263,9 @@ static struct se_tpg_np *lio_target_call_addnptotpg(
*port_str = '\0'; /* Terminate string for IP */
port_str++; /* Skip over ":" */
- ret = strict_strtoul(port_str, 0, &port);
+ ret = kstrtoul(port_str, 0, &port);
if (ret < 0) {
- pr_err("strict_strtoul() failed for port_str: %d\n", ret);
+ pr_err("kstrtoul() failed for port_str: %d\n", ret);
return ERR_PTR(ret);
}
sock_in6 = (struct sockaddr_in6 *)&sockaddr;
@@ -290,9 +288,9 @@ static struct se_tpg_np *lio_target_call_addnptotpg(
*port_str = '\0'; /* Terminate string for IP */
port_str++; /* Skip over ":" */
- ret = strict_strtoul(port_str, 0, &port);
+ ret = kstrtoul(port_str, 0, &port);
if (ret < 0) {
- pr_err("strict_strtoul() failed for port_str: %d\n", ret);
+ pr_err("kstrtoul() failed for port_str: %d\n", ret);
return ERR_PTR(ret);
}
sock_in = (struct sockaddr_in *)&sockaddr;
@@ -1481,7 +1479,7 @@ static ssize_t lio_target_wwn_show_attr_lio_version(
struct target_fabric_configfs *tf,
char *page)
{
- return sprintf(page, "RisingTide Systems Linux-iSCSI Target "ISCSIT_VERSION"\n");
+ return sprintf(page, "Datera Inc. iSCSI Target "ISCSIT_VERSION"\n");
}
TF_WWN_ATTR_RO(lio_target, lio_version);
@@ -1925,7 +1923,7 @@ static void lio_release_cmd(struct se_cmd *se_cmd)
struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
pr_debug("Entering lio_release_cmd for se_cmd: %p\n", se_cmd);
- cmd->release_cmd(cmd);
+ iscsit_release_cmd(cmd);
}
/* End functions for target_core_fabric_ops */
diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h
index 4f77a78..9a5721b 100644
--- a/drivers/target/iscsi/iscsi_target_core.h
+++ b/drivers/target/iscsi/iscsi_target_core.h
@@ -9,7 +9,7 @@
#include <scsi/iscsi_proto.h>
#include <target/target_core_base.h>
-#define ISCSIT_VERSION "v4.1.0-rc2"
+#define ISCSIT_VERSION "v4.1.0"
#define ISCSI_MAX_DATASN_MISSING_COUNT 16
#define ISCSI_TX_THREAD_TCP_TIMEOUT 2
#define ISCSI_RX_THREAD_TCP_TIMEOUT 2
@@ -17,6 +17,9 @@
#define SECONDS_FOR_ASYNC_TEXT 10
#define SECONDS_FOR_LOGOUT_COMP 15
#define WHITE_SPACE " \t\v\f\n\r"
+#define ISCSIT_MIN_TAGS 16
+#define ISCSIT_EXTRA_TAGS 8
+#define ISCSIT_TCP_BACKLOG 256
/* struct iscsi_node_attrib sanity values */
#define NA_DATAOUT_TIMEOUT 3
@@ -47,7 +50,7 @@
#define TA_NETIF_TIMEOUT_MAX 15
#define TA_NETIF_TIMEOUT_MIN 2
#define TA_GENERATE_NODE_ACLS 0
-#define TA_DEFAULT_CMDSN_DEPTH 16
+#define TA_DEFAULT_CMDSN_DEPTH 64
#define TA_DEFAULT_CMDSN_DEPTH_MAX 512
#define TA_DEFAULT_CMDSN_DEPTH_MIN 1
#define TA_CACHE_DYNAMIC_ACLS 0
@@ -489,7 +492,6 @@ struct iscsi_cmd {
u32 first_data_sg_off;
u32 kmapped_nents;
sense_reason_t sense_reason;
- void (*release_cmd)(struct iscsi_cmd *);
} ____cacheline_aligned;
struct iscsi_tmr_req {
@@ -554,9 +556,19 @@ struct iscsi_conn {
struct completion rx_half_close_comp;
/* socket used by this connection */
struct socket *sock;
+ void (*orig_data_ready)(struct sock *, int);
+ void (*orig_state_change)(struct sock *);
+#define LOGIN_FLAGS_READ_ACTIVE 1
+#define LOGIN_FLAGS_CLOSED 2
+#define LOGIN_FLAGS_READY 4
+ unsigned long login_flags;
+ struct delayed_work login_work;
+ struct delayed_work login_cleanup_work;
+ struct iscsi_login *login;
struct timer_list nopin_timer;
struct timer_list nopin_response_timer;
struct timer_list transport_timer;
+ struct task_struct *login_kworker;
/* Spinlock used for add/deleting cmd's from conn_cmd_list */
spinlock_t cmd_lock;
spinlock_t conn_usage_lock;
@@ -584,6 +596,7 @@ struct iscsi_conn {
void *context;
struct iscsi_login_thread_s *login_thread;
struct iscsi_portal_group *tpg;
+ struct iscsi_tpg_np *tpg_np;
/* Pointer to parent session */
struct iscsi_session *sess;
/* Pointer to thread_set in use for this conn's threads */
@@ -682,6 +695,7 @@ struct iscsi_login {
u8 version_max;
u8 login_complete;
u8 login_failed;
+ bool zero_tsih;
char isid[6];
u32 cmd_sn;
itt_t init_task_tag;
@@ -694,6 +708,7 @@ struct iscsi_login {
char *req_buf;
char *rsp_buf;
struct iscsi_conn *conn;
+ struct iscsi_np *np;
} ____cacheline_aligned;
struct iscsi_node_attrib {
@@ -773,7 +788,6 @@ struct iscsi_np {
struct __kernel_sockaddr_storage np_sockaddr;
struct task_struct *np_thread;
struct timer_list np_login_timer;
- struct iscsi_portal_group *np_login_tpg;
void *np_context;
struct iscsit_transport *np_transport;
struct list_head np_list;
@@ -788,6 +802,8 @@ struct iscsi_tpg_np {
struct list_head tpg_np_parent_list;
struct se_tpg_np se_tpg_np;
spinlock_t tpg_np_parent_lock;
+ struct completion tpg_np_comp;
+ struct kref tpg_np_kref;
};
struct iscsi_portal_group {
@@ -809,7 +825,7 @@ struct iscsi_portal_group {
spinlock_t tpg_state_lock;
struct se_portal_group tpg_se_tpg;
struct mutex tpg_access_lock;
- struct mutex np_login_lock;
+ struct semaphore np_login_sem;
struct iscsi_tpg_attrib tpg_attrib;
struct iscsi_node_auth tpg_demo_auth;
/* Pointer to default list of iSCSI parameters for TPG */
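
The new login_flags word, the two delayed works, and conn->login are the state shared between the socket callbacks and the login worker introduced in iscsi_target_nego.c below. The flag defines are bit numbers for the atomic bitops, which is what lets the data_ready callback and the worker hand off without an extra lock; a minimal sketch of the assumed handoff:

    /* producer side (data_ready callback): claim the read slot */
    if (test_and_set_bit(LOGIN_FLAGS_READ_ACTIVE, &conn->login_flags))
        return;                         /* a worker is already reading */
    schedule_delayed_work(&conn->login_work, 0);

    /* consumer side (login worker): re-arm for the next wakeup */
    clear_bit(LOGIN_FLAGS_READ_ACTIVE, &conn->login_flags);
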
diff --git a/drivers/target/iscsi/iscsi_target_datain_values.c b/drivers/target/iscsi/iscsi_target_datain_values.c
index 848fee7..e93d5a7 100644
--- a/drivers/target/iscsi/iscsi_target_datain_values.c
+++ b/drivers/target/iscsi/iscsi_target_datain_values.c
@@ -1,9 +1,7 @@
/*******************************************************************************
* This file contains the iSCSI Target DataIN value generation functions.
*
- * © Copyright 2007-2011 RisingTide Systems LLC.
- *
- * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ * (c) Copyright 2007-2013 Datera, Inc.
*
* Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
*
diff --git a/drivers/target/iscsi/iscsi_target_device.c b/drivers/target/iscsi/iscsi_target_device.c
index 1b74033..6c7a510 100644
--- a/drivers/target/iscsi/iscsi_target_device.c
+++ b/drivers/target/iscsi/iscsi_target_device.c
@@ -2,9 +2,7 @@
* This file contains the iSCSI Virtual Device and Disk Transport
* agnostic related functions.
*
- © Copyright 2007-2011 RisingTide Systems LLC.
- *
- * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ * (c) Copyright 2007-2013 Datera, Inc.
*
* Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
*
diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c
index 08bd878..41052e5 100644
--- a/drivers/target/iscsi/iscsi_target_erl0.c
+++ b/drivers/target/iscsi/iscsi_target_erl0.c
@@ -2,9 +2,7 @@
* This file contains error recovery level zero functions used by
* the iSCSI Target driver.
*
- * © Copyright 2007-2011 RisingTide Systems LLC.
- *
- * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ * (c) Copyright 2007-2013 Datera, Inc.
*
* Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
*
diff --git a/drivers/target/iscsi/iscsi_target_erl1.c b/drivers/target/iscsi/iscsi_target_erl1.c
index 586c268..e048d64 100644
--- a/drivers/target/iscsi/iscsi_target_erl1.c
+++ b/drivers/target/iscsi/iscsi_target_erl1.c
@@ -1,9 +1,7 @@
/*******************************************************************************
* This file contains error recovery level one used by the iSCSI Target driver.
*
- * © Copyright 2007-2011 RisingTide Systems LLC.
- *
- * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ * (c) Copyright 2007-2013 Datera, Inc.
*
* Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
*
diff --git a/drivers/target/iscsi/iscsi_target_erl2.c b/drivers/target/iscsi/iscsi_target_erl2.c
index 45a5afd..33be1fb 100644
--- a/drivers/target/iscsi/iscsi_target_erl2.c
+++ b/drivers/target/iscsi/iscsi_target_erl2.c
@@ -2,9 +2,7 @@
* This file contains error recovery level two functions used by
* the iSCSI Target driver.
*
- * © Copyright 2007-2011 RisingTide Systems LLC.
- *
- * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ * (c) Copyright 2007-2013 Datera, Inc.
*
* Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
*
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index bc788c5..1794c75 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -1,9 +1,7 @@
/*******************************************************************************
* This file contains the login functions used by the iSCSI Target driver.
*
- * © Copyright 2007-2011 RisingTide Systems LLC.
- *
- * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ * (c) Copyright 2007-2013 Datera, Inc.
*
* Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
*
@@ -50,6 +48,7 @@ static struct iscsi_login *iscsi_login_init_conn(struct iscsi_conn *conn)
pr_err("Unable to allocate memory for struct iscsi_login.\n");
return NULL;
}
+ conn->login = login;
login->conn = conn;
login->first_request = 1;
@@ -428,7 +427,7 @@ static int iscsi_login_zero_tsih_s2(
ISCSI_LOGIN_STATUS_NO_RESOURCES);
return -1;
}
- rc = strict_strtoul(param->value, 0, &mrdsl);
+ rc = kstrtoul(param->value, 0, &mrdsl);
if (rc < 0) {
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
ISCSI_LOGIN_STATUS_NO_RESOURCES);
@@ -684,7 +683,7 @@ static void iscsi_post_login_start_timers(struct iscsi_conn *conn)
iscsit_start_nopin_timer(conn);
}
-static int iscsi_post_login_handler(
+int iscsi_post_login_handler(
struct iscsi_np *np,
struct iscsi_conn *conn,
u8 zero_tsih)
@@ -872,7 +871,7 @@ int iscsit_setup_np(
struct __kernel_sockaddr_storage *sockaddr)
{
struct socket *sock = NULL;
- int backlog = 5, ret, opt = 0, len;
+ int backlog = ISCSIT_TCP_BACKLOG, ret, opt = 0, len;
switch (np->np_network_transport) {
case ISCSI_TCP:
@@ -1007,16 +1006,24 @@ int iscsit_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)
rc = conn->sock->ops->getname(conn->sock,
(struct sockaddr *)&sock_in6, &err, 1);
if (!rc) {
- snprintf(conn->login_ip, sizeof(conn->login_ip), "%pI6c",
- &sock_in6.sin6_addr.in6_u);
+ if (!ipv6_addr_v4mapped(&sock_in6.sin6_addr))
+ snprintf(conn->login_ip, sizeof(conn->login_ip), "[%pI6c]",
+ &sock_in6.sin6_addr.in6_u);
+ else
+ snprintf(conn->login_ip, sizeof(conn->login_ip), "%pI4",
+ &sock_in6.sin6_addr.s6_addr32[3]);
conn->login_port = ntohs(sock_in6.sin6_port);
}
rc = conn->sock->ops->getname(conn->sock,
(struct sockaddr *)&sock_in6, &err, 0);
if (!rc) {
- snprintf(conn->local_ip, sizeof(conn->local_ip), "%pI6c",
- &sock_in6.sin6_addr.in6_u);
+ if (!ipv6_addr_v4mapped(&sock_in6.sin6_addr))
+ snprintf(conn->local_ip, sizeof(conn->local_ip), "[%pI6c]",
+ &sock_in6.sin6_addr.in6_u);
+ else
+ snprintf(conn->local_ip, sizeof(conn->local_ip), "%pI4",
+ &sock_in6.sin6_addr.s6_addr32[3]);
conn->local_port = ntohs(sock_in6.sin6_port);
}
} else {
@@ -1116,6 +1123,77 @@ iscsit_conn_set_transport(struct iscsi_conn *conn, struct iscsit_transport *t)
return 0;
}
+void iscsi_target_login_sess_out(struct iscsi_conn *conn,
+ struct iscsi_np *np, bool zero_tsih, bool new_sess)
+{
+ if (new_sess == false)
+ goto old_sess_out;
+
+ pr_err("iSCSI Login negotiation failed.\n");
+ iscsit_collect_login_stats(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+ ISCSI_LOGIN_STATUS_INIT_ERR);
+ if (!zero_tsih || !conn->sess)
+ goto old_sess_out;
+ if (conn->sess->se_sess)
+ transport_free_session(conn->sess->se_sess);
+ if (conn->sess->session_index != 0) {
+ spin_lock_bh(&sess_idr_lock);
+ idr_remove(&sess_idr, conn->sess->session_index);
+ spin_unlock_bh(&sess_idr_lock);
+ }
+ kfree(conn->sess->sess_ops);
+ kfree(conn->sess);
+
+old_sess_out:
+ iscsi_stop_login_thread_timer(np);
+ /*
+ * If login negotiation fails check if the Time2Retain timer
+ * needs to be restarted.
+ */
+ if (!zero_tsih && conn->sess) {
+ spin_lock_bh(&conn->sess->conn_lock);
+ if (conn->sess->session_state == TARG_SESS_STATE_FAILED) {
+ struct se_portal_group *se_tpg =
+ &ISCSI_TPG_C(conn)->tpg_se_tpg;
+
+ atomic_set(&conn->sess->session_continuation, 0);
+ spin_unlock_bh(&conn->sess->conn_lock);
+ spin_lock_bh(&se_tpg->session_lock);
+ iscsit_start_time2retain_handler(conn->sess);
+ spin_unlock_bh(&se_tpg->session_lock);
+ } else
+ spin_unlock_bh(&conn->sess->conn_lock);
+ iscsit_dec_session_usage_count(conn->sess);
+ }
+
+ if (!IS_ERR(conn->conn_rx_hash.tfm))
+ crypto_free_hash(conn->conn_rx_hash.tfm);
+ if (!IS_ERR(conn->conn_tx_hash.tfm))
+ crypto_free_hash(conn->conn_tx_hash.tfm);
+
+ if (conn->conn_cpumask)
+ free_cpumask_var(conn->conn_cpumask);
+
+ kfree(conn->conn_ops);
+
+ if (conn->param_list) {
+ iscsi_release_param_list(conn->param_list);
+ conn->param_list = NULL;
+ }
+ iscsi_target_nego_release(conn);
+
+ if (conn->sock) {
+ sock_release(conn->sock);
+ conn->sock = NULL;
+ }
+
+ if (conn->conn_transport->iscsit_free_conn)
+ conn->conn_transport->iscsit_free_conn(conn);
+
+ iscsit_put_transport(conn->conn_transport);
+ kfree(conn);
+}
+
static int __iscsi_target_login_thread(struct iscsi_np *np)
{
u8 *buffer, zero_tsih = 0;
@@ -1124,6 +1202,8 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
struct iscsi_login *login;
struct iscsi_portal_group *tpg = NULL;
struct iscsi_login_req *pdu;
+ struct iscsi_tpg_np *tpg_np;
+ bool new_sess = false;
flush_signals(current);
@@ -1264,6 +1344,7 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
tpg = conn->tpg;
goto new_sess_out;
}
+ login->zero_tsih = zero_tsih;
tpg = conn->tpg;
if (!tpg) {
@@ -1279,7 +1360,8 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
goto old_sess_out;
}
- if (iscsi_target_start_negotiation(login, conn) < 0)
+ ret = iscsi_target_start_negotiation(login, conn);
+ if (ret < 0)
goto new_sess_out;
if (!conn->sess) {
@@ -1292,84 +1374,32 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
if (signal_pending(current))
goto new_sess_out;
- ret = iscsi_post_login_handler(np, conn, zero_tsih);
+ if (ret == 1) {
+ tpg_np = conn->tpg_np;
- if (ret < 0)
- goto new_sess_out;
+ ret = iscsi_post_login_handler(np, conn, zero_tsih);
+ if (ret < 0)
+ goto new_sess_out;
+
+ iscsit_deaccess_np(np, tpg, tpg_np);
+ }
- iscsit_deaccess_np(np, tpg);
tpg = NULL;
+ tpg_np = NULL;
/* Get another socket */
return 1;
new_sess_out:
- pr_err("iSCSI Login negotiation failed.\n");
- iscsit_collect_login_stats(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
- ISCSI_LOGIN_STATUS_INIT_ERR);
- if (!zero_tsih || !conn->sess)
- goto old_sess_out;
- if (conn->sess->se_sess)
- transport_free_session(conn->sess->se_sess);
- if (conn->sess->session_index != 0) {
- spin_lock_bh(&sess_idr_lock);
- idr_remove(&sess_idr, conn->sess->session_index);
- spin_unlock_bh(&sess_idr_lock);
- }
- kfree(conn->sess->sess_ops);
- kfree(conn->sess);
+ new_sess = true;
old_sess_out:
- iscsi_stop_login_thread_timer(np);
- /*
- * If login negotiation fails check if the Time2Retain timer
- * needs to be restarted.
- */
- if (!zero_tsih && conn->sess) {
- spin_lock_bh(&conn->sess->conn_lock);
- if (conn->sess->session_state == TARG_SESS_STATE_FAILED) {
- struct se_portal_group *se_tpg =
- &ISCSI_TPG_C(conn)->tpg_se_tpg;
-
- atomic_set(&conn->sess->session_continuation, 0);
- spin_unlock_bh(&conn->sess->conn_lock);
- spin_lock_bh(&se_tpg->session_lock);
- iscsit_start_time2retain_handler(conn->sess);
- spin_unlock_bh(&se_tpg->session_lock);
- } else
- spin_unlock_bh(&conn->sess->conn_lock);
- iscsit_dec_session_usage_count(conn->sess);
- }
-
- if (!IS_ERR(conn->conn_rx_hash.tfm))
- crypto_free_hash(conn->conn_rx_hash.tfm);
- if (!IS_ERR(conn->conn_tx_hash.tfm))
- crypto_free_hash(conn->conn_tx_hash.tfm);
-
- if (conn->conn_cpumask)
- free_cpumask_var(conn->conn_cpumask);
-
- kfree(conn->conn_ops);
-
- if (conn->param_list) {
- iscsi_release_param_list(conn->param_list);
- conn->param_list = NULL;
- }
- iscsi_target_nego_release(conn);
-
- if (conn->sock) {
- sock_release(conn->sock);
- conn->sock = NULL;
- }
-
- if (conn->conn_transport->iscsit_free_conn)
- conn->conn_transport->iscsit_free_conn(conn);
-
- iscsit_put_transport(conn->conn_transport);
-
- kfree(conn);
+ tpg_np = conn->tpg_np;
+ iscsi_target_login_sess_out(conn, np, zero_tsih, new_sess);
+ new_sess = false;
if (tpg) {
- iscsit_deaccess_np(np, tpg);
+ iscsit_deaccess_np(np, tpg, tpg_np);
tpg = NULL;
+ tpg_np = NULL;
}
out:
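
The control flow above is the visible half of the refactor: the teardown that used to live inline under new_sess_out/old_sess_out moves into iscsi_target_login_sess_out() so the deferred path in iscsi_target_nego.c can reuse it, and iscsi_target_start_negotiation() gains a third outcome. How the return value is evidently dispatched after this patch:

    /* sketch: outcome handling implied by the hunk above */
    ret = iscsi_target_start_negotiation(login, conn);
    if (ret < 0)
        goto new_sess_out;              /* hard failure: tear down */
    if (ret == 1) {                     /* login completed synchronously */
        iscsi_post_login_handler(np, conn, zero_tsih);
        iscsit_deaccess_np(np, tpg, tpg_np);
    }
    /* ret == 0: negotiation continues from login_work */
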
diff --git a/drivers/target/iscsi/iscsi_target_login.h b/drivers/target/iscsi/iscsi_target_login.h
index 63efd28..29d0983 100644
--- a/drivers/target/iscsi/iscsi_target_login.h
+++ b/drivers/target/iscsi/iscsi_target_login.h
@@ -12,6 +12,9 @@ extern int iscsit_accept_np(struct iscsi_np *, struct iscsi_conn *);
extern int iscsit_get_login_rx(struct iscsi_conn *, struct iscsi_login *);
extern int iscsit_put_login_tx(struct iscsi_conn *, struct iscsi_login *, u32);
extern void iscsit_free_conn(struct iscsi_np *, struct iscsi_conn *);
+extern int iscsi_post_login_handler(struct iscsi_np *, struct iscsi_conn *, u8);
+extern void iscsi_target_login_sess_out(struct iscsi_conn *, struct iscsi_np *,
+ bool, bool);
extern int iscsi_target_login_thread(void *);
extern int iscsi_login_disable_FIM_keys(struct iscsi_param_list *, struct iscsi_conn *);
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
index c4675b4..14d1aed 100644
--- a/drivers/target/iscsi/iscsi_target_nego.c
+++ b/drivers/target/iscsi/iscsi_target_nego.c
@@ -1,9 +1,7 @@
/*******************************************************************************
* This file contains main functions related to iSCSI Parameter negotiation.
*
- * © Copyright 2007-2011 RisingTide Systems LLC.
- *
- * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ * (c) Copyright 2007-2013 Datera, Inc.
*
* Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
*
@@ -377,15 +375,284 @@ static int iscsi_target_do_tx_login_io(struct iscsi_conn *conn, struct iscsi_log
return 0;
}
-static int iscsi_target_do_login_io(struct iscsi_conn *conn, struct iscsi_login *login)
+static void iscsi_target_sk_data_ready(struct sock *sk, int count)
{
- if (iscsi_target_do_tx_login_io(conn, login) < 0)
- return -1;
+ struct iscsi_conn *conn = sk->sk_user_data;
+ bool rc;
- if (conn->conn_transport->iscsit_get_login_rx(conn, login) < 0)
- return -1;
+ pr_debug("Entering iscsi_target_sk_data_ready: conn: %p\n", conn);
- return 0;
+ write_lock_bh(&sk->sk_callback_lock);
+ if (!sk->sk_user_data) {
+ write_unlock_bh(&sk->sk_callback_lock);
+ return;
+ }
+ if (!test_bit(LOGIN_FLAGS_READY, &conn->login_flags)) {
+ write_unlock_bh(&sk->sk_callback_lock);
+ pr_debug("Got LOGIN_FLAGS_READY=0, conn: %p >>>>\n", conn);
+ return;
+ }
+ if (test_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags)) {
+ write_unlock_bh(&sk->sk_callback_lock);
+ pr_debug("Got LOGIN_FLAGS_CLOSED=1, conn: %p >>>>\n", conn);
+ return;
+ }
+ if (test_and_set_bit(LOGIN_FLAGS_READ_ACTIVE, &conn->login_flags)) {
+ write_unlock_bh(&sk->sk_callback_lock);
+ pr_debug("Got LOGIN_FLAGS_READ_ACTIVE=1, conn: %p >>>>\n", conn);
+ return;
+ }
+
+ rc = schedule_delayed_work(&conn->login_work, 0);
+ if (rc == false) {
+ pr_debug("iscsi_target_sk_data_ready, schedule_delayed_work"
+ " got false\n");
+ }
+ write_unlock_bh(&sk->sk_callback_lock);
+}
+
+static void iscsi_target_sk_state_change(struct sock *);
+
+static void iscsi_target_set_sock_callbacks(struct iscsi_conn *conn)
+{
+ struct sock *sk;
+
+ if (!conn->sock)
+ return;
+
+ sk = conn->sock->sk;
+ pr_debug("Entering iscsi_target_set_sock_callbacks: conn: %p\n", conn);
+
+ write_lock_bh(&sk->sk_callback_lock);
+ sk->sk_user_data = conn;
+ conn->orig_data_ready = sk->sk_data_ready;
+ conn->orig_state_change = sk->sk_state_change;
+ sk->sk_data_ready = iscsi_target_sk_data_ready;
+ sk->sk_state_change = iscsi_target_sk_state_change;
+ write_unlock_bh(&sk->sk_callback_lock);
+
+ sk->sk_sndtimeo = TA_LOGIN_TIMEOUT * HZ;
+ sk->sk_rcvtimeo = TA_LOGIN_TIMEOUT * HZ;
+}
+
+static void iscsi_target_restore_sock_callbacks(struct iscsi_conn *conn)
+{
+ struct sock *sk;
+
+ if (!conn->sock)
+ return;
+
+ sk = conn->sock->sk;
+ pr_debug("Entering iscsi_target_restore_sock_callbacks: conn: %p\n", conn);
+
+ write_lock_bh(&sk->sk_callback_lock);
+ if (!sk->sk_user_data) {
+ write_unlock_bh(&sk->sk_callback_lock);
+ return;
+ }
+ sk->sk_user_data = NULL;
+ sk->sk_data_ready = conn->orig_data_ready;
+ sk->sk_state_change = conn->orig_state_change;
+ write_unlock_bh(&sk->sk_callback_lock);
+
+ sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;
+ sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
+}
+
+static int iscsi_target_do_login(struct iscsi_conn *, struct iscsi_login *);
+
+static bool iscsi_target_sk_state_check(struct sock *sk)
+{
+ if (sk->sk_state == TCP_CLOSE_WAIT || sk->sk_state == TCP_CLOSE) {
+ pr_debug("iscsi_target_sk_state_check: TCP_CLOSE_WAIT|TCP_CLOSE,"
+ "returning FALSE\n");
+ return false;
+ }
+ return true;
+}
+
+static void iscsi_target_login_drop(struct iscsi_conn *conn, struct iscsi_login *login)
+{
+ struct iscsi_np *np = login->np;
+ bool zero_tsih = login->zero_tsih;
+
+ iscsi_remove_failed_auth_entry(conn);
+ iscsi_target_nego_release(conn);
+ iscsi_target_login_sess_out(conn, np, zero_tsih, true);
+}
+
+static void iscsi_target_login_timeout(unsigned long data)
+{
+ struct iscsi_conn *conn = (struct iscsi_conn *)data;
+
+ pr_debug("Entering iscsi_target_login_timeout >>>>>>>>>>>>>>>>>>>\n");
+
+ if (conn->login_kworker) {
+ pr_debug("Sending SIGINT to conn->login_kworker %s/%d\n",
+ conn->login_kworker->comm, conn->login_kworker->pid);
+ send_sig(SIGINT, conn->login_kworker, 1);
+ }
+}
+
+static void iscsi_target_do_login_rx(struct work_struct *work)
+{
+ struct iscsi_conn *conn = container_of(work,
+ struct iscsi_conn, login_work.work);
+ struct iscsi_login *login = conn->login;
+ struct iscsi_np *np = login->np;
+ struct iscsi_portal_group *tpg = conn->tpg;
+ struct iscsi_tpg_np *tpg_np = conn->tpg_np;
+ struct timer_list login_timer;
+ int rc, zero_tsih = login->zero_tsih;
+ bool state;
+
+ pr_debug("entering iscsi_target_do_login_rx, conn: %p, %s:%d\n",
+ conn, current->comm, current->pid);
+
+ spin_lock(&tpg->tpg_state_lock);
+ state = (tpg->tpg_state == TPG_STATE_ACTIVE);
+ spin_unlock(&tpg->tpg_state_lock);
+
+ if (state == false) {
+ pr_debug("iscsi_target_do_login_rx: tpg_state != TPG_STATE_ACTIVE\n");
+ iscsi_target_restore_sock_callbacks(conn);
+ iscsi_target_login_drop(conn, login);
+ iscsit_deaccess_np(np, tpg, tpg_np);
+ return;
+ }
+
+ if (conn->sock) {
+ struct sock *sk = conn->sock->sk;
+
+ read_lock_bh(&sk->sk_callback_lock);
+ state = iscsi_target_sk_state_check(sk);
+ read_unlock_bh(&sk->sk_callback_lock);
+
+ if (state == false) {
+ pr_debug("iscsi_target_do_login_rx, TCP state CLOSE\n");
+ iscsi_target_restore_sock_callbacks(conn);
+ iscsi_target_login_drop(conn, login);
+ iscsit_deaccess_np(np, tpg, tpg_np);
+ return;
+ }
+ }
+
+ conn->login_kworker = current;
+ allow_signal(SIGINT);
+
+ init_timer(&login_timer);
+ login_timer.expires = (get_jiffies_64() + TA_LOGIN_TIMEOUT * HZ);
+ login_timer.data = (unsigned long)conn;
+ login_timer.function = iscsi_target_login_timeout;
+ add_timer(&login_timer);
+ pr_debug("Starting login_timer for %s/%d\n", current->comm, current->pid);
+
+ rc = conn->conn_transport->iscsit_get_login_rx(conn, login);
+ del_timer_sync(&login_timer);
+ flush_signals(current);
+ conn->login_kworker = NULL;
+
+ if (rc < 0) {
+ iscsi_target_restore_sock_callbacks(conn);
+ iscsi_target_login_drop(conn, login);
+ iscsit_deaccess_np(np, tpg, tpg_np);
+ return;
+ }
+
+ pr_debug("iscsi_target_do_login_rx after rx_login_io, %p, %s:%d\n",
+ conn, current->comm, current->pid);
+
+ rc = iscsi_target_do_login(conn, login);
+ if (rc < 0) {
+ iscsi_target_restore_sock_callbacks(conn);
+ iscsi_target_login_drop(conn, login);
+ iscsit_deaccess_np(np, tpg, tpg_np);
+ } else if (!rc) {
+ if (conn->sock) {
+ struct sock *sk = conn->sock->sk;
+
+ write_lock_bh(&sk->sk_callback_lock);
+ clear_bit(LOGIN_FLAGS_READ_ACTIVE, &conn->login_flags);
+ write_unlock_bh(&sk->sk_callback_lock);
+ }
+ } else if (rc == 1) {
+ iscsi_target_nego_release(conn);
+ iscsi_post_login_handler(np, conn, zero_tsih);
+ iscsit_deaccess_np(np, tpg, tpg_np);
+ }
+}
+
+static void iscsi_target_do_cleanup(struct work_struct *work)
+{
+ struct iscsi_conn *conn = container_of(work,
+ struct iscsi_conn, login_cleanup_work.work);
+ struct sock *sk = conn->sock->sk;
+ struct iscsi_login *login = conn->login;
+ struct iscsi_np *np = login->np;
+ struct iscsi_portal_group *tpg = conn->tpg;
+ struct iscsi_tpg_np *tpg_np = conn->tpg_np;
+
+ pr_debug("Entering iscsi_target_do_cleanup\n");
+
+ cancel_delayed_work_sync(&conn->login_work);
+ conn->orig_state_change(sk);
+
+ iscsi_target_restore_sock_callbacks(conn);
+ iscsi_target_login_drop(conn, login);
+ iscsit_deaccess_np(np, tpg, tpg_np);
+
+ pr_debug("iscsi_target_do_cleanup done()\n");
+}
+
+static void iscsi_target_sk_state_change(struct sock *sk)
+{
+ struct iscsi_conn *conn;
+ void (*orig_state_change)(struct sock *);
+ bool state;
+
+ pr_debug("Entering iscsi_target_sk_state_change\n");
+
+ write_lock_bh(&sk->sk_callback_lock);
+ conn = sk->sk_user_data;
+ if (!conn) {
+ write_unlock_bh(&sk->sk_callback_lock);
+ return;
+ }
+ orig_state_change = conn->orig_state_change;
+
+ if (!test_bit(LOGIN_FLAGS_READY, &conn->login_flags)) {
+ pr_debug("Got LOGIN_FLAGS_READY=0 sk_state_change conn: %p\n",
+ conn);
+ write_unlock_bh(&sk->sk_callback_lock);
+ orig_state_change(sk);
+ return;
+ }
+ if (test_bit(LOGIN_FLAGS_READ_ACTIVE, &conn->login_flags)) {
+ pr_debug("Got LOGIN_FLAGS_READ_ACTIVE=1 sk_state_change"
+ " conn: %p\n", conn);
+ write_unlock_bh(&sk->sk_callback_lock);
+ orig_state_change(sk);
+ return;
+ }
+ if (test_and_set_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags)) {
+ pr_debug("Got LOGIN_FLAGS_CLOSED=1 sk_state_change conn: %p\n",
+ conn);
+ write_unlock_bh(&sk->sk_callback_lock);
+ orig_state_change(sk);
+ return;
+ }
+
+ state = iscsi_target_sk_state_check(sk);
+ write_unlock_bh(&sk->sk_callback_lock);
+
+ pr_debug("iscsi_target_sk_state_change: state: %d\n", state);
+
+ if (!state) {
+ pr_debug("iscsi_target_sk_state_change got failed state\n");
+ schedule_delayed_work(&conn->login_cleanup_work, 0);
+ return;
+ }
+ orig_state_change(sk);
}
/*
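
The block above is the standard socket-callback interception pattern: save sk_data_ready/sk_state_change under sk_callback_lock, point them at driver hooks, and park the owning object in sk_user_data; every hook then revalidates sk_user_data under the lock, since restore can race with a late callback. Reduced to its skeleton:

    /* install side */
    write_lock_bh(&sk->sk_callback_lock);
    sk->sk_user_data = conn;
    conn->orig_data_ready = sk->sk_data_ready;
    sk->sk_data_ready = iscsi_target_sk_data_ready;
    write_unlock_bh(&sk->sk_callback_lock);

    /* every hook re-checks before touching conn */
    write_lock_bh(&sk->sk_callback_lock);
    if (!sk->sk_user_data) {            /* callbacks already restored */
        write_unlock_bh(&sk->sk_callback_lock);
        return;
    }
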
@@ -643,10 +910,11 @@ static int iscsi_target_do_login(struct iscsi_conn *conn, struct iscsi_login *lo
if (login_rsp->flags & ISCSI_FLAG_LOGIN_TRANSIT) {
login->tsih = conn->sess->tsih;
login->login_complete = 1;
+ iscsi_target_restore_sock_callbacks(conn);
if (iscsi_target_do_tx_login_io(conn,
login) < 0)
return -1;
- return 0;
+ return 1;
}
break;
default:
@@ -656,13 +924,29 @@ static int iscsi_target_do_login(struct iscsi_conn *conn, struct iscsi_login *lo
break;
}
- if (iscsi_target_do_login_io(conn, login) < 0)
+ if (iscsi_target_do_tx_login_io(conn, login) < 0)
return -1;
if (login_rsp->flags & ISCSI_FLAG_LOGIN_TRANSIT) {
login_rsp->flags &= ~ISCSI_FLAG_LOGIN_TRANSIT;
login_rsp->flags &= ~ISCSI_FLAG_LOGIN_NEXT_STAGE_MASK;
}
+ break;
+ }
+
+ if (conn->sock) {
+ struct sock *sk = conn->sock->sk;
+ bool state;
+
+ read_lock_bh(&sk->sk_callback_lock);
+ state = iscsi_target_sk_state_check(sk);
+ read_unlock_bh(&sk->sk_callback_lock);
+
+ if (!state) {
+ pr_debug("iscsi_target_do_login() failed state for"
+ " conn: %p\n", conn);
+ return -1;
+ }
}
return 0;
@@ -695,9 +979,17 @@ int iscsi_target_locate_portal(
char *tmpbuf, *start = NULL, *end = NULL, *key, *value;
struct iscsi_session *sess = conn->sess;
struct iscsi_tiqn *tiqn;
+ struct iscsi_tpg_np *tpg_np = NULL;
struct iscsi_login_req *login_req;
- u32 payload_length;
- int sessiontype = 0, ret = 0;
+ struct se_node_acl *se_nacl;
+ u32 payload_length, queue_depth = 0;
+ int sessiontype = 0, ret = 0, tag_num, tag_size;
+
+ INIT_DELAYED_WORK(&conn->login_work, iscsi_target_do_login_rx);
+ INIT_DELAYED_WORK(&conn->login_cleanup_work, iscsi_target_do_cleanup);
+ iscsi_target_set_sock_callbacks(conn);
+
+ login->np = np;
login_req = (struct iscsi_login_req *) login->req;
payload_length = ntoh24(login_req->dlength);
@@ -791,7 +1083,7 @@ int iscsi_target_locate_portal(
goto out;
}
ret = 0;
- goto out;
+ goto alloc_tags;
}
get_target:
@@ -822,7 +1114,7 @@ get_target:
/*
* Locate Target Portal Group from Storage Node.
*/
- conn->tpg = iscsit_get_tpg_from_np(tiqn, np);
+ conn->tpg = iscsit_get_tpg_from_np(tiqn, np, &tpg_np);
if (!conn->tpg) {
pr_err("Unable to locate Target Portal Group"
" on %s\n", tiqn->tiqn);
@@ -832,12 +1124,16 @@ get_target:
ret = -1;
goto out;
}
+ conn->tpg_np = tpg_np;
pr_debug("Located Portal Group Object: %hu\n", conn->tpg->tpgt);
/*
* Setup crc32c modules from libcrypto
*/
if (iscsi_login_setup_crypto(conn) < 0) {
pr_err("iscsi_login_setup_crypto() failed\n");
+ kref_put(&tpg_np->tpg_np_kref, iscsit_login_kref_put);
+ iscsit_put_tiqn_for_login(tiqn);
+ conn->tpg = NULL;
ret = -1;
goto out;
}
@@ -846,11 +1142,12 @@ get_target:
* process login attempt.
*/
if (iscsit_access_np(np, conn->tpg) < 0) {
+ kref_put(&tpg_np->tpg_np_kref, iscsit_login_kref_put);
iscsit_put_tiqn_for_login(tiqn);
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
ISCSI_LOGIN_STATUS_SVC_UNAVAILABLE);
- ret = -1;
conn->tpg = NULL;
+ ret = -1;
goto out;
}
@@ -883,8 +1180,27 @@ get_target:
ret = -1;
goto out;
}
+ se_nacl = sess->se_sess->se_node_acl;
+ queue_depth = se_nacl->queue_depth;
+ /*
+ * Setup pre-allocated tags based upon allowed per NodeACL CmdSN
+ * depth for non immediate commands, plus extra tags for immediate
+ * commands.
+ *
+ * Also enforce a ISCSIT_MIN_TAGS to prevent unnecessary contention
+ * in per-cpu-ida tag allocation logic + small queue_depth.
+ */
+alloc_tags:
+ tag_num = max_t(u32, ISCSIT_MIN_TAGS, queue_depth);
+ tag_num += ISCSIT_EXTRA_TAGS;
+ tag_size = sizeof(struct iscsi_cmd) + conn->conn_transport->priv_size;
- ret = 0;
+ ret = transport_alloc_session_tags(sess->se_sess, tag_num, tag_size);
+ if (ret < 0) {
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+ ISCSI_LOGIN_STATUS_NO_RESOURCES);
+ ret = -1;
+ }
out:
kfree(tmpbuf);
return ret;
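
The tag math above sizes the per-session pool: at least ISCSIT_MIN_TAGS so the per-cpu-ida allocator does not thrash at tiny queue depths, plus ISCSIT_EXTRA_TAGS of headroom for immediate commands, with each slot big enough for struct iscsi_cmd plus the transport's private area. Worked through with the defaults this patch sets (ISCSIT_MIN_TAGS 16, ISCSIT_EXTRA_TAGS 8, TA_DEFAULT_CMDSN_DEPTH 64):

    tag_num  = max_t(u32, 16, 64);   /* -> 64: queue_depth dominates */
    tag_num += 8;                    /* -> 72 preallocated descriptors */
    tag_size = sizeof(struct iscsi_cmd) +
               conn->conn_transport->priv_size;
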
@@ -897,10 +1213,23 @@ int iscsi_target_start_negotiation(
int ret;
ret = iscsi_target_do_login(conn, login);
- if (ret != 0)
+ if (!ret) {
+ if (conn->sock) {
+ struct sock *sk = conn->sock->sk;
+
+ write_lock_bh(&sk->sk_callback_lock);
+ set_bit(LOGIN_FLAGS_READY, &conn->login_flags);
+ write_unlock_bh(&sk->sk_callback_lock);
+ }
+ } else if (ret < 0) {
+ cancel_delayed_work_sync(&conn->login_work);
+ cancel_delayed_work_sync(&conn->login_cleanup_work);
+ iscsi_target_restore_sock_callbacks(conn);
iscsi_remove_failed_auth_entry(conn);
+ }
+ if (ret != 0)
+ iscsi_target_nego_release(conn);
- iscsi_target_nego_release(conn);
return ret;
}
diff --git a/drivers/target/iscsi/iscsi_target_nodeattrib.c b/drivers/target/iscsi/iscsi_target_nodeattrib.c
index 11dc293..93bdc47 100644
--- a/drivers/target/iscsi/iscsi_target_nodeattrib.c
+++ b/drivers/target/iscsi/iscsi_target_nodeattrib.c
@@ -1,9 +1,7 @@
/*******************************************************************************
* This file contains the main functions related to Initiator Node Attributes.
*
- * © Copyright 2007-2011 RisingTide Systems LLC.
- *
- * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ * (c) Copyright 2007-2013 Datera, Inc.
*
* Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
*
diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c
index 35fd643..4d2e23f 100644
--- a/drivers/target/iscsi/iscsi_target_parameters.c
+++ b/drivers/target/iscsi/iscsi_target_parameters.c
@@ -1,9 +1,7 @@
/*******************************************************************************
* This file contains main functions related to iSCSI Parameter negotiation.
*
- * © Copyright 2007-2011 RisingTide Systems LLC.
- *
- * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ * (c) Copyright 2007-2013 Datera, Inc.
*
* Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
*
@@ -1182,7 +1180,7 @@ static int iscsi_check_acceptor_state(struct iscsi_param *param, char *value,
unsigned long long tmp;
int rc;
- rc = strict_strtoull(param->value, 0, &tmp);
+ rc = kstrtoull(param->value, 0, &tmp);
if (rc < 0)
return -1;
diff --git a/drivers/target/iscsi/iscsi_target_seq_pdu_list.c b/drivers/target/iscsi/iscsi_target_seq_pdu_list.c
index edb592a..ca41b58 100644
--- a/drivers/target/iscsi/iscsi_target_seq_pdu_list.c
+++ b/drivers/target/iscsi/iscsi_target_seq_pdu_list.c
@@ -2,9 +2,7 @@
* This file contains main functions related to iSCSI DataSequenceInOrder=No
* and DataPDUInOrder=No.
*
- © Copyright 2007-2011 RisingTide Systems LLC.
- *
- * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ * (c) Copyright 2007-2013 Datera, Inc.
*
* Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
*
diff --git a/drivers/target/iscsi/iscsi_target_stat.c b/drivers/target/iscsi/iscsi_target_stat.c
index 464b420..f788e8b 100644
--- a/drivers/target/iscsi/iscsi_target_stat.c
+++ b/drivers/target/iscsi/iscsi_target_stat.c
@@ -2,9 +2,7 @@
* Modern ConfigFS group context specific iSCSI statistics based on original
* iscsi_target_mib.c code
*
- * Copyright (c) 2011 Rising Tide Systems
- *
- * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ * Copyright (c) 2011-2013 Datera, Inc.
*
* Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
*
@@ -177,7 +175,7 @@ ISCSI_STAT_INSTANCE_ATTR_RO(description);
static ssize_t iscsi_stat_instance_show_attr_vendor(
struct iscsi_wwn_stat_grps *igrps, char *page)
{
- return snprintf(page, PAGE_SIZE, "RisingTide Systems iSCSI-Target\n");
+ return snprintf(page, PAGE_SIZE, "Datera, Inc. iSCSI-Target\n");
}
ISCSI_STAT_INSTANCE_ATTR_RO(vendor);
@@ -432,13 +430,7 @@ static ssize_t iscsi_stat_tgt_attr_show_attr_fail_intr_addr(
int ret;
spin_lock(&lstat->lock);
- if (lstat->last_intr_fail_ip_family == AF_INET6) {
- ret = snprintf(page, PAGE_SIZE, "[%s]\n",
- lstat->last_intr_fail_ip_addr);
- } else {
- ret = snprintf(page, PAGE_SIZE, "%s\n",
- lstat->last_intr_fail_ip_addr);
- }
+ ret = snprintf(page, PAGE_SIZE, "%s\n", lstat->last_intr_fail_ip_addr);
spin_unlock(&lstat->lock);
return ret;
diff --git a/drivers/target/iscsi/iscsi_target_tmr.c b/drivers/target/iscsi/iscsi_target_tmr.c
index b997e5d..78404b1 100644
--- a/drivers/target/iscsi/iscsi_target_tmr.c
+++ b/drivers/target/iscsi/iscsi_target_tmr.c
@@ -1,9 +1,7 @@
/*******************************************************************************
* This file contains the iSCSI Target specific Task Management functions.
*
- * © Copyright 2007-2011 RisingTide Systems LLC.
- *
- * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ * (c) Copyright 2007-2013 Datera, Inc.
*
* Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
*
diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
index 439260b..4faeb47 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.c
+++ b/drivers/target/iscsi/iscsi_target_tpg.c
@@ -1,9 +1,7 @@
/*******************************************************************************
* This file contains iSCSI Target Portal Group related functions.
*
- * © Copyright 2007-2011 RisingTide Systems LLC.
- *
- * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ * (c) Copyright 2007-2013 Datera, Inc.
*
* Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
*
@@ -49,7 +47,7 @@ struct iscsi_portal_group *iscsit_alloc_portal_group(struct iscsi_tiqn *tiqn, u1
INIT_LIST_HEAD(&tpg->tpg_gnp_list);
INIT_LIST_HEAD(&tpg->tpg_list);
mutex_init(&tpg->tpg_access_lock);
- mutex_init(&tpg->np_login_lock);
+ sema_init(&tpg->np_login_sem, 1);
spin_lock_init(&tpg->tpg_state_lock);
spin_lock_init(&tpg->tpg_np_lock);
@@ -129,7 +127,8 @@ void iscsit_release_discovery_tpg(void)
struct iscsi_portal_group *iscsit_get_tpg_from_np(
struct iscsi_tiqn *tiqn,
- struct iscsi_np *np)
+ struct iscsi_np *np,
+ struct iscsi_tpg_np **tpg_np_out)
{
struct iscsi_portal_group *tpg = NULL;
struct iscsi_tpg_np *tpg_np;
@@ -147,6 +146,8 @@ struct iscsi_portal_group *iscsit_get_tpg_from_np(
spin_lock(&tpg->tpg_np_lock);
list_for_each_entry(tpg_np, &tpg->tpg_gnp_list, tpg_np_list) {
if (tpg_np->tpg_np == np) {
+ *tpg_np_out = tpg_np;
+ kref_get(&tpg_np->tpg_np_kref);
spin_unlock(&tpg->tpg_np_lock);
spin_unlock(&tiqn->tiqn_tpg_lock);
return tpg;
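
The kref_get() taken here under tpg_np_lock pairs with the kref_put(..., iscsit_login_kref_put) calls in the login paths; the final put completes tpg_np_comp, which is what lets iscsit_reset_np_thread(..., shutdown=true) block until every in-flight login has dropped its reference. The assumed shutdown-side pairing:

    /* sketch: shutdown drops its own ref, then waits out the rest */
    kref_put(&tpg_np->tpg_np_kref, iscsit_login_kref_put);
    wait_for_completion(&tpg_np->tpg_np_comp);  /* fired by the final put */
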
@@ -175,18 +176,20 @@ void iscsit_put_tpg(struct iscsi_portal_group *tpg)
static void iscsit_clear_tpg_np_login_thread(
struct iscsi_tpg_np *tpg_np,
- struct iscsi_portal_group *tpg)
+ struct iscsi_portal_group *tpg,
+ bool shutdown)
{
if (!tpg_np->tpg_np) {
pr_err("struct iscsi_tpg_np->tpg_np is NULL!\n");
return;
}
- iscsit_reset_np_thread(tpg_np->tpg_np, tpg_np, tpg);
+ iscsit_reset_np_thread(tpg_np->tpg_np, tpg_np, tpg, shutdown);
}
void iscsit_clear_tpg_np_login_threads(
- struct iscsi_portal_group *tpg)
+ struct iscsi_portal_group *tpg,
+ bool shutdown)
{
struct iscsi_tpg_np *tpg_np;
@@ -197,7 +200,7 @@ void iscsit_clear_tpg_np_login_threads(
continue;
}
spin_unlock(&tpg->tpg_np_lock);
- iscsit_clear_tpg_np_login_thread(tpg_np, tpg);
+ iscsit_clear_tpg_np_login_thread(tpg_np, tpg, shutdown);
spin_lock(&tpg->tpg_np_lock);
}
spin_unlock(&tpg->tpg_np_lock);
@@ -268,6 +271,8 @@ int iscsit_tpg_del_portal_group(
tpg->tpg_state = TPG_STATE_INACTIVE;
spin_unlock(&tpg->tpg_state_lock);
+ iscsit_clear_tpg_np_login_threads(tpg, true);
+
if (iscsit_release_sessions_for_tpg(tpg, force) < 0) {
pr_err("Unable to delete iSCSI Target Portal Group:"
" %hu while active sessions exist, and force=0\n",
@@ -368,7 +373,7 @@ int iscsit_tpg_disable_portal_group(struct iscsi_portal_group *tpg, int force)
tpg->tpg_state = TPG_STATE_INACTIVE;
spin_unlock(&tpg->tpg_state_lock);
- iscsit_clear_tpg_np_login_threads(tpg);
+ iscsit_clear_tpg_np_login_threads(tpg, false);
if (iscsit_release_sessions_for_tpg(tpg, force) < 0) {
spin_lock(&tpg->tpg_state_lock);
@@ -490,6 +495,8 @@ struct iscsi_tpg_np *iscsit_tpg_add_network_portal(
INIT_LIST_HEAD(&tpg_np->tpg_np_child_list);
INIT_LIST_HEAD(&tpg_np->tpg_np_parent_list);
spin_lock_init(&tpg_np->tpg_np_parent_lock);
+ init_completion(&tpg_np->tpg_np_comp);
+ kref_init(&tpg_np->tpg_np_kref);
tpg_np->tpg_np = np;
tpg_np->tpg = tpg;
@@ -520,7 +527,7 @@ static int iscsit_tpg_release_np(
struct iscsi_portal_group *tpg,
struct iscsi_np *np)
{
- iscsit_clear_tpg_np_login_thread(tpg_np, tpg);
+ iscsit_clear_tpg_np_login_thread(tpg_np, tpg, true);
pr_debug("CORE[%s] - Removed Network Portal: %s:%hu,%hu on %s\n",
tpg->tpg_tiqn->tiqn, np->np_ip, np->np_port, tpg->tpgt,
diff --git a/drivers/target/iscsi/iscsi_target_tpg.h b/drivers/target/iscsi/iscsi_target_tpg.h
index dda48c1..b77693e 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.h
+++ b/drivers/target/iscsi/iscsi_target_tpg.h
@@ -5,10 +5,10 @@ extern struct iscsi_portal_group *iscsit_alloc_portal_group(struct iscsi_tiqn *,
extern int iscsit_load_discovery_tpg(void);
extern void iscsit_release_discovery_tpg(void);
extern struct iscsi_portal_group *iscsit_get_tpg_from_np(struct iscsi_tiqn *,
- struct iscsi_np *);
+ struct iscsi_np *, struct iscsi_tpg_np **);
extern int iscsit_get_tpg(struct iscsi_portal_group *);
extern void iscsit_put_tpg(struct iscsi_portal_group *);
-extern void iscsit_clear_tpg_np_login_threads(struct iscsi_portal_group *);
+extern void iscsit_clear_tpg_np_login_threads(struct iscsi_portal_group *, bool);
extern void iscsit_tpg_dump_params(struct iscsi_portal_group *);
extern int iscsit_tpg_add_portal_group(struct iscsi_tiqn *, struct iscsi_portal_group *);
extern int iscsit_tpg_del_portal_group(struct iscsi_tiqn *, struct iscsi_portal_group *,
diff --git a/drivers/target/iscsi/iscsi_target_tq.c b/drivers/target/iscsi/iscsi_target_tq.c
index 8128952..601e9cc 100644
--- a/drivers/target/iscsi/iscsi_target_tq.c
+++ b/drivers/target/iscsi/iscsi_target_tq.c
@@ -1,9 +1,7 @@
/*******************************************************************************
* This file contains the iSCSI Login Thread and Thread Queue functions.
*
- * © Copyright 2007-2011 RisingTide Systems LLC.
- *
- * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ * (c) Copyright 2007-2013 Datera, Inc.
*
* Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
*
@@ -105,12 +103,11 @@ int iscsi_allocate_thread_sets(u32 thread_pair_count)
ts->status = ISCSI_THREAD_SET_FREE;
INIT_LIST_HEAD(&ts->ts_list);
spin_lock_init(&ts->ts_state_lock);
- init_completion(&ts->rx_post_start_comp);
- init_completion(&ts->tx_post_start_comp);
init_completion(&ts->rx_restart_comp);
init_completion(&ts->tx_restart_comp);
init_completion(&ts->rx_start_comp);
init_completion(&ts->tx_start_comp);
+ sema_init(&ts->ts_activate_sem, 0);
ts->create_threads = 1;
ts->tx_thread = kthread_run(iscsi_target_tx_thread, ts, "%s",
@@ -139,35 +136,44 @@ int iscsi_allocate_thread_sets(u32 thread_pair_count)
return allocated_thread_pair_count;
}
-void iscsi_deallocate_thread_sets(void)
+static void iscsi_deallocate_thread_one(struct iscsi_thread_set *ts)
{
- u32 released_count = 0;
- struct iscsi_thread_set *ts = NULL;
-
- while ((ts = iscsi_get_ts_from_inactive_list())) {
+ spin_lock_bh(&ts->ts_state_lock);
+ ts->status = ISCSI_THREAD_SET_DIE;
+ if (ts->rx_thread) {
+ complete(&ts->rx_start_comp);
+ spin_unlock_bh(&ts->ts_state_lock);
+ kthread_stop(ts->rx_thread);
spin_lock_bh(&ts->ts_state_lock);
- ts->status = ISCSI_THREAD_SET_DIE;
+ }
+ if (ts->tx_thread) {
+ complete(&ts->tx_start_comp);
spin_unlock_bh(&ts->ts_state_lock);
+ kthread_stop(ts->tx_thread);
+ spin_lock_bh(&ts->ts_state_lock);
+ }
+ spin_unlock_bh(&ts->ts_state_lock);
+ /*
+ * Release this thread_id in the thread_set_bitmap
+ */
+ spin_lock(&ts_bitmap_lock);
+ bitmap_release_region(iscsit_global->ts_bitmap,
+ ts->thread_id, get_order(1));
+ spin_unlock(&ts_bitmap_lock);
- if (ts->rx_thread) {
- send_sig(SIGINT, ts->rx_thread, 1);
- kthread_stop(ts->rx_thread);
- }
- if (ts->tx_thread) {
- send_sig(SIGINT, ts->tx_thread, 1);
- kthread_stop(ts->tx_thread);
- }
- /*
- * Release this thread_id in the thread_set_bitmap
- */
- spin_lock(&ts_bitmap_lock);
- bitmap_release_region(iscsit_global->ts_bitmap,
- ts->thread_id, get_order(1));
- spin_unlock(&ts_bitmap_lock);
+ kfree(ts);
+}
+void iscsi_deallocate_thread_sets(void)
+{
+ struct iscsi_thread_set *ts = NULL;
+ u32 released_count = 0;
+
+ while ((ts = iscsi_get_ts_from_inactive_list())) {
+
+ iscsi_deallocate_thread_one(ts);
released_count++;
- kfree(ts);
}
if (released_count)
@@ -187,34 +193,13 @@ static void iscsi_deallocate_extra_thread_sets(void)
if (!ts)
break;
- spin_lock_bh(&ts->ts_state_lock);
- ts->status = ISCSI_THREAD_SET_DIE;
- spin_unlock_bh(&ts->ts_state_lock);
-
- if (ts->rx_thread) {
- send_sig(SIGINT, ts->rx_thread, 1);
- kthread_stop(ts->rx_thread);
- }
- if (ts->tx_thread) {
- send_sig(SIGINT, ts->tx_thread, 1);
- kthread_stop(ts->tx_thread);
- }
- /*
- * Release this thread_id in the thread_set_bitmap
- */
- spin_lock(&ts_bitmap_lock);
- bitmap_release_region(iscsit_global->ts_bitmap,
- ts->thread_id, get_order(1));
- spin_unlock(&ts_bitmap_lock);
-
+ iscsi_deallocate_thread_one(ts);
released_count++;
- kfree(ts);
}
- if (released_count) {
+ if (released_count)
pr_debug("Stopped %d thread set(s) (%d total threads)."
"\n", released_count, released_count * 2);
- }
}
void iscsi_activate_thread_set(struct iscsi_conn *conn, struct iscsi_thread_set *ts)
@@ -224,37 +209,23 @@ void iscsi_activate_thread_set(struct iscsi_conn *conn, struct iscsi_thread_set
spin_lock_bh(&ts->ts_state_lock);
conn->thread_set = ts;
ts->conn = conn;
+ ts->status = ISCSI_THREAD_SET_ACTIVE;
spin_unlock_bh(&ts->ts_state_lock);
- /*
- * Start up the RX thread and wait on rx_post_start_comp. The RX
- * Thread will then do the same for the TX Thread in
- * iscsi_rx_thread_pre_handler().
- */
+
complete(&ts->rx_start_comp);
- wait_for_completion(&ts->rx_post_start_comp);
+ complete(&ts->tx_start_comp);
+
+ down(&ts->ts_activate_sem);
}
struct iscsi_thread_set *iscsi_get_thread_set(void)
{
- int allocate_ts = 0;
- struct completion comp;
- struct iscsi_thread_set *ts = NULL;
- /*
- * If no inactive thread set is available on the first call to
- * iscsi_get_ts_from_inactive_list(), sleep for a second and
- * try again. If still none are available after two attempts,
- * allocate a set ourselves.
- */
+ struct iscsi_thread_set *ts;
+
get_set:
ts = iscsi_get_ts_from_inactive_list();
if (!ts) {
- if (allocate_ts == 2)
- iscsi_allocate_thread_sets(1);
-
- init_completion(&comp);
- wait_for_completion_timeout(&comp, 1 * HZ);
-
- allocate_ts++;
+ iscsi_allocate_thread_sets(1);
goto get_set;
}
@@ -263,6 +234,7 @@ get_set:
ts->thread_count = 2;
init_completion(&ts->rx_restart_comp);
init_completion(&ts->tx_restart_comp);
+ sema_init(&ts->ts_activate_sem, 0);
return ts;
}
@@ -400,7 +372,8 @@ static void iscsi_check_to_add_additional_sets(void)
static int iscsi_signal_thread_pre_handler(struct iscsi_thread_set *ts)
{
spin_lock_bh(&ts->ts_state_lock);
- if ((ts->status == ISCSI_THREAD_SET_DIE) || signal_pending(current)) {
+ if (ts->status == ISCSI_THREAD_SET_DIE || kthread_should_stop() ||
+ signal_pending(current)) {
spin_unlock_bh(&ts->ts_state_lock);
return -1;
}
@@ -419,7 +392,8 @@ struct iscsi_conn *iscsi_rx_thread_pre_handler(struct iscsi_thread_set *ts)
goto sleep;
}
- flush_signals(current);
+ if (ts->status != ISCSI_THREAD_SET_DIE)
+ flush_signals(current);
if (ts->delay_inactive && (--ts->thread_count == 0)) {
spin_unlock_bh(&ts->ts_state_lock);
@@ -446,18 +420,19 @@ sleep:
if (iscsi_signal_thread_pre_handler(ts) < 0)
return NULL;
+ iscsi_check_to_add_additional_sets();
+
+ spin_lock_bh(&ts->ts_state_lock);
if (!ts->conn) {
pr_err("struct iscsi_thread_set->conn is NULL for"
- " thread_id: %d, going back to sleep\n", ts->thread_id);
- goto sleep;
+ " RX thread_id: %s/%d\n", current->comm, current->pid);
+ spin_unlock_bh(&ts->ts_state_lock);
+ return NULL;
}
- iscsi_check_to_add_additional_sets();
- /*
- * The RX Thread starts up the TX Thread and sleeps.
- */
ts->thread_clear |= ISCSI_CLEAR_RX_THREAD;
- complete(&ts->tx_start_comp);
- wait_for_completion(&ts->tx_post_start_comp);
+ spin_unlock_bh(&ts->ts_state_lock);
+
+ up(&ts->ts_activate_sem);
return ts->conn;
}
@@ -472,7 +447,8 @@ struct iscsi_conn *iscsi_tx_thread_pre_handler(struct iscsi_thread_set *ts)
goto sleep;
}
- flush_signals(current);
+ if (ts->status != ISCSI_THREAD_SET_DIE)
+ flush_signals(current);
if (ts->delay_inactive && (--ts->thread_count == 0)) {
spin_unlock_bh(&ts->ts_state_lock);
@@ -498,27 +474,20 @@ sleep:
if (iscsi_signal_thread_pre_handler(ts) < 0)
return NULL;
- if (!ts->conn) {
- pr_err("struct iscsi_thread_set->conn is NULL for "
- " thread_id: %d, going back to sleep\n",
- ts->thread_id);
- goto sleep;
- }
-
iscsi_check_to_add_additional_sets();
- /*
- * From the TX thread, up the tx_post_start_comp that the RX Thread is
- * sleeping on in iscsi_rx_thread_pre_handler(), then up the
- * rx_post_start_comp that iscsi_activate_thread_set() is sleeping on.
- */
- ts->thread_clear |= ISCSI_CLEAR_TX_THREAD;
- complete(&ts->tx_post_start_comp);
- complete(&ts->rx_post_start_comp);
spin_lock_bh(&ts->ts_state_lock);
- ts->status = ISCSI_THREAD_SET_ACTIVE;
+ if (!ts->conn) {
+ pr_err("struct iscsi_thread_set->conn is NULL for"
+ " TX thread_id: %s/%d\n", current->comm, current->pid);
+ spin_unlock_bh(&ts->ts_state_lock);
+ return NULL;
+ }
+ ts->thread_clear |= ISCSI_CLEAR_TX_THREAD;
spin_unlock_bh(&ts->ts_state_lock);
+ up(&ts->ts_activate_sem);
+
return ts->conn;
}
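
The four-completion startup dance (rx_start -> tx_start -> tx_post_start -> rx_post_start) collapses into one semaphore: the activator marks the set ACTIVE, completes both start completions, then blocks in down(&ts->ts_activate_sem) until a pre-handler checks in; each pre-handler ups the semaphore after latching its ISCSI_CLEAR_* bit, and the semaphore is re-armed to zero each time the set is handed out again. The handshake, as assumed from the hunks:

    /* activator (iscsi_activate_thread_set) */
    ts->status = ISCSI_THREAD_SET_ACTIVE;
    complete(&ts->rx_start_comp);
    complete(&ts->tx_start_comp);
    down(&ts->ts_activate_sem);         /* wait for a thread to check in */

    /* each pre-handler, after setting its ISCSI_CLEAR_* bit */
    up(&ts->ts_activate_sem);
    return ts->conn;
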
diff --git a/drivers/target/iscsi/iscsi_target_tq.h b/drivers/target/iscsi/iscsi_target_tq.h
index 547d118..cc1eede 100644
--- a/drivers/target/iscsi/iscsi_target_tq.h
+++ b/drivers/target/iscsi/iscsi_target_tq.h
@@ -64,10 +64,6 @@ struct iscsi_thread_set {
struct iscsi_conn *conn;
/* used for controlling ts state accesses */
spinlock_t ts_state_lock;
- /* Used for rx side post startup */
- struct completion rx_post_start_comp;
- /* Used for tx side post startup */
- struct completion tx_post_start_comp;
/* used for restarting thread queue */
struct completion rx_restart_comp;
/* used for restarting thread queue */
@@ -82,6 +78,7 @@ struct iscsi_thread_set {
struct task_struct *tx_thread;
/* struct iscsi_thread_set in list list head*/
struct list_head ts_list;
+ struct semaphore ts_activate_sem;
};
#endif /*** ISCSI_THREAD_QUEUE_H ***/
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index 1df06d5..f2de28e 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -1,9 +1,7 @@
/*******************************************************************************
* This file contains the iSCSI Target specific utility functions.
*
- * © Copyright 2007-2011 RisingTide Systems LLC.
- *
- * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ * (c) Copyright 2007-2013 Datera, Inc.
*
* Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
*
@@ -19,6 +17,7 @@
******************************************************************************/
#include <linux/list.h>
+#include <linux/percpu_ida.h>
#include <scsi/scsi_tcq.h>
#include <scsi/iscsi_proto.h>
#include <target/target_core_base.h>
@@ -149,18 +148,6 @@ void iscsit_free_r2ts_from_list(struct iscsi_cmd *cmd)
spin_unlock_bh(&cmd->r2t_lock);
}
-struct iscsi_cmd *iscsit_alloc_cmd(struct iscsi_conn *conn, gfp_t gfp_mask)
-{
- struct iscsi_cmd *cmd;
-
- cmd = kmem_cache_zalloc(lio_cmd_cache, gfp_mask);
- if (!cmd)
- return NULL;
-
- cmd->release_cmd = &iscsit_release_cmd;
- return cmd;
-}
-
/*
* May be called from software interrupt (timer) context for allocating
* iSCSI NopINs.
@@ -168,12 +155,15 @@ struct iscsi_cmd *iscsit_alloc_cmd(struct iscsi_conn *conn, gfp_t gfp_mask)
struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *conn, gfp_t gfp_mask)
{
struct iscsi_cmd *cmd;
+ struct se_session *se_sess = conn->sess->se_sess;
+ int size, tag;
- cmd = conn->conn_transport->iscsit_alloc_cmd(conn, gfp_mask);
- if (!cmd) {
- pr_err("Unable to allocate memory for struct iscsi_cmd.\n");
- return NULL;
- }
+ tag = percpu_ida_alloc(&se_sess->sess_tag_pool, gfp_mask);
+ size = sizeof(struct iscsi_cmd) + conn->conn_transport->priv_size;
+ cmd = (struct iscsi_cmd *)(se_sess->sess_cmd_map + (tag * size));
+ memset(cmd, 0, size);
+
+ cmd->se_cmd.map_tag = tag;
cmd->conn = conn;
INIT_LIST_HEAD(&cmd->i_conn_node);
INIT_LIST_HEAD(&cmd->datain_list);
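
iscsit_allocate_cmd() now carves descriptors out of a flat per-session map instead of the removed lio_cmd_cache slab: percpu_ida_alloc() returns a small integer tag that indexes a tag_size-byte slot in se_sess->sess_cmd_map (sized by the transport_alloc_session_tags() call added in iscsi_target_nego.c), and iscsit_release_cmd() hands the tag back with percpu_ida_free(). The mapping, spelled out:

    /* sketch: tag <-> slot arithmetic assumed by the hunk above */
    size = sizeof(struct iscsi_cmd) + conn->conn_transport->priv_size;
    tag  = percpu_ida_alloc(&se_sess->sess_tag_pool, gfp_mask);
    cmd  = (struct iscsi_cmd *)(se_sess->sess_cmd_map + tag * size);
    /* ... and on release: */
    percpu_ida_free(&sess->se_sess->sess_tag_pool, se_cmd->map_tag);
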
@@ -689,6 +679,16 @@ void iscsit_free_queue_reqs_for_conn(struct iscsi_conn *conn)
void iscsit_release_cmd(struct iscsi_cmd *cmd)
{
+ struct iscsi_session *sess;
+ struct se_cmd *se_cmd = &cmd->se_cmd;
+
+ if (cmd->conn)
+ sess = cmd->conn->sess;
+ else
+ sess = cmd->sess;
+
+ BUG_ON(!sess || !sess->se_sess);
+
kfree(cmd->buf_ptr);
kfree(cmd->pdu_list);
kfree(cmd->seq_list);
@@ -696,8 +696,9 @@ void iscsit_release_cmd(struct iscsi_cmd *cmd)
kfree(cmd->iov_data);
kfree(cmd->text_in_ptr);
- kmem_cache_free(lio_cmd_cache, cmd);
+ percpu_ida_free(&sess->se_sess->sess_tag_pool, se_cmd->map_tag);
}
+EXPORT_SYMBOL(iscsit_release_cmd);
static void __iscsit_free_cmd(struct iscsi_cmd *cmd, bool scsi_cmd,
bool check_queues)
@@ -761,7 +762,7 @@ void iscsit_free_cmd(struct iscsi_cmd *cmd, bool shutdown)
/* Fall-through */
default:
__iscsit_free_cmd(cmd, false, shutdown);
- cmd->release_cmd(cmd);
+ iscsit_release_cmd(cmd);
break;
}
}
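
The iscsi_target_util.c hunks above retire the lio_cmd_cache slab in favour of a per-session, pre-allocated command map indexed by percpu_ida tags. Condensed, the allocation/release pattern looks like the following (it assumes a pool set up via percpu_ida_init(), which transport_alloc_session_tags() does later in this diff; note percpu_ida_alloc() can block with a sleeping gfp_mask, and a negative tag would need checking for atomic callers):

#include <linux/percpu_ida.h>

	int size = sizeof(struct iscsi_cmd) + conn->conn_transport->priv_size;
	int tag = percpu_ida_alloc(&se_sess->sess_tag_pool, gfp_mask);
	struct iscsi_cmd *cmd;

	cmd = (struct iscsi_cmd *)(se_sess->sess_cmd_map + tag * size);
	memset(cmd, 0, size);			/* slots are recycled, scrub first */
	cmd->se_cmd.map_tag = tag;		/* remembered for release */
	/* ... command lifetime ... */
	percpu_ida_free(&se_sess->sess_tag_pool, cmd->se_cmd.map_tag);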
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index 568ad25..0f6d69d 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -3,7 +3,7 @@
* This file contains the Linux/SCSI LLD virtual SCSI initiator driver
* for emulated SAS initiator ports
*
- * © Copyright 2011 RisingTide Systems LLC.
+ * © Copyright 2011-2013 Datera, Inc.
*
* Licensed to the Linux Foundation under the General Public License (GPL) version 2.
*
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index cbe48ab..4724410 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -3,7 +3,7 @@
*
 * This file contains SPC-3 compliant asymmetric logical unit assignment (ALUA)
*
- * (c) Copyright 2009-2012 RisingTide Systems LLC.
+ * (c) Copyright 2009-2013 Datera, Inc.
*
* Nicholas A. Bellinger <nab@kernel.org>
*
@@ -557,6 +557,9 @@ target_alua_state_check(struct se_cmd *cmd)
 * an ALUA logical unit group.
*/
tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
+ if (!tg_pt_gp_mem)
+ return 0;
+
spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
out_alua_state = atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
@@ -730,7 +733,7 @@ static int core_alua_write_tpg_metadata(
if (ret < 0)
pr_err("Error writing ALUA metadata file: %s\n", path);
fput(file);
- return ret ? -EIO : 0;
+ return (ret < 0) ? -EIO : 0;
}
/*
@@ -1756,10 +1759,10 @@ ssize_t core_alua_store_access_type(
unsigned long tmp;
int ret;
- ret = strict_strtoul(page, 0, &tmp);
+ ret = kstrtoul(page, 0, &tmp);
if (ret < 0) {
pr_err("Unable to extract alua_access_type\n");
- return -EINVAL;
+ return ret;
}
if ((tmp != 0) && (tmp != 1) && (tmp != 2) && (tmp != 3)) {
pr_err("Illegal value for alua_access_type:"
@@ -1794,10 +1797,10 @@ ssize_t core_alua_store_nonop_delay_msecs(
unsigned long tmp;
int ret;
- ret = strict_strtoul(page, 0, &tmp);
+ ret = kstrtoul(page, 0, &tmp);
if (ret < 0) {
pr_err("Unable to extract nonop_delay_msecs\n");
- return -EINVAL;
+ return ret;
}
if (tmp > ALUA_MAX_NONOP_DELAY_MSECS) {
pr_err("Passed nonop_delay_msecs: %lu, exceeds"
@@ -1825,10 +1828,10 @@ ssize_t core_alua_store_trans_delay_msecs(
unsigned long tmp;
int ret;
- ret = strict_strtoul(page, 0, &tmp);
+ ret = kstrtoul(page, 0, &tmp);
if (ret < 0) {
pr_err("Unable to extract trans_delay_msecs\n");
- return -EINVAL;
+ return ret;
}
if (tmp > ALUA_MAX_TRANS_DELAY_MSECS) {
pr_err("Passed trans_delay_msecs: %lu, exceeds"
@@ -1856,10 +1859,10 @@ ssize_t core_alua_store_implict_trans_secs(
unsigned long tmp;
int ret;
- ret = strict_strtoul(page, 0, &tmp);
+ ret = kstrtoul(page, 0, &tmp);
if (ret < 0) {
pr_err("Unable to extract implict_trans_secs\n");
- return -EINVAL;
+ return ret;
}
if (tmp > ALUA_MAX_IMPLICT_TRANS_SECS) {
pr_err("Passed implict_trans_secs: %lu, exceeds"
@@ -1887,10 +1890,10 @@ ssize_t core_alua_store_preferred_bit(
unsigned long tmp;
int ret;
- ret = strict_strtoul(page, 0, &tmp);
+ ret = kstrtoul(page, 0, &tmp);
if (ret < 0) {
pr_err("Unable to extract preferred ALUA value\n");
- return -EINVAL;
+ return ret;
}
if ((tmp != 0) && (tmp != 1)) {
pr_err("Illegal value for preferred ALUA: %lu\n", tmp);
@@ -1922,10 +1925,10 @@ ssize_t core_alua_store_offline_bit(
if (!lun->lun_sep)
return -ENODEV;
- ret = strict_strtoul(page, 0, &tmp);
+ ret = kstrtoul(page, 0, &tmp);
if (ret < 0) {
pr_err("Unable to extract alua_tg_pt_offline value\n");
- return -EINVAL;
+ return ret;
}
if ((tmp != 0) && (tmp != 1)) {
pr_err("Illegal value for alua_tg_pt_offline: %lu\n",
@@ -1961,10 +1964,10 @@ ssize_t core_alua_store_secondary_status(
unsigned long tmp;
int ret;
- ret = strict_strtoul(page, 0, &tmp);
+ ret = kstrtoul(page, 0, &tmp);
if (ret < 0) {
pr_err("Unable to extract alua_tg_pt_status\n");
- return -EINVAL;
+ return ret;
}
if ((tmp != ALUA_STATUS_NONE) &&
(tmp != ALUA_STATUS_ALTERED_BY_EXPLICT_STPG) &&
@@ -1994,10 +1997,10 @@ ssize_t core_alua_store_secondary_write_metadata(
unsigned long tmp;
int ret;
- ret = strict_strtoul(page, 0, &tmp);
+ ret = kstrtoul(page, 0, &tmp);
if (ret < 0) {
pr_err("Unable to extract alua_tg_pt_write_md\n");
- return -EINVAL;
+ return ret;
}
if ((tmp != 0) && (tmp != 1)) {
pr_err("Illegal value for alua_tg_pt_write_md:"
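
Every strict_strtoul() -> kstrtoul() conversion above follows the same shape: parse with base 0 and propagate kstrtoul()'s error code instead of flattening it to -EINVAL, while keeping the domain check in the caller. A minimal sketch of that shape (the handler body is illustrative, not a specific function from this file):

	unsigned long tmp;
	int ret;

	ret = kstrtoul(page, 0, &tmp);	/* base 0: decimal, 0x.. hex, 0.. octal */
	if (ret < 0)
		return ret;		/* -EINVAL or -ERANGE straight from kstrtoul() */
	if (tmp != 0 && tmp != 1)
		return -EINVAL;		/* range/domain checks stay with the caller */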
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index e4d2293..82e81c5 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -3,7 +3,7 @@
*
* This file contains ConfigFS logic for the Generic Target Engine project.
*
- * (c) Copyright 2008-2012 RisingTide Systems LLC.
+ * (c) Copyright 2008-2013 Datera, Inc.
*
* Nicholas A. Bellinger <nab@kernel.org>
*
@@ -48,6 +48,7 @@
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_rd.h"
+#include "target_core_xcopy.h"
extern struct t10_alua_lu_gp *default_lu_gp;
@@ -268,7 +269,7 @@ static struct configfs_subsystem target_core_fabrics = {
},
};
-static struct configfs_subsystem *target_core_subsystem[] = {
+struct configfs_subsystem *target_core_subsystem[] = {
&target_core_fabrics,
NULL,
};
@@ -577,9 +578,9 @@ static ssize_t target_core_dev_store_attr_##_name( \
unsigned long val; \
int ret; \
\
- ret = strict_strtoul(page, 0, &val); \
+ ret = kstrtoul(page, 0, &val); \
if (ret < 0) { \
- pr_err("strict_strtoul() failed with" \
+ pr_err("kstrtoul() failed with" \
" ret: %d\n", ret); \
return -EINVAL; \
} \
@@ -636,6 +637,12 @@ SE_DEV_ATTR(emulate_tpu, S_IRUGO | S_IWUSR);
DEF_DEV_ATTRIB(emulate_tpws);
SE_DEV_ATTR(emulate_tpws, S_IRUGO | S_IWUSR);
+DEF_DEV_ATTRIB(emulate_caw);
+SE_DEV_ATTR(emulate_caw, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB(emulate_3pc);
+SE_DEV_ATTR(emulate_3pc, S_IRUGO | S_IWUSR);
+
DEF_DEV_ATTRIB(enforce_pr_isids);
SE_DEV_ATTR(enforce_pr_isids, S_IRUGO | S_IWUSR);
@@ -693,6 +700,8 @@ static struct configfs_attribute *target_core_dev_attrib_attrs[] = {
&target_core_dev_attrib_emulate_tas.attr,
&target_core_dev_attrib_emulate_tpu.attr,
&target_core_dev_attrib_emulate_tpws.attr,
+ &target_core_dev_attrib_emulate_caw.attr,
+ &target_core_dev_attrib_emulate_3pc.attr,
&target_core_dev_attrib_enforce_pr_isids.attr,
&target_core_dev_attrib_is_nonrot.attr,
&target_core_dev_attrib_emulate_rest_reord.attr,
@@ -1310,9 +1319,9 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
ret = -ENOMEM;
goto out;
}
- ret = strict_strtoull(arg_p, 0, &tmp_ll);
+ ret = kstrtoull(arg_p, 0, &tmp_ll);
if (ret < 0) {
- pr_err("strict_strtoull() failed for"
+ pr_err("kstrtoull() failed for"
" sa_res_key=\n");
goto out;
}
@@ -1836,11 +1845,11 @@ static ssize_t target_core_alua_lu_gp_store_attr_lu_gp_id(
unsigned long lu_gp_id;
int ret;
- ret = strict_strtoul(page, 0, &lu_gp_id);
+ ret = kstrtoul(page, 0, &lu_gp_id);
if (ret < 0) {
- pr_err("strict_strtoul() returned %d for"
+ pr_err("kstrtoul() returned %d for"
" lu_gp_id\n", ret);
- return -EINVAL;
+ return ret;
}
if (lu_gp_id > 0x0000ffff) {
pr_err("ALUA lu_gp_id: %lu exceeds maximum:"
@@ -2032,11 +2041,11 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_state(
return -EINVAL;
}
- ret = strict_strtoul(page, 0, &tmp);
+ ret = kstrtoul(page, 0, &tmp);
if (ret < 0) {
pr_err("Unable to extract new ALUA access state from"
" %s\n", page);
- return -EINVAL;
+ return ret;
}
new_state = (int)tmp;
@@ -2079,11 +2088,11 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_status(
return -EINVAL;
}
- ret = strict_strtoul(page, 0, &tmp);
+ ret = kstrtoul(page, 0, &tmp);
if (ret < 0) {
pr_err("Unable to extract new ALUA access status"
" from %s\n", page);
- return -EINVAL;
+ return ret;
}
new_status = (int)tmp;
@@ -2139,10 +2148,10 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_write_metadata(
unsigned long tmp;
int ret;
- ret = strict_strtoul(page, 0, &tmp);
+ ret = kstrtoul(page, 0, &tmp);
if (ret < 0) {
pr_err("Unable to extract alua_write_metadata\n");
- return -EINVAL;
+ return ret;
}
if ((tmp != 0) && (tmp != 1)) {
@@ -2263,11 +2272,11 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_tg_pt_gp_id(
unsigned long tg_pt_gp_id;
int ret;
- ret = strict_strtoul(page, 0, &tg_pt_gp_id);
+ ret = kstrtoul(page, 0, &tg_pt_gp_id);
if (ret < 0) {
- pr_err("strict_strtoul() returned %d for"
+ pr_err("kstrtoul() returned %d for"
" tg_pt_gp_id\n", ret);
- return -EINVAL;
+ return ret;
}
if (tg_pt_gp_id > 0x0000ffff) {
pr_err("ALUA tg_pt_gp_id: %lu exceeds maximum:"
@@ -2676,10 +2685,10 @@ static ssize_t target_core_hba_store_attr_hba_mode(struct se_hba *hba,
if (transport->pmode_enable_hba == NULL)
return -EINVAL;
- ret = strict_strtoul(page, 0, &mode_flag);
+ ret = kstrtoul(page, 0, &mode_flag);
if (ret < 0) {
pr_err("Unable to extract hba mode flag: %d\n", ret);
- return -EINVAL;
+ return ret;
}
if (hba->dev_count) {
@@ -2767,11 +2776,11 @@ static struct config_group *target_core_call_addhbatotarget(
str++; /* Skip to start of plugin dependent ID */
}
- ret = strict_strtoul(str, 0, &plugin_dep_id);
+ ret = kstrtoul(str, 0, &plugin_dep_id);
if (ret < 0) {
- pr_err("strict_strtoul() returned %d for"
+ pr_err("kstrtoul() returned %d for"
" plugin_dep_id\n", ret);
- return ERR_PTR(-EINVAL);
+ return ERR_PTR(ret);
}
/*
* Load up TCM subsystem plugins if they have not already been loaded.
@@ -2927,6 +2936,10 @@ static int __init target_core_init_configfs(void)
if (ret < 0)
goto out;
+ ret = target_xcopy_setup_pt();
+ if (ret < 0)
+ goto out;
+
return 0;
out:
@@ -2999,6 +3012,7 @@ static void __exit target_core_exit_configfs(void)
core_dev_release_virtual_lun0();
rd_module_exit();
+ target_xcopy_release_pt();
release_se_kmem_caches();
}
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 8f4142f..d90dbb0 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -4,7 +4,7 @@
* This file contains the TCM Virtual Device and Disk Transport
* agnostic related functions.
*
- * (c) Copyright 2003-2012 RisingTide Systems LLC.
+ * (c) Copyright 2003-2013 Datera, Inc.
*
* Nicholas A. Bellinger <nab@kernel.org>
*
@@ -47,6 +47,9 @@
#include "target_core_pr.h"
#include "target_core_ua.h"
+DEFINE_MUTEX(g_device_mutex);
+LIST_HEAD(g_device_list);
+
static struct se_hba *lun0_hba;
/* not static, needed by tpg.c */
struct se_device *g_lun0_dev;
@@ -890,6 +893,32 @@ int se_dev_set_emulate_tpws(struct se_device *dev, int flag)
return 0;
}
+int se_dev_set_emulate_caw(struct se_device *dev, int flag)
+{
+ if (flag != 0 && flag != 1) {
+ pr_err("Illegal value %d\n", flag);
+ return -EINVAL;
+ }
+ dev->dev_attrib.emulate_caw = flag;
+ pr_debug("dev[%p]: SE Device CompareAndWrite (AtomicTestandSet): %d\n",
+ dev, flag);
+
+ return 0;
+}
+
+int se_dev_set_emulate_3pc(struct se_device *dev, int flag)
+{
+ if (flag != 0 && flag != 1) {
+ pr_err("Illegal value %d\n", flag);
+ return -EINVAL;
+ }
+ dev->dev_attrib.emulate_3pc = flag;
+ pr_debug("dev[%p]: SE Device 3rd Party Copy (EXTENDED_COPY): %d\n",
+ dev, flag);
+
+ return 0;
+}
+
int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag)
{
if ((flag != 0) && (flag != 1)) {
@@ -1393,6 +1422,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
INIT_LIST_HEAD(&dev->delayed_cmd_list);
INIT_LIST_HEAD(&dev->state_list);
INIT_LIST_HEAD(&dev->qf_cmd_list);
+ INIT_LIST_HEAD(&dev->g_dev_node);
spin_lock_init(&dev->stats_lock);
spin_lock_init(&dev->execute_task_lock);
spin_lock_init(&dev->delayed_cmd_lock);
@@ -1400,6 +1430,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
spin_lock_init(&dev->se_port_lock);
spin_lock_init(&dev->se_tmr_lock);
spin_lock_init(&dev->qf_cmd_lock);
+ sema_init(&dev->caw_sem, 1);
atomic_set(&dev->dev_ordered_id, 0);
INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
@@ -1423,6 +1454,8 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
dev->dev_attrib.emulate_tas = DA_EMULATE_TAS;
dev->dev_attrib.emulate_tpu = DA_EMULATE_TPU;
dev->dev_attrib.emulate_tpws = DA_EMULATE_TPWS;
+ dev->dev_attrib.emulate_caw = DA_EMULATE_CAW;
+ dev->dev_attrib.emulate_3pc = DA_EMULATE_3PC;
dev->dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
dev->dev_attrib.is_nonrot = DA_IS_NONROT;
dev->dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD;
@@ -1510,6 +1543,11 @@ int target_configure_device(struct se_device *dev)
spin_lock(&hba->device_lock);
hba->dev_count++;
spin_unlock(&hba->device_lock);
+
+ mutex_lock(&g_device_mutex);
+ list_add_tail(&dev->g_dev_node, &g_device_list);
+ mutex_unlock(&g_device_mutex);
+
return 0;
out_free_alua:
@@ -1528,6 +1566,10 @@ void target_free_device(struct se_device *dev)
if (dev->dev_flags & DF_CONFIGURED) {
destroy_workqueue(dev->tmr_wq);
+ mutex_lock(&g_device_mutex);
+ list_del(&dev->g_dev_node);
+ mutex_unlock(&g_device_mutex);
+
spin_lock(&hba->device_lock);
hba->dev_count--;
spin_unlock(&hba->device_lock);
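
The g_device_mutex/g_device_list pair added above lets the EXTENDED_COPY code enumerate every configured backend device; the consumer lives in target_core_xcopy.c further down in this diff, and the walk reduces to:

	struct se_device *se_dev;

	mutex_lock(&g_device_mutex);
	list_for_each_entry(se_dev, &g_device_list, g_dev_node) {
		/* match se_dev's generated NAA WWN against the XCOPY
		 * descriptor; on a hit, take a configfs dependency before
		 * dropping the mutex so the device cannot be freed */
	}
	mutex_unlock(&g_device_mutex);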
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
index eb56eb1..3503996 100644
--- a/drivers/target/target_core_fabric_configfs.c
+++ b/drivers/target/target_core_fabric_configfs.c
@@ -4,7 +4,7 @@
* This file contains generic fabric module configfs infrastructure for
* TCM v4.x code
*
- * (c) Copyright 2010-2012 RisingTide Systems LLC.
+ * (c) Copyright 2010-2013 Datera, Inc.
*
* Nicholas A. Bellinger <nab@linux-iscsi.org>
*
@@ -189,9 +189,11 @@ static ssize_t target_fabric_mappedlun_store_write_protect(
struct se_node_acl *se_nacl = lacl->se_lun_nacl;
struct se_portal_group *se_tpg = se_nacl->se_tpg;
unsigned long op;
+ int ret;
- if (strict_strtoul(page, 0, &op))
- return -EINVAL;
+ ret = kstrtoul(page, 0, &op);
+ if (ret)
+ return ret;
if ((op != 1) && (op != 0))
return -EINVAL;
@@ -350,7 +352,10 @@ static struct config_group *target_fabric_make_mappedlun(
* Determine the Mapped LUN value. This is what the SCSI Initiator
* Port will actually see.
*/
- if (strict_strtoul(buf + 4, 0, &mapped_lun) || mapped_lun > UINT_MAX) {
+ ret = kstrtoul(buf + 4, 0, &mapped_lun);
+ if (ret)
+ goto out;
+ if (mapped_lun > UINT_MAX) {
ret = -EINVAL;
goto out;
}
@@ -875,7 +880,10 @@ static struct config_group *target_fabric_make_lun(
" \"lun_$LUN_NUMBER\"\n");
return ERR_PTR(-EINVAL);
}
- if (strict_strtoul(name + 4, 0, &unpacked_lun) || unpacked_lun > UINT_MAX)
+ errno = kstrtoul(name + 4, 0, &unpacked_lun);
+ if (errno)
+ return ERR_PTR(errno);
+ if (unpacked_lun > UINT_MAX)
return ERR_PTR(-EINVAL);
lun = core_get_lun_from_tpg(se_tpg, unpacked_lun);
diff --git a/drivers/target/target_core_fabric_lib.c b/drivers/target/target_core_fabric_lib.c
index 687b0b0..0d1cf8b 100644
--- a/drivers/target/target_core_fabric_lib.c
+++ b/drivers/target/target_core_fabric_lib.c
@@ -4,7 +4,7 @@
* This file contains generic high level protocol identifier and PR
* handlers for TCM fabric modules
*
- * (c) Copyright 2010-2012 RisingTide Systems LLC.
+ * (c) Copyright 2010-2013 Datera, Inc.
*
* Nicholas A. Bellinger <nab@linux-iscsi.org>
*
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index b11890d..b662f89 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -3,7 +3,7 @@
*
* This file contains the Storage Engine <-> FILEIO transport specific functions
*
- * (c) Copyright 2005-2012 RisingTide Systems LLC.
+ * (c) Copyright 2005-2013 Datera, Inc.
*
* Nicholas A. Bellinger <nab@kernel.org>
*
@@ -547,11 +547,9 @@ fd_execute_unmap(struct se_cmd *cmd)
}
static sense_reason_t
-fd_execute_rw(struct se_cmd *cmd)
+fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
+ enum dma_data_direction data_direction)
{
- struct scatterlist *sgl = cmd->t_data_sg;
- u32 sgl_nents = cmd->t_data_nents;
- enum dma_data_direction data_direction = cmd->data_direction;
struct se_device *dev = cmd->se_dev;
int ret = 0;
@@ -635,10 +633,10 @@ static ssize_t fd_set_configfs_dev_params(struct se_device *dev,
ret = -ENOMEM;
break;
}
- ret = strict_strtoull(arg_p, 0, &fd_dev->fd_dev_size);
+ ret = kstrtoull(arg_p, 0, &fd_dev->fd_dev_size);
kfree(arg_p);
if (ret < 0) {
- pr_err("strict_strtoull() failed for"
+ pr_err("kstrtoull() failed for"
" fd_dev_size=\n");
goto out;
}
diff --git a/drivers/target/target_core_hba.c b/drivers/target/target_core_hba.c
index d2616cd..a25051a 100644
--- a/drivers/target/target_core_hba.c
+++ b/drivers/target/target_core_hba.c
@@ -3,7 +3,7 @@
*
* This file contains the TCM HBA Transport related functions.
*
- * (c) Copyright 2003-2012 RisingTide Systems LLC.
+ * (c) Copyright 2003-2013 Datera, Inc.
*
* Nicholas A. Bellinger <nab@kernel.org>
*
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index aa1620a..b9a3394 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -4,7 +4,7 @@
* This file contains the Storage Engine <-> Linux BlockIO transport
* specific functions.
*
- * (c) Copyright 2003-2012 RisingTide Systems LLC.
+ * (c) Copyright 2003-2013 Datera, Inc.
*
* Nicholas A. Bellinger <nab@kernel.org>
*
@@ -536,10 +536,10 @@ static ssize_t iblock_set_configfs_dev_params(struct se_device *dev,
ret = -ENOMEM;
break;
}
- ret = strict_strtoul(arg_p, 0, &tmp_readonly);
+ ret = kstrtoul(arg_p, 0, &tmp_readonly);
kfree(arg_p);
if (ret < 0) {
- pr_err("strict_strtoul() failed for"
+ pr_err("kstrtoul() failed for"
" readonly=\n");
goto out;
}
@@ -587,11 +587,9 @@ static ssize_t iblock_show_configfs_dev_params(struct se_device *dev, char *b)
}
static sense_reason_t
-iblock_execute_rw(struct se_cmd *cmd)
+iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
+ enum dma_data_direction data_direction)
{
- struct scatterlist *sgl = cmd->t_data_sg;
- u32 sgl_nents = cmd->t_data_nents;
- enum dma_data_direction data_direction = cmd->data_direction;
struct se_device *dev = cmd->se_dev;
struct iblock_req *ibr;
struct bio *bio;
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
index 18d49df..579128a 100644
--- a/drivers/target/target_core_internal.h
+++ b/drivers/target/target_core_internal.h
@@ -33,6 +33,8 @@ int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *, int);
int se_dev_set_emulate_tas(struct se_device *, int);
int se_dev_set_emulate_tpu(struct se_device *, int);
int se_dev_set_emulate_tpws(struct se_device *, int);
+int se_dev_set_emulate_caw(struct se_device *, int);
+int se_dev_set_emulate_3pc(struct se_device *, int);
int se_dev_set_enforce_pr_isids(struct se_device *, int);
int se_dev_set_is_nonrot(struct se_device *, int);
int se_dev_set_emulate_rest_reord(struct se_device *dev, int);
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index bd78faf..d1ae4c5 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -4,7 +4,7 @@
* This file contains SPC-3 compliant persistent reservations and
* legacy SPC-2 reservations with compatible reservation handling (CRH=1)
*
- * (c) Copyright 2009-2012 RisingTide Systems LLC.
+ * (c) Copyright 2009-2013 Datera, Inc.
*
* Nicholas A. Bellinger <nab@kernel.org>
*
@@ -1949,7 +1949,7 @@ static int __core_scsi3_write_aptpl_to_file(
pr_debug("Error writing APTPL metadata file: %s\n", path);
fput(file);
- return ret ? -EIO : 0;
+ return (ret < 0) ? -EIO : 0;
}
/*
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index e992b27..551c96c 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -3,7 +3,7 @@
*
* This file contains the generic target mode <-> Linux SCSI subsystem plugin.
*
- * (c) Copyright 2003-2012 RisingTide Systems LLC.
+ * (c) Copyright 2003-2013 Datera, Inc.
*
* Nicholas A. Bellinger <nab@kernel.org>
*
@@ -1050,9 +1050,8 @@ pscsi_execute_cmd(struct se_cmd *cmd)
req = blk_get_request(pdv->pdv_sd->request_queue,
(data_direction == DMA_TO_DEVICE),
GFP_KERNEL);
- if (!req || IS_ERR(req)) {
- pr_err("PSCSI: blk_get_request() failed: %ld\n",
- req ? IS_ERR(req) : -ENOMEM);
+ if (!req) {
+ pr_err("PSCSI: blk_get_request() failed\n");
ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
goto fail;
}
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
index 51127d1..131327a 100644
--- a/drivers/target/target_core_rd.c
+++ b/drivers/target/target_core_rd.c
@@ -4,7 +4,7 @@
* This file contains the Storage Engine <-> Ramdisk transport
* specific functions.
*
- * (c) Copyright 2003-2012 RisingTide Systems LLC.
+ * (c) Copyright 2003-2013 Datera, Inc.
*
* Nicholas A. Bellinger <nab@kernel.org>
*
@@ -280,11 +280,9 @@ static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
}
static sense_reason_t
-rd_execute_rw(struct se_cmd *cmd)
+rd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
+ enum dma_data_direction data_direction)
{
- struct scatterlist *sgl = cmd->t_data_sg;
- u32 sgl_nents = cmd->t_data_nents;
- enum dma_data_direction data_direction = cmd->data_direction;
struct se_device *se_dev = cmd->se_dev;
struct rd_dev *dev = RD_DEV(se_dev);
struct rd_dev_sg_table *table;
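
The fd/iblock/rd hunks above all make the same mechanical change: ->execute_rw() now takes the scatterlist, nents and DMA direction as parameters instead of reading them out of se_cmd. The payoff shows up in target_core_sbc.c below, where the default path forwards the command's own SGL while COMPARE_AND_WRITE aims the same backend hook at the bidi SGL for its verify READ (both fragments condensed from the sbc.c hunks):

	/* default dispatch: unchanged behaviour for plain READ/WRITE */
	return cmd->execute_rw(cmd, cmd->t_data_sg, cmd->t_data_nents,
			       cmd->data_direction);

	/* COMPARE_AND_WRITE: reuse the hook for the initial verify READ */
	ret = cmd->execute_rw(cmd, cmd->t_bidi_data_sg, cmd->t_bidi_data_nents,
			      DMA_FROM_DEVICE);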
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index 8a46277..6c17295 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -1,7 +1,7 @@
/*
* SCSI Block Commands (SBC) parsing and emulation.
*
- * (c) Copyright 2002-2012 RisingTide Systems LLC.
+ * (c) Copyright 2002-2013 Datera, Inc.
*
* Nicholas A. Bellinger <nab@kernel.org>
*
@@ -25,6 +25,7 @@
#include <linux/ratelimit.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
+#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
@@ -280,13 +281,13 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o
return 0;
}
-static void xdreadwrite_callback(struct se_cmd *cmd)
+static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd)
{
unsigned char *buf, *addr;
struct scatterlist *sg;
unsigned int offset;
- int i;
- int count;
+ sense_reason_t ret = TCM_NO_SENSE;
+ int i, count;
/*
* From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command
*
@@ -301,7 +302,7 @@ static void xdreadwrite_callback(struct se_cmd *cmd)
buf = kmalloc(cmd->data_length, GFP_KERNEL);
if (!buf) {
pr_err("Unable to allocate xor_callback buf\n");
- return;
+ return TCM_OUT_OF_RESOURCES;
}
/*
* Copy the scatterlist WRITE buffer located at cmd->t_data_sg
@@ -320,8 +321,10 @@ static void xdreadwrite_callback(struct se_cmd *cmd)
offset = 0;
for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) {
addr = kmap_atomic(sg_page(sg));
- if (!addr)
+ if (!addr) {
+ ret = TCM_OUT_OF_RESOURCES;
goto out;
+ }
for (i = 0; i < sg->length; i++)
*(addr + sg->offset + i) ^= *(buf + offset + i);
@@ -332,6 +335,193 @@ static void xdreadwrite_callback(struct se_cmd *cmd)
out:
kfree(buf);
+ return ret;
+}
+
+static sense_reason_t
+sbc_execute_rw(struct se_cmd *cmd)
+{
+ return cmd->execute_rw(cmd, cmd->t_data_sg, cmd->t_data_nents,
+ cmd->data_direction);
+}
+
+static sense_reason_t compare_and_write_post(struct se_cmd *cmd)
+{
+ struct se_device *dev = cmd->se_dev;
+
+ cmd->se_cmd_flags |= SCF_COMPARE_AND_WRITE_POST;
+ /*
+ * Unlock ->caw_sem originally obtained during sbc_compare_and_write()
+ * before the original READ I/O submission.
+ */
+ up(&dev->caw_sem);
+
+ return TCM_NO_SENSE;
+}
+
+static sense_reason_t compare_and_write_callback(struct se_cmd *cmd)
+{
+ struct se_device *dev = cmd->se_dev;
+ struct scatterlist *write_sg = NULL, *sg;
+ unsigned char *buf, *addr;
+ struct sg_mapping_iter m;
+ unsigned int offset = 0, len;
+ unsigned int nlbas = cmd->t_task_nolb;
+ unsigned int block_size = dev->dev_attrib.block_size;
+ unsigned int compare_len = (nlbas * block_size);
+ sense_reason_t ret = TCM_NO_SENSE;
+ int rc, i;
+
+ /*
+ * Handle early failure in transport_generic_request_failure(),
+	 * which will not have taken ->caw_sem yet..
+ */
+ if (!cmd->t_data_sg || !cmd->t_bidi_data_sg)
+ return TCM_NO_SENSE;
+
+ buf = kzalloc(cmd->data_length, GFP_KERNEL);
+ if (!buf) {
+ pr_err("Unable to allocate compare_and_write buf\n");
+ ret = TCM_OUT_OF_RESOURCES;
+ goto out;
+ }
+
+ write_sg = kzalloc(sizeof(struct scatterlist) * cmd->t_data_nents,
+ GFP_KERNEL);
+ if (!write_sg) {
+ pr_err("Unable to allocate compare_and_write sg\n");
+ ret = TCM_OUT_OF_RESOURCES;
+ goto out;
+ }
+ /*
+ * Setup verify and write data payloads from total NumberLBAs.
+ */
+ rc = sg_copy_to_buffer(cmd->t_data_sg, cmd->t_data_nents, buf,
+ cmd->data_length);
+ if (!rc) {
+ pr_err("sg_copy_to_buffer() failed for compare_and_write\n");
+ ret = TCM_OUT_OF_RESOURCES;
+ goto out;
+ }
+ /*
+	 * Compare the SCSI READ payload against the verify payload
+ */
+ for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, i) {
+ addr = (unsigned char *)kmap_atomic(sg_page(sg));
+ if (!addr) {
+ ret = TCM_OUT_OF_RESOURCES;
+ goto out;
+ }
+
+ len = min(sg->length, compare_len);
+
+ if (memcmp(addr, buf + offset, len)) {
+ pr_warn("Detected MISCOMPARE for addr: %p buf: %p\n",
+ addr, buf + offset);
+ kunmap_atomic(addr);
+ goto miscompare;
+ }
+ kunmap_atomic(addr);
+
+ offset += len;
+ compare_len -= len;
+ if (!compare_len)
+ break;
+ }
+
+ i = 0;
+ len = cmd->t_task_nolb * block_size;
+ sg_miter_start(&m, cmd->t_data_sg, cmd->t_data_nents, SG_MITER_TO_SG);
+ /*
+ * Currently assumes NoLB=1 and SGLs are PAGE_SIZE..
+ */
+ while (len) {
+ sg_miter_next(&m);
+
+ if (block_size < PAGE_SIZE) {
+ sg_set_page(&write_sg[i], m.page, block_size,
+ block_size);
+ } else {
+ sg_miter_next(&m);
+ sg_set_page(&write_sg[i], m.page, block_size,
+ 0);
+ }
+ len -= block_size;
+ i++;
+ }
+ sg_miter_stop(&m);
+ /*
+ * Save the original SGL + nents values before updating to new
+ * assignments, to be released in transport_free_pages() ->
+ * transport_reset_sgl_orig()
+ */
+ cmd->t_data_sg_orig = cmd->t_data_sg;
+ cmd->t_data_sg = write_sg;
+ cmd->t_data_nents_orig = cmd->t_data_nents;
+ cmd->t_data_nents = 1;
+
+ cmd->sam_task_attr = MSG_HEAD_TAG;
+ cmd->transport_complete_callback = compare_and_write_post;
+ /*
+ * Now reset ->execute_cmd() to the normal sbc_execute_rw() handler
+ * for submitting the adjusted SGL to write instance user-data.
+ */
+ cmd->execute_cmd = sbc_execute_rw;
+
+ spin_lock_irq(&cmd->t_state_lock);
+ cmd->t_state = TRANSPORT_PROCESSING;
+ cmd->transport_state |= CMD_T_ACTIVE|CMD_T_BUSY|CMD_T_SENT;
+ spin_unlock_irq(&cmd->t_state_lock);
+
+ __target_execute_cmd(cmd);
+
+ kfree(buf);
+ return ret;
+
+miscompare:
+ pr_warn("Target/%s: Send MISCOMPARE check condition and sense\n",
+ dev->transport->name);
+ ret = TCM_MISCOMPARE_VERIFY;
+out:
+ /*
+ * In the MISCOMPARE or failure case, unlock ->caw_sem obtained in
+ * sbc_compare_and_write() before the original READ I/O submission.
+ */
+ up(&dev->caw_sem);
+ kfree(write_sg);
+ kfree(buf);
+ return ret;
+}
+
+static sense_reason_t
+sbc_compare_and_write(struct se_cmd *cmd)
+{
+ struct se_device *dev = cmd->se_dev;
+ sense_reason_t ret;
+ int rc;
+ /*
+ * Submit the READ first for COMPARE_AND_WRITE to perform the
+	 * comparison using SGLs at cmd->t_bidi_data_sg..
+ */
+ rc = down_interruptible(&dev->caw_sem);
+ if ((rc != 0) || signal_pending(current)) {
+ cmd->transport_complete_callback = NULL;
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ }
+
+ ret = cmd->execute_rw(cmd, cmd->t_bidi_data_sg, cmd->t_bidi_data_nents,
+ DMA_FROM_DEVICE);
+ if (ret) {
+ cmd->transport_complete_callback = NULL;
+ up(&dev->caw_sem);
+ return ret;
+ }
+ /*
+ * Unlock of dev->caw_sem to occur in compare_and_write_callback()
+ * upon MISCOMPARE, or in compare_and_write_done() upon completion
+ * of WRITE instance user-data.
+ */
+ return TCM_NO_SENSE;
}
sense_reason_t
@@ -348,31 +538,36 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
sectors = transport_get_sectors_6(cdb);
cmd->t_task_lba = transport_lba_21(cdb);
cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
- cmd->execute_cmd = ops->execute_rw;
+ cmd->execute_rw = ops->execute_rw;
+ cmd->execute_cmd = sbc_execute_rw;
break;
case READ_10:
sectors = transport_get_sectors_10(cdb);
cmd->t_task_lba = transport_lba_32(cdb);
cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
- cmd->execute_cmd = ops->execute_rw;
+ cmd->execute_rw = ops->execute_rw;
+ cmd->execute_cmd = sbc_execute_rw;
break;
case READ_12:
sectors = transport_get_sectors_12(cdb);
cmd->t_task_lba = transport_lba_32(cdb);
cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
- cmd->execute_cmd = ops->execute_rw;
+ cmd->execute_rw = ops->execute_rw;
+ cmd->execute_cmd = sbc_execute_rw;
break;
case READ_16:
sectors = transport_get_sectors_16(cdb);
cmd->t_task_lba = transport_lba_64(cdb);
cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
- cmd->execute_cmd = ops->execute_rw;
+ cmd->execute_rw = ops->execute_rw;
+ cmd->execute_cmd = sbc_execute_rw;
break;
case WRITE_6:
sectors = transport_get_sectors_6(cdb);
cmd->t_task_lba = transport_lba_21(cdb);
cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
- cmd->execute_cmd = ops->execute_rw;
+ cmd->execute_rw = ops->execute_rw;
+ cmd->execute_cmd = sbc_execute_rw;
break;
case WRITE_10:
case WRITE_VERIFY:
@@ -381,7 +576,8 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
if (cdb[1] & 0x8)
cmd->se_cmd_flags |= SCF_FUA;
cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
- cmd->execute_cmd = ops->execute_rw;
+ cmd->execute_rw = ops->execute_rw;
+ cmd->execute_cmd = sbc_execute_rw;
break;
case WRITE_12:
sectors = transport_get_sectors_12(cdb);
@@ -389,7 +585,8 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
if (cdb[1] & 0x8)
cmd->se_cmd_flags |= SCF_FUA;
cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
- cmd->execute_cmd = ops->execute_rw;
+ cmd->execute_rw = ops->execute_rw;
+ cmd->execute_cmd = sbc_execute_rw;
break;
case WRITE_16:
sectors = transport_get_sectors_16(cdb);
@@ -397,7 +594,8 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
if (cdb[1] & 0x8)
cmd->se_cmd_flags |= SCF_FUA;
cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
- cmd->execute_cmd = ops->execute_rw;
+ cmd->execute_rw = ops->execute_rw;
+ cmd->execute_cmd = sbc_execute_rw;
break;
case XDWRITEREAD_10:
if (cmd->data_direction != DMA_TO_DEVICE ||
@@ -411,7 +609,8 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
/*
* Setup BIDI XOR callback to be run after I/O completion.
*/
- cmd->execute_cmd = ops->execute_rw;
+ cmd->execute_rw = ops->execute_rw;
+ cmd->execute_cmd = sbc_execute_rw;
cmd->transport_complete_callback = &xdreadwrite_callback;
if (cdb[1] & 0x8)
cmd->se_cmd_flags |= SCF_FUA;
@@ -434,7 +633,8 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
* Setup BIDI XOR callback to be run during after I/O
* completion.
*/
- cmd->execute_cmd = ops->execute_rw;
+ cmd->execute_rw = ops->execute_rw;
+ cmd->execute_cmd = sbc_execute_rw;
cmd->transport_complete_callback = &xdreadwrite_callback;
if (cdb[1] & 0x8)
cmd->se_cmd_flags |= SCF_FUA;
@@ -461,6 +661,28 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
}
break;
}
+ case COMPARE_AND_WRITE:
+ sectors = cdb[13];
+ /*
+ * Currently enforce COMPARE_AND_WRITE for a single sector
+ */
+ if (sectors > 1) {
+ pr_err("COMPARE_AND_WRITE contains NoLB: %u greater"
+ " than 1\n", sectors);
+ return TCM_INVALID_CDB_FIELD;
+ }
+ /*
+ * Double size because we have two buffers, note that
+ * zero is not an error..
+ */
+ size = 2 * sbc_get_size(cmd, sectors);
+ cmd->t_task_lba = get_unaligned_be64(&cdb[2]);
+ cmd->t_task_nolb = sectors;
+ cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB | SCF_COMPARE_AND_WRITE;
+ cmd->execute_rw = ops->execute_rw;
+ cmd->execute_cmd = sbc_compare_and_write;
+ cmd->transport_complete_callback = compare_and_write_callback;
+ break;
case READ_CAPACITY:
size = READ_CAP_LEN;
cmd->execute_cmd = sbc_emulate_readcapacity;
@@ -600,7 +822,8 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
return TCM_ADDRESS_OUT_OF_RANGE;
}
- size = sbc_get_size(cmd, sectors);
+ if (!(cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE))
+ size = sbc_get_size(cmd, sectors);
}
return target_cmd_size_check(cmd, size);
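
For reference, the COMPARE_AND_WRITE sequencing wired up by the hunks above, as a sketch:

/*
 * sbc_compare_and_write()       down(&dev->caw_sem), then submit the verify
 *                               READ via ->execute_rw() against
 *                               t_bidi_data_sg with DMA_FROM_DEVICE
 * compare_and_write_callback()  memcmp() the READ payload against the
 *                               verify payload; on a match, swap in the
 *                               write_sg table, reset ->execute_cmd to
 *                               sbc_execute_rw() and resubmit with
 *                               __target_execute_cmd()
 * compare_and_write_post()      up(&dev->caw_sem) once the WRITE phase
 *                               completes; miscompare and failure paths
 *                               release caw_sem in the callback instead
 */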
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
index 9fabbf7..0745395 100644
--- a/drivers/target/target_core_spc.c
+++ b/drivers/target/target_core_spc.c
@@ -1,7 +1,7 @@
/*
* SCSI Primary Commands (SPC) parsing and emulation.
*
- * (c) Copyright 2002-2012 RisingTide Systems LLC.
+ * (c) Copyright 2002-2013 Datera, Inc.
*
* Nicholas A. Bellinger <nab@kernel.org>
*
@@ -35,7 +35,7 @@
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"
-
+#include "target_core_xcopy.h"
static void spc_fill_alua_data(struct se_port *port, unsigned char *buf)
{
@@ -95,6 +95,12 @@ spc_emulate_inquiry_std(struct se_cmd *cmd, unsigned char *buf)
*/
spc_fill_alua_data(lun->lun_sep, buf);
+ /*
+ * Set Third-Party Copy (3PC) bit to indicate support for EXTENDED_COPY
+ */
+ if (dev->dev_attrib.emulate_3pc)
+ buf[5] |= 0x8;
+
buf[7] = 0x2; /* CmdQue=1 */
memcpy(&buf[8], "LIO-ORG ", 8);
@@ -129,8 +135,8 @@ spc_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
return 0;
}
-static void spc_parse_naa_6h_vendor_specific(struct se_device *dev,
- unsigned char *buf)
+void spc_parse_naa_6h_vendor_specific(struct se_device *dev,
+ unsigned char *buf)
{
unsigned char *p = &dev->t10_wwn.unit_serial[0];
int cnt;
@@ -460,6 +466,11 @@ spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
/* Set WSNZ to 1 */
buf[4] = 0x01;
+ /*
+ * Set MAXIMUM COMPARE AND WRITE LENGTH
+ */
+ if (dev->dev_attrib.emulate_caw)
+ buf[5] = 0x01;
/*
* Set OPTIMAL TRANSFER LENGTH GRANULARITY
@@ -1250,8 +1261,14 @@ spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
*size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
break;
case EXTENDED_COPY:
- case READ_ATTRIBUTE:
+ *size = get_unaligned_be32(&cdb[10]);
+ cmd->execute_cmd = target_do_xcopy;
+ break;
case RECEIVE_COPY_RESULTS:
+ *size = get_unaligned_be32(&cdb[10]);
+ cmd->execute_cmd = target_do_receive_copy_results;
+ break;
+ case READ_ATTRIBUTE:
case WRITE_ATTRIBUTE:
*size = (cdb[10] << 24) | (cdb[11] << 16) |
(cdb[12] << 8) | cdb[13];
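
Both new opcodes are 16-byte CDBs carrying a 32-bit (parameter list or allocation) length at bytes 10..13, which is what the get_unaligned_be32() above extracts; a sketch, with opcode values per SPC-4:

#include <asm/unaligned.h>

	/* EXTENDED_COPY (0x83) and RECEIVE_COPY_RESULTS (0x84) */
	u32 length = get_unaligned_be32(&cdb[10]);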
diff --git a/drivers/target/target_core_stat.c b/drivers/target/target_core_stat.c
index d154ce7..9c642e0 100644
--- a/drivers/target/target_core_stat.c
+++ b/drivers/target/target_core_stat.c
@@ -4,7 +4,7 @@
* Modern ConfigFS group context specific statistics based on original
* target_core_mib.c code
*
- * (c) Copyright 2006-2012 RisingTide Systems LLC.
+ * (c) Copyright 2006-2013 Datera, Inc.
*
* Nicholas A. Bellinger <nab@linux-iscsi.org>
*
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index 0d7cacb..2500099 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -3,7 +3,7 @@
*
* This file contains SPC-3 task management infrastructure
*
- * (c) Copyright 2009-2012 RisingTide Systems LLC.
+ * (c) Copyright 2009-2013 Datera, Inc.
*
* Nicholas A. Bellinger <nab@kernel.org>
*
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index aac9d27..b9a6ec0 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -3,7 +3,7 @@
*
* This file contains generic Target Portal Group related functions.
*
- * (c) Copyright 2002-2012 RisingTide Systems LLC.
+ * (c) Copyright 2002-2013 Datera, Inc.
*
* Nicholas A. Bellinger <nab@kernel.org>
*
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index d8e49d7..84747cc 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -3,7 +3,7 @@
*
* This file contains the Generic Target Engine Core.
*
- * (c) Copyright 2002-2012 RisingTide Systems LLC.
+ * (c) Copyright 2002-2013 Datera, Inc.
*
* Nicholas A. Bellinger <nab@kernel.org>
*
@@ -67,7 +67,6 @@ struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;
static void transport_complete_task_attr(struct se_cmd *cmd);
static void transport_handle_queue_full(struct se_cmd *cmd,
struct se_device *dev);
-static int transport_generic_get_mem(struct se_cmd *cmd);
static int transport_put_cmd(struct se_cmd *cmd);
static void target_complete_ok_work(struct work_struct *work);
@@ -232,6 +231,50 @@ struct se_session *transport_init_session(void)
}
EXPORT_SYMBOL(transport_init_session);
+int transport_alloc_session_tags(struct se_session *se_sess,
+ unsigned int tag_num, unsigned int tag_size)
+{
+ int rc;
+
+ se_sess->sess_cmd_map = kzalloc(tag_num * tag_size, GFP_KERNEL);
+ if (!se_sess->sess_cmd_map) {
+ pr_err("Unable to allocate se_sess->sess_cmd_map\n");
+ return -ENOMEM;
+ }
+
+ rc = percpu_ida_init(&se_sess->sess_tag_pool, tag_num);
+ if (rc < 0) {
+ pr_err("Unable to init se_sess->sess_tag_pool,"
+ " tag_num: %u\n", tag_num);
+ kfree(se_sess->sess_cmd_map);
+ se_sess->sess_cmd_map = NULL;
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(transport_alloc_session_tags);
+
+struct se_session *transport_init_session_tags(unsigned int tag_num,
+ unsigned int tag_size)
+{
+ struct se_session *se_sess;
+ int rc;
+
+ se_sess = transport_init_session();
+ if (IS_ERR(se_sess))
+ return se_sess;
+
+ rc = transport_alloc_session_tags(se_sess, tag_num, tag_size);
+ if (rc < 0) {
+ transport_free_session(se_sess);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ return se_sess;
+}
+EXPORT_SYMBOL(transport_init_session_tags);
+
/*
* Called with spin_lock_irqsave(&struct se_portal_group->session_lock called.
*/
@@ -367,6 +410,10 @@ EXPORT_SYMBOL(transport_deregister_session_configfs);
void transport_free_session(struct se_session *se_sess)
{
+ if (se_sess->sess_cmd_map) {
+ percpu_ida_destroy(&se_sess->sess_tag_pool);
+ kfree(se_sess->sess_cmd_map);
+ }
kmem_cache_free(se_sess_cache, se_sess);
}
EXPORT_SYMBOL(transport_free_session);
@@ -1206,7 +1253,7 @@ int transport_handle_cdb_direct(
}
EXPORT_SYMBOL(transport_handle_cdb_direct);
-static sense_reason_t
+sense_reason_t
transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
u32 sgl_count, struct scatterlist *sgl_bidi, u32 sgl_bidi_count)
{
@@ -1512,6 +1559,13 @@ void transport_generic_request_failure(struct se_cmd *cmd,
* For SAM Task Attribute emulation for failed struct se_cmd
*/
transport_complete_task_attr(cmd);
+ /*
+ * Handle special case for COMPARE_AND_WRITE failure, where the
+	 * callback is expected to drop the per-device ->caw_sem.
+ */
+ if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
+ cmd->transport_complete_callback)
+ cmd->transport_complete_callback(cmd);
switch (sense_reason) {
case TCM_NON_EXISTENT_LUN:
@@ -1579,7 +1633,7 @@ queue_full:
}
EXPORT_SYMBOL(transport_generic_request_failure);
-static void __target_execute_cmd(struct se_cmd *cmd)
+void __target_execute_cmd(struct se_cmd *cmd)
{
sense_reason_t ret;
@@ -1784,7 +1838,7 @@ static void transport_complete_qf(struct se_cmd *cmd)
ret = cmd->se_tfo->queue_data_in(cmd);
break;
case DMA_TO_DEVICE:
- if (cmd->t_bidi_data_sg) {
+ if (cmd->se_cmd_flags & SCF_BIDI) {
ret = cmd->se_tfo->queue_data_in(cmd);
if (ret < 0)
break;
@@ -1856,10 +1910,25 @@ static void target_complete_ok_work(struct work_struct *work)
}
/*
* Check for a callback, used by amongst other things
- * XDWRITE_READ_10 emulation.
+ * XDWRITE_READ_10 and COMPARE_AND_WRITE emulation.
*/
- if (cmd->transport_complete_callback)
- cmd->transport_complete_callback(cmd);
+ if (cmd->transport_complete_callback) {
+ sense_reason_t rc;
+
+ rc = cmd->transport_complete_callback(cmd);
+ if (!rc && !(cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE_POST)) {
+ return;
+ } else if (rc) {
+ ret = transport_send_check_condition_and_sense(cmd,
+ rc, 0);
+ if (ret == -EAGAIN || ret == -ENOMEM)
+ goto queue_full;
+
+ transport_lun_remove_cmd(cmd);
+ transport_cmd_check_stop_to_fabric(cmd);
+ return;
+ }
+ }
switch (cmd->data_direction) {
case DMA_FROM_DEVICE:
@@ -1885,7 +1954,7 @@ static void target_complete_ok_work(struct work_struct *work)
/*
* Check if we need to send READ payload for BIDI-COMMAND
*/
- if (cmd->t_bidi_data_sg) {
+ if (cmd->se_cmd_flags & SCF_BIDI) {
spin_lock(&cmd->se_lun->lun_sep_lock);
if (cmd->se_lun->lun_sep) {
cmd->se_lun->lun_sep->sep_stats.tx_data_octets +=
@@ -1930,10 +1999,29 @@ static inline void transport_free_sgl(struct scatterlist *sgl, int nents)
kfree(sgl);
}
+static inline void transport_reset_sgl_orig(struct se_cmd *cmd)
+{
+ /*
+ * Check for saved t_data_sg that may be used for COMPARE_AND_WRITE
+ * emulation, and free + reset pointers if necessary..
+ */
+ if (!cmd->t_data_sg_orig)
+ return;
+
+ kfree(cmd->t_data_sg);
+ cmd->t_data_sg = cmd->t_data_sg_orig;
+ cmd->t_data_sg_orig = NULL;
+ cmd->t_data_nents = cmd->t_data_nents_orig;
+ cmd->t_data_nents_orig = 0;
+}
+
static inline void transport_free_pages(struct se_cmd *cmd)
{
- if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)
+ if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) {
+ transport_reset_sgl_orig(cmd);
return;
+ }
+ transport_reset_sgl_orig(cmd);
transport_free_sgl(cmd->t_data_sg, cmd->t_data_nents);
cmd->t_data_sg = NULL;
@@ -2029,24 +2117,22 @@ void transport_kunmap_data_sg(struct se_cmd *cmd)
}
EXPORT_SYMBOL(transport_kunmap_data_sg);
-static int
-transport_generic_get_mem(struct se_cmd *cmd)
+int
+target_alloc_sgl(struct scatterlist **sgl, unsigned int *nents, u32 length,
+ bool zero_page)
{
- u32 length = cmd->data_length;
- unsigned int nents;
+ struct scatterlist *sg;
struct page *page;
- gfp_t zero_flag;
+ gfp_t zero_flag = (zero_page) ? __GFP_ZERO : 0;
+ unsigned int nent;
int i = 0;
- nents = DIV_ROUND_UP(length, PAGE_SIZE);
- cmd->t_data_sg = kmalloc(sizeof(struct scatterlist) * nents, GFP_KERNEL);
- if (!cmd->t_data_sg)
+ nent = DIV_ROUND_UP(length, PAGE_SIZE);
+ sg = kmalloc(sizeof(struct scatterlist) * nent, GFP_KERNEL);
+ if (!sg)
return -ENOMEM;
- cmd->t_data_nents = nents;
- sg_init_table(cmd->t_data_sg, nents);
-
- zero_flag = cmd->se_cmd_flags & SCF_SCSI_DATA_CDB ? 0 : __GFP_ZERO;
+ sg_init_table(sg, nent);
while (length) {
u32 page_len = min_t(u32, length, PAGE_SIZE);
@@ -2054,19 +2140,20 @@ transport_generic_get_mem(struct se_cmd *cmd)
if (!page)
goto out;
- sg_set_page(&cmd->t_data_sg[i], page, page_len, 0);
+ sg_set_page(&sg[i], page, page_len, 0);
length -= page_len;
i++;
}
+ *sgl = sg;
+ *nents = nent;
return 0;
out:
while (i > 0) {
i--;
- __free_page(sg_page(&cmd->t_data_sg[i]));
+ __free_page(sg_page(&sg[i]));
}
- kfree(cmd->t_data_sg);
- cmd->t_data_sg = NULL;
+ kfree(sg);
return -ENOMEM;
}
@@ -2087,7 +2174,27 @@ transport_generic_new_cmd(struct se_cmd *cmd)
*/
if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) &&
cmd->data_length) {
- ret = transport_generic_get_mem(cmd);
+ bool zero_flag = !(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB);
+
+ if ((cmd->se_cmd_flags & SCF_BIDI) ||
+ (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)) {
+ u32 bidi_length;
+
+ if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)
+ bidi_length = cmd->t_task_nolb *
+ cmd->se_dev->dev_attrib.block_size;
+ else
+ bidi_length = cmd->data_length;
+
+ ret = target_alloc_sgl(&cmd->t_bidi_data_sg,
+ &cmd->t_bidi_data_nents,
+ bidi_length, zero_flag);
+ if (ret < 0)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ }
+
+ ret = target_alloc_sgl(&cmd->t_data_sg, &cmd->t_data_nents,
+ cmd->data_length, zero_flag);
if (ret < 0)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
@@ -2740,6 +2847,15 @@ transport_send_check_condition_and_sense(struct se_cmd *cmd,
buffer[SPC_ASC_KEY_OFFSET] = asc;
buffer[SPC_ASCQ_KEY_OFFSET] = ascq;
break;
+ case TCM_MISCOMPARE_VERIFY:
+ /* CURRENT ERROR */
+ buffer[0] = 0x70;
+ buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
+ buffer[SPC_SENSE_KEY_OFFSET] = MISCOMPARE;
+ /* MISCOMPARE DURING VERIFY OPERATION */
+ buffer[SPC_ASC_KEY_OFFSET] = 0x1d;
+ buffer[SPC_ASCQ_KEY_OFFSET] = 0x00;
+ break;
case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
default:
/* CURRENT ERROR */
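
A fabric driver consumes the new tag-pool session API above roughly as follows (a sketch with hypothetical sizes; the iscsi-target conversion earlier in this diff computes tag_size as sizeof(struct iscsi_cmd) plus the transport's priv_size):

	struct se_session *se_sess;

	se_sess = transport_init_session_tags(128 /* tag_num */,
					      sizeof(struct iscsi_cmd));
	if (IS_ERR(se_sess))
		return PTR_ERR(se_sess);
	/* per I/O: percpu_ida_alloc() a tag and index sess_cmd_map by
	 * tag * tag_size; transport_free_session() tears the pool down */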
diff --git a/drivers/target/target_core_ua.c b/drivers/target/target_core_ua.c
index bf0e390..b04467e 100644
--- a/drivers/target/target_core_ua.c
+++ b/drivers/target/target_core_ua.c
@@ -3,7 +3,7 @@
*
* This file contains logic for SPC-3 Unit Attention emulation
*
- * (c) Copyright 2009-2012 RisingTide Systems LLC.
+ * (c) Copyright 2009-2013 Datera, Inc.
*
* Nicholas A. Bellinger <nab@kernel.org>
*
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
new file mode 100644
index 0000000..4d22e7d
--- /dev/null
+++ b/drivers/target/target_core_xcopy.c
@@ -0,0 +1,1081 @@
+/*******************************************************************************
+ * Filename: target_core_xcopy.c
+ *
+ * This file contains support for SPC-4 Extended-Copy offload with generic
+ * TCM backends.
+ *
+ * Copyright (c) 2011-2013 Datera, Inc. All rights reserved.
+ *
+ * Author:
+ * Nicholas A. Bellinger <nab@daterainc.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ ******************************************************************************/
+
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <linux/configfs.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <asm/unaligned.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_backend.h>
+#include <target/target_core_fabric.h>
+#include <target/target_core_configfs.h>
+
+#include "target_core_pr.h"
+#include "target_core_ua.h"
+#include "target_core_xcopy.h"
+
+static struct workqueue_struct *xcopy_wq = NULL;
+/*
+ * From target_core_spc.c
+ */
+extern void spc_parse_naa_6h_vendor_specific(struct se_device *, unsigned char *);
+/*
+ * From target_core_device.c
+ */
+extern struct mutex g_device_mutex;
+extern struct list_head g_device_list;
+/*
+ * From target_core_configfs.c
+ */
+extern struct configfs_subsystem *target_core_subsystem[];
+
+static int target_xcopy_gen_naa_ieee(struct se_device *dev, unsigned char *buf)
+{
+ int off = 0;
+
+ buf[off++] = (0x6 << 4);
+ buf[off++] = 0x01;
+ buf[off++] = 0x40;
+ buf[off] = (0x5 << 4);
+
+ spc_parse_naa_6h_vendor_specific(dev, &buf[off]);
+ return 0;
+}
+
+static int target_xcopy_locate_se_dev_e4(struct se_cmd *se_cmd, struct xcopy_op *xop,
+ bool src)
+{
+ struct se_device *se_dev;
+ struct configfs_subsystem *subsys = target_core_subsystem[0];
+ unsigned char tmp_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN], *dev_wwn;
+ int rc;
+
+ if (src == true)
+ dev_wwn = &xop->dst_tid_wwn[0];
+ else
+ dev_wwn = &xop->src_tid_wwn[0];
+
+ mutex_lock(&g_device_mutex);
+ list_for_each_entry(se_dev, &g_device_list, g_dev_node) {
+
+ memset(&tmp_dev_wwn[0], 0, XCOPY_NAA_IEEE_REGEX_LEN);
+ target_xcopy_gen_naa_ieee(se_dev, &tmp_dev_wwn[0]);
+
+ rc = memcmp(&tmp_dev_wwn[0], dev_wwn, XCOPY_NAA_IEEE_REGEX_LEN);
+ if (rc != 0)
+ continue;
+
+ if (src == true) {
+ xop->dst_dev = se_dev;
+ pr_debug("XCOPY 0xe4: Setting xop->dst_dev: %p from located"
+ " se_dev\n", xop->dst_dev);
+ } else {
+ xop->src_dev = se_dev;
+ pr_debug("XCOPY 0xe4: Setting xop->src_dev: %p from located"
+ " se_dev\n", xop->src_dev);
+ }
+
+ rc = configfs_depend_item(subsys,
+ &se_dev->dev_group.cg_item);
+ if (rc != 0) {
+ pr_err("configfs_depend_item attempt failed:"
+ " %d for se_dev: %p\n", rc, se_dev);
+ mutex_unlock(&g_device_mutex);
+ return rc;
+ }
+
+ pr_debug("Called configfs_depend_item for subsys: %p se_dev: %p"
+ " se_dev->se_dev_group: %p\n", subsys, se_dev,
+ &se_dev->dev_group);
+
+ mutex_unlock(&g_device_mutex);
+ return 0;
+ }
+ mutex_unlock(&g_device_mutex);
+
+ pr_err("Unable to locate 0xe4 descriptor for EXTENDED_COPY\n");
+ return -EINVAL;
+}
+
+static int target_xcopy_parse_tiddesc_e4(struct se_cmd *se_cmd, struct xcopy_op *xop,
+ unsigned char *p, bool src)
+{
+ unsigned char *desc = p;
+ unsigned short ript;
+ u8 desig_len;
+ /*
+ * Extract RELATIVE INITIATOR PORT IDENTIFIER
+ */
+ ript = get_unaligned_be16(&desc[2]);
+ pr_debug("XCOPY 0xe4: RELATIVE INITIATOR PORT IDENTIFIER: %hu\n", ript);
+ /*
+ * Check for supported code set, association, and designator type
+ */
+ if ((desc[4] & 0x0f) != 0x1) {
+ pr_err("XCOPY 0xe4: code set of non binary type not supported\n");
+ return -EINVAL;
+ }
+ if ((desc[5] & 0x30) != 0x00) {
+ pr_err("XCOPY 0xe4: association other than LUN not supported\n");
+ return -EINVAL;
+ }
+ if ((desc[5] & 0x0f) != 0x3) {
+ pr_err("XCOPY 0xe4: designator type unsupported: 0x%02x\n",
+ (desc[5] & 0x0f));
+ return -EINVAL;
+ }
+ /*
+ * Check for matching 16 byte length for NAA IEEE Registered Extended
+ * Assigned designator
+ */
+ desig_len = desc[7];
+ if (desig_len != 16) {
+ pr_err("XCOPY 0xe4: invalid desig_len: %d\n", (int)desig_len);
+ return -EINVAL;
+ }
+ pr_debug("XCOPY 0xe4: desig_len: %d\n", (int)desig_len);
+ /*
+ * Check for NAA IEEE Registered Extended Assigned header..
+ */
+ if ((desc[8] & 0xf0) != 0x60) {
+ pr_err("XCOPY 0xe4: Unsupported DESIGNATOR TYPE: 0x%02x\n",
+ (desc[8] & 0xf0));
+ return -EINVAL;
+ }
+
+ if (src == true) {
+ memcpy(&xop->src_tid_wwn[0], &desc[8], XCOPY_NAA_IEEE_REGEX_LEN);
+ /*
+ * Determine if the source designator matches the local device
+ */
+ if (!memcmp(&xop->local_dev_wwn[0], &xop->src_tid_wwn[0],
+ XCOPY_NAA_IEEE_REGEX_LEN)) {
+ xop->op_origin = XCOL_SOURCE_RECV_OP;
+ xop->src_dev = se_cmd->se_dev;
+ pr_debug("XCOPY 0xe4: Set xop->src_dev %p from source"
+ " received xop\n", xop->src_dev);
+ }
+ } else {
+ memcpy(&xop->dst_tid_wwn[0], &desc[8], XCOPY_NAA_IEEE_REGEX_LEN);
+ /*
+ * Determine if the destination designator matches the local device
+ */
+ if (!memcmp(&xop->local_dev_wwn[0], &xop->dst_tid_wwn[0],
+ XCOPY_NAA_IEEE_REGEX_LEN)) {
+ xop->op_origin = XCOL_DEST_RECV_OP;
+ xop->dst_dev = se_cmd->se_dev;
+ pr_debug("XCOPY 0xe4: Set xop->dst_dev: %p from destination"
+ " received xop\n", xop->dst_dev);
+ }
+ }
+
+ return 0;
+}
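
The byte offsets checked in target_xcopy_parse_tiddesc_e4() above correspond to the SPC-4 identification descriptor (type 0xE4) carrying an NAA IEEE Registered Extended designator:

/*
 *   desc[2..3]   RELATIVE INITIATOR PORT IDENTIFIER (be16)
 *   desc[4]      bits 3:0  code set         (0x1 = binary)
 *   desc[5]      bits 5:4  association      (0x0 = LUN)
 *                bits 3:0  designator type  (0x3 = NAA)
 *   desc[7]      designator length          (16 for NAA IEEE Reg. Extended)
 *   desc[8..23]  NAA WWN; high nibble of desc[8] must be 0x6
 */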
+
+static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd,
+ struct xcopy_op *xop, unsigned char *p,
+ unsigned short tdll)
+{
+ struct se_device *local_dev = se_cmd->se_dev;
+ unsigned char *desc = p;
+ int offset = tdll % XCOPY_TARGET_DESC_LEN, rc, ret = 0;
+ unsigned short start = 0;
+ bool src = true;
+
+ if (offset != 0) {
+		pr_err("XCOPY target descriptor list length is not"
+			" a multiple of %d\n", XCOPY_TARGET_DESC_LEN);
+ return -EINVAL;
+ }
+ if (tdll > 64) {
+		pr_err("XCOPY target descriptor supports a maximum"
+			" of two src/dest descriptors, tdll: %hu too large..\n", tdll);
+ return -EINVAL;
+ }
+ /*
+ * Generate an IEEE Registered Extended designator based upon the
+ * se_device the XCOPY was received upon..
+ */
+ memset(&xop->local_dev_wwn[0], 0, XCOPY_NAA_IEEE_REGEX_LEN);
+ target_xcopy_gen_naa_ieee(local_dev, &xop->local_dev_wwn[0]);
+
+ while (start < tdll) {
+ /*
+		 * Check for target descriptor identification of type 0xE4,
+		 * using VPD 0x83 WWPN matching..
+ */
+ switch (desc[0]) {
+ case 0xe4:
+ rc = target_xcopy_parse_tiddesc_e4(se_cmd, xop,
+ &desc[0], src);
+ if (rc != 0)
+ goto out;
+ /*
+ * Assume target descriptors are in source -> destination order..
+ */
+ if (src == true)
+ src = false;
+ else
+ src = true;
+ start += XCOPY_TARGET_DESC_LEN;
+ desc += XCOPY_TARGET_DESC_LEN;
+ ret++;
+ break;
+ default:
+ pr_err("XCOPY unsupported descriptor type code:"
+ " 0x%02x\n", desc[0]);
+ goto out;
+ }
+ }
+
+ if (xop->op_origin == XCOL_SOURCE_RECV_OP)
+ rc = target_xcopy_locate_se_dev_e4(se_cmd, xop, true);
+ else
+ rc = target_xcopy_locate_se_dev_e4(se_cmd, xop, false);
+
+ if (rc < 0)
+ goto out;
+
+ pr_debug("XCOPY TGT desc: Source dev: %p NAA IEEE WWN: 0x%16phN\n",
+ xop->src_dev, &xop->src_tid_wwn[0]);
+ pr_debug("XCOPY TGT desc: Dest dev: %p NAA IEEE WWN: 0x%16phN\n",
+ xop->dst_dev, &xop->dst_tid_wwn[0]);
+
+ return ret;
+
+out:
+ return -EINVAL;
+}
+
+static int target_xcopy_parse_segdesc_02(struct se_cmd *se_cmd, struct xcopy_op *xop,
+ unsigned char *p)
+{
+ unsigned char *desc = p;
+ int dc = (desc[1] & 0x02);
+ unsigned short desc_len;
+
+ desc_len = get_unaligned_be16(&desc[2]);
+ if (desc_len != 0x18) {
+ pr_err("XCOPY segment desc 0x02: Illegal desc_len:"
+ " %hu\n", desc_len);
+ return -EINVAL;
+ }
+
+ xop->stdi = get_unaligned_be16(&desc[4]);
+ xop->dtdi = get_unaligned_be16(&desc[6]);
+ pr_debug("XCOPY seg desc 0x02: desc_len: %hu stdi: %hu dtdi: %hu, DC: %d\n",
+ desc_len, xop->stdi, xop->dtdi, dc);
+
+ xop->nolb = get_unaligned_be16(&desc[10]);
+ xop->src_lba = get_unaligned_be64(&desc[12]);
+ xop->dst_lba = get_unaligned_be64(&desc[20]);
+ pr_debug("XCOPY seg desc 0x02: nolb: %hu src_lba: %llu dst_lba: %llu\n",
+ xop->nolb, (unsigned long long)xop->src_lba,
+ (unsigned long long)xop->dst_lba);
+
+ if (dc != 0) {
+		xop->dbl = (desc[29] & 0xff) << 16;
+		xop->dbl |= (desc[30] & 0xff) << 8;
+		xop->dbl |= desc[31] & 0xff;
+
+ pr_debug("XCOPY seg desc 0x02: DC=1 w/ dbl: %u\n", xop->dbl);
+ }
+ return 0;
+}
+
+static int target_xcopy_parse_segment_descriptors(struct se_cmd *se_cmd,
+ struct xcopy_op *xop, unsigned char *p,
+ unsigned int sdll)
+{
+ unsigned char *desc = p;
+ unsigned int start = 0;
+ int offset = sdll % XCOPY_SEGMENT_DESC_LEN, rc, ret = 0;
+
+ if (offset != 0) {
+		pr_err("XCOPY segment descriptor list length is not"
+			" a multiple of %d\n", XCOPY_SEGMENT_DESC_LEN);
+ return -EINVAL;
+ }
+
+ while (start < sdll) {
+ /*
+ * Check segment descriptor type code for block -> block
+ */
+ switch (desc[0]) {
+ case 0x02:
+ rc = target_xcopy_parse_segdesc_02(se_cmd, xop, desc);
+ if (rc < 0)
+ goto out;
+
+ ret++;
+ start += XCOPY_SEGMENT_DESC_LEN;
+ desc += XCOPY_SEGMENT_DESC_LEN;
+ break;
+ default:
+			pr_err("XCOPY unsupported segment descriptor"
+				" type: 0x%02x\n", desc[0]);
+ goto out;
+ }
+ }
+
+ return ret;
+
+out:
+ return -EINVAL;
+}
+
+/*
+ * Start xcopy_pt ops
+ */
+
+struct xcopy_pt_cmd {
+ bool remote_port;
+ struct se_cmd se_cmd;
+ struct xcopy_op *xcopy_op;
+ struct completion xpt_passthrough_sem;
+};
+
+static struct se_port xcopy_pt_port;
+static struct se_portal_group xcopy_pt_tpg;
+static struct se_session xcopy_pt_sess;
+static struct se_node_acl xcopy_pt_nacl;
+
+static char *xcopy_pt_get_fabric_name(void)
+{
+ return "xcopy-pt";
+}
+
+static u32 xcopy_pt_get_tag(struct se_cmd *se_cmd)
+{
+ return 0;
+}
+
+static int xcopy_pt_get_cmd_state(struct se_cmd *se_cmd)
+{
+ return 0;
+}
+
+static void xcopy_pt_undepend_remotedev(struct xcopy_op *xop)
+{
+ struct configfs_subsystem *subsys = target_core_subsystem[0];
+ struct se_device *remote_dev;
+
+ if (xop->op_origin == XCOL_SOURCE_RECV_OP)
+ remote_dev = xop->dst_dev;
+ else
+ remote_dev = xop->src_dev;
+
+ pr_debug("Calling configfs_undepend_item for subsys: %p"
+ " remote_dev: %p remote_dev->dev_group: %p\n",
+ subsys, remote_dev, &remote_dev->dev_group.cg_item);
+
+ configfs_undepend_item(subsys, &remote_dev->dev_group.cg_item);
+}
+
+static void xcopy_pt_release_cmd(struct se_cmd *se_cmd)
+{
+ struct xcopy_pt_cmd *xpt_cmd = container_of(se_cmd,
+ struct xcopy_pt_cmd, se_cmd);
+
+ if (xpt_cmd->remote_port)
+ kfree(se_cmd->se_lun);
+
+ kfree(xpt_cmd);
+}
+
+static int xcopy_pt_check_stop_free(struct se_cmd *se_cmd)
+{
+ struct xcopy_pt_cmd *xpt_cmd = container_of(se_cmd,
+ struct xcopy_pt_cmd, se_cmd);
+
+ complete(&xpt_cmd->xpt_passthrough_sem);
+ return 0;
+}
+
+static int xcopy_pt_write_pending(struct se_cmd *se_cmd)
+{
+ return 0;
+}
+
+static int xcopy_pt_write_pending_status(struct se_cmd *se_cmd)
+{
+ return 0;
+}
+
+static int xcopy_pt_queue_data_in(struct se_cmd *se_cmd)
+{
+ return 0;
+}
+
+static int xcopy_pt_queue_status(struct se_cmd *se_cmd)
+{
+ return 0;
+}
+
+static struct target_core_fabric_ops xcopy_pt_tfo = {
+ .get_fabric_name = xcopy_pt_get_fabric_name,
+ .get_task_tag = xcopy_pt_get_tag,
+ .get_cmd_state = xcopy_pt_get_cmd_state,
+ .release_cmd = xcopy_pt_release_cmd,
+ .check_stop_free = xcopy_pt_check_stop_free,
+ .write_pending = xcopy_pt_write_pending,
+ .write_pending_status = xcopy_pt_write_pending_status,
+ .queue_data_in = xcopy_pt_queue_data_in,
+ .queue_status = xcopy_pt_queue_status,
+};
+
+/*
+ * End xcopy_pt ops
+ */
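+
+/*
+ * A note on the stub ops above: the internal passthrough READ/WRITE
+ * commands never touch a real fabric, so most callbacks only need to
+ * satisfy the target core. The two that do real work are
+ * xcopy_pt_check_stop_free(), which signals completion through
+ * xpt_passthrough_sem (waited on in target_xcopy_issue_pt_cmd()
+ * below), and xcopy_pt_release_cmd(), which frees the emulated
+ * se_lun (remote port case) and the xcopy_pt_cmd itself.
+ */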
+
+int target_xcopy_setup_pt(void)
+{
+ xcopy_wq = alloc_workqueue("xcopy_wq", WQ_MEM_RECLAIM, 0);
+ if (!xcopy_wq) {
+ pr_err("Unable to allocate xcopy_wq\n");
+ return -ENOMEM;
+ }
+
+ memset(&xcopy_pt_port, 0, sizeof(struct se_port));
+ INIT_LIST_HEAD(&xcopy_pt_port.sep_alua_list);
+ INIT_LIST_HEAD(&xcopy_pt_port.sep_list);
+ mutex_init(&xcopy_pt_port.sep_tg_pt_md_mutex);
+
+ memset(&xcopy_pt_tpg, 0, sizeof(struct se_portal_group));
+ INIT_LIST_HEAD(&xcopy_pt_tpg.se_tpg_node);
+ INIT_LIST_HEAD(&xcopy_pt_tpg.acl_node_list);
+ INIT_LIST_HEAD(&xcopy_pt_tpg.tpg_sess_list);
+
+ xcopy_pt_port.sep_tpg = &xcopy_pt_tpg;
+ xcopy_pt_tpg.se_tpg_tfo = &xcopy_pt_tfo;
+
+ memset(&xcopy_pt_nacl, 0, sizeof(struct se_node_acl));
+ INIT_LIST_HEAD(&xcopy_pt_nacl.acl_list);
+ INIT_LIST_HEAD(&xcopy_pt_nacl.acl_sess_list);
+ memset(&xcopy_pt_sess, 0, sizeof(struct se_session));
+ INIT_LIST_HEAD(&xcopy_pt_sess.sess_list);
+ INIT_LIST_HEAD(&xcopy_pt_sess.sess_acl_list);
+
+ xcopy_pt_nacl.se_tpg = &xcopy_pt_tpg;
+ xcopy_pt_nacl.nacl_sess = &xcopy_pt_sess;
+
+ xcopy_pt_sess.se_tpg = &xcopy_pt_tpg;
+ xcopy_pt_sess.se_node_acl = &xcopy_pt_nacl;
+
+ return 0;
+}
+
+void target_xcopy_release_pt(void)
+{
+ if (xcopy_wq)
+ destroy_workqueue(xcopy_wq);
+}
+
+static void target_xcopy_setup_pt_port(
+ struct xcopy_pt_cmd *xpt_cmd,
+ struct xcopy_op *xop,
+ bool remote_port)
+{
+ struct se_cmd *ec_cmd = xop->xop_se_cmd;
+ struct se_cmd *pt_cmd = &xpt_cmd->se_cmd;
+
+ if (xop->op_origin == XCOL_SOURCE_RECV_OP) {
+ /*
+ * Honor destination port reservations for X-COPY PUSH emulation,
+ * where the CDB is received on the local source port and blocks
+ * are READ locally, then WRITTEN to the remote destination port.
+ */
+ if (remote_port) {
+ xpt_cmd->remote_port = remote_port;
+ pt_cmd->se_lun->lun_sep = &xcopy_pt_port;
+ pr_debug("Setup emulated remote DEST xcopy_pt_port: %p to"
+ " cmd->se_lun->lun_sep for X-COPY data PUSH\n",
+ pt_cmd->se_lun->lun_sep);
+ } else {
+ pt_cmd->se_lun = ec_cmd->se_lun;
+ pt_cmd->se_dev = ec_cmd->se_dev;
+
+ pr_debug("Honoring local SRC port from ec_cmd->se_dev:"
+ " %p\n", pt_cmd->se_dev);
+ pt_cmd->se_lun = ec_cmd->se_lun;
+ pr_debug("Honoring local SRC port from ec_cmd->se_lun: %p\n",
+ pt_cmd->se_lun);
+ }
+ } else {
+ /*
+ * Honor source port reservations for X-COPY PULL emulation,
+ * where the CDB is received on the local destination port and
+ * blocks are READ from the remote source port, then WRITTEN to
+ * the local destination port.
+ */
+ if (remote_port) {
+ xpt_cmd->remote_port = remote_port;
+ pt_cmd->se_lun->lun_sep = &xcopy_pt_port;
+ pr_debug("Setup emulated remote SRC xcopy_pt_port: %p to"
+ " cmd->se_lun->lun_sep for X-COPY data PULL\n",
+ pt_cmd->se_lun->lun_sep);
+ } else {
+ pt_cmd->se_lun = ec_cmd->se_lun;
+ pt_cmd->se_dev = ec_cmd->se_dev;
+
+ pr_debug("Honoring local DST port from ec_cmd->se_dev:"
+ " %p\n", pt_cmd->se_dev);
+ pt_cmd->se_lun = ec_cmd->se_lun;
+ pr_debug("Honoring local DST port from ec_cmd->se_lun: %p\n",
+ pt_cmd->se_lun);
+ }
+ }
+}
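+
+/*
+ * Summary of the four cases handled above:
+ *
+ *   op_origin            remote_port   port whose reservations apply
+ *   -------------------  -----------   -----------------------------
+ *   XCOL_SOURCE_RECV_OP  true          emulated DEST xcopy_pt_port
+ *   XCOL_SOURCE_RECV_OP  false         local SRC port from ec_cmd
+ *   XCOL_DEST_RECV_OP    true          emulated SRC xcopy_pt_port
+ *   XCOL_DEST_RECV_OP    false         local DST port from ec_cmd
+ */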
+
+static int target_xcopy_init_pt_lun(
+ struct xcopy_pt_cmd *xpt_cmd,
+ struct xcopy_op *xop,
+ struct se_device *se_dev,
+ struct se_cmd *pt_cmd,
+ bool remote_port)
+{
+ /*
+ * Don't allocate + init a pt_cmd->se_lun when honoring the local
+ * port for reservations. The pt_cmd->se_lun pointer will be set
+ * up from within target_xcopy_setup_pt_port().
+ */
+ if (remote_port == false) {
+ pt_cmd->se_cmd_flags |= SCF_SE_LUN_CMD | SCF_CMD_XCOPY_PASSTHROUGH;
+ return 0;
+ }
+
+ pt_cmd->se_lun = kzalloc(sizeof(struct se_lun), GFP_KERNEL);
+ if (!pt_cmd->se_lun) {
+ pr_err("Unable to allocate pt_cmd->se_lun\n");
+ return -ENOMEM;
+ }
+ init_completion(&pt_cmd->se_lun->lun_shutdown_comp);
+ INIT_LIST_HEAD(&pt_cmd->se_lun->lun_cmd_list);
+ INIT_LIST_HEAD(&pt_cmd->se_lun->lun_acl_list);
+ spin_lock_init(&pt_cmd->se_lun->lun_acl_lock);
+ spin_lock_init(&pt_cmd->se_lun->lun_cmd_lock);
+ spin_lock_init(&pt_cmd->se_lun->lun_sep_lock);
+
+ pt_cmd->se_dev = se_dev;
+
+ pr_debug("Setup emulated se_dev: %p from se_dev\n", pt_cmd->se_dev);
+ pt_cmd->se_lun->lun_se_dev = se_dev;
+ pt_cmd->se_cmd_flags |= SCF_SE_LUN_CMD | SCF_CMD_XCOPY_PASSTHROUGH;
+
+ pr_debug("Setup emulated se_dev: %p to pt_cmd->se_lun->lun_se_dev\n",
+ pt_cmd->se_lun->lun_se_dev);
+
+ return 0;
+}
+
+static int target_xcopy_setup_pt_cmd(
+ struct xcopy_pt_cmd *xpt_cmd,
+ struct xcopy_op *xop,
+ struct se_device *se_dev,
+ unsigned char *cdb,
+ bool remote_port,
+ bool alloc_mem)
+{
+ struct se_cmd *cmd = &xpt_cmd->se_cmd;
+ sense_reason_t sense_rc;
+ int ret = 0, rc;
+ /*
+ * Setup LUN+port to honor reservations based upon xop->op_origin for
+ * X-COPY PUSH or X-COPY PULL based upon where the CDB was received.
+ */
+ rc = target_xcopy_init_pt_lun(xpt_cmd, xop, se_dev, cmd, remote_port);
+ if (rc < 0) {
+ ret = rc;
+ goto out;
+ }
+ xpt_cmd->xcopy_op = xop;
+ target_xcopy_setup_pt_port(xpt_cmd, xop, remote_port);
+
+ sense_rc = target_setup_cmd_from_cdb(cmd, cdb);
+ if (sense_rc) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (alloc_mem) {
+ rc = target_alloc_sgl(&cmd->t_data_sg, &cmd->t_data_nents,
+ cmd->data_length, false);
+ if (rc < 0) {
+ ret = rc;
+ goto out;
+ }
+ /*
+ * Set this bit so that transport_free_pages() allows the
+ * caller to release SGLs + physical memory allocated by
+ * transport_generic_get_mem().
+ */
+ cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
+ } else {
+ /*
+ * Here the previously allocated SGLs for the internal READ
+ * are mapped zero-copy to the internal WRITE.
+ */
+ sense_rc = transport_generic_map_mem_to_cmd(cmd,
+ xop->xop_data_sg, xop->xop_data_nents,
+ NULL, 0);
+ if (sense_rc) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ pr_debug("Setup PASSTHROUGH_NOALLOC t_data_sg: %p t_data_nents:"
+ " %u\n", cmd->t_data_sg, cmd->t_data_nents);
+ }
+
+ return 0;
+
+out:
+ if (remote_port == true)
+ kfree(cmd->se_lun);
+ return ret;
+}
+
+static int target_xcopy_issue_pt_cmd(struct xcopy_pt_cmd *xpt_cmd)
+{
+ struct se_cmd *se_cmd = &xpt_cmd->se_cmd;
+ sense_reason_t sense_rc;
+
+ sense_rc = transport_generic_new_cmd(se_cmd);
+ if (sense_rc)
+ return -EINVAL;
+
+ if (se_cmd->data_direction == DMA_TO_DEVICE)
+ target_execute_cmd(se_cmd);
+
+ wait_for_completion_interruptible(&xpt_cmd->xpt_passthrough_sem);
+
+ pr_debug("target_xcopy_issue_pt_cmd(): SCSI status: 0x%02x\n",
+ se_cmd->scsi_status);
+ return 0;
+}
+
+static int target_xcopy_read_source(
+ struct se_cmd *ec_cmd,
+ struct xcopy_op *xop,
+ struct se_device *src_dev,
+ sector_t src_lba,
+ u32 src_sectors)
+{
+ struct xcopy_pt_cmd *xpt_cmd;
+ struct se_cmd *se_cmd;
+ u32 length = (src_sectors * src_dev->dev_attrib.block_size);
+ int rc;
+ unsigned char cdb[16];
+ bool remote_port = (xop->op_origin == XCOL_DEST_RECV_OP);
+
+ xpt_cmd = kzalloc(sizeof(struct xcopy_pt_cmd), GFP_KERNEL);
+ if (!xpt_cmd) {
+ pr_err("Unable to allocate xcopy_pt_cmd\n");
+ return -ENOMEM;
+ }
+ init_completion(&xpt_cmd->xpt_passthrough_sem);
+ se_cmd = &xpt_cmd->se_cmd;
+
+ memset(&cdb[0], 0, 16);
+ cdb[0] = READ_16;
+ put_unaligned_be64(src_lba, &cdb[2]);
+ put_unaligned_be32(src_sectors, &cdb[10]);
+ pr_debug("XCOPY: Built READ_16: LBA: %llu Sectors: %u Length: %u\n",
+ (unsigned long long)src_lba, src_sectors, length);
+
+ transport_init_se_cmd(se_cmd, &xcopy_pt_tfo, NULL, length,
+ DMA_FROM_DEVICE, 0, NULL);
+ xop->src_pt_cmd = xpt_cmd;
+
+ rc = target_xcopy_setup_pt_cmd(xpt_cmd, xop, src_dev, &cdb[0],
+ remote_port, true);
+ if (rc < 0) {
+ transport_generic_free_cmd(se_cmd, 0);
+ return rc;
+ }
+
+ xop->xop_data_sg = se_cmd->t_data_sg;
+ xop->xop_data_nents = se_cmd->t_data_nents;
+ pr_debug("XCOPY-READ: Saved xop->xop_data_sg: %p, num: %u for READ"
+ " memory\n", xop->xop_data_sg, xop->xop_data_nents);
+
+ rc = target_xcopy_issue_pt_cmd(xpt_cmd);
+ if (rc < 0) {
+ transport_generic_free_cmd(se_cmd, 0);
+ return rc;
+ }
+ /*
+ * Clear the allocated t_data_sg that has been saved for
+ * zero-copy WRITE submission reuse in struct xcopy_op.
+ */
+ se_cmd->t_data_sg = NULL;
+ se_cmd->t_data_nents = 0;
+
+ return 0;
+}
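+
+/*
+ * The hand-off above is what makes the copy zero-copy: the READ
+ * allocates t_data_sg once (alloc_mem=true in
+ * target_xcopy_setup_pt_cmd()), the pointers are parked in
+ * xop->xop_data_sg/xop_data_nents, and target_xcopy_write_destination()
+ * below maps the same pages into the WRITE (alloc_mem=false) instead
+ * of allocating and copying a second buffer.
+ */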
+
+static int target_xcopy_write_destination(
+ struct se_cmd *ec_cmd,
+ struct xcopy_op *xop,
+ struct se_device *dst_dev,
+ sector_t dst_lba,
+ u32 dst_sectors)
+{
+ struct xcopy_pt_cmd *xpt_cmd;
+ struct se_cmd *se_cmd;
+ u32 length = (dst_sectors * dst_dev->dev_attrib.block_size);
+ int rc;
+ unsigned char cdb[16];
+ bool remote_port = (xop->op_origin == XCOL_SOURCE_RECV_OP);
+
+ xpt_cmd = kzalloc(sizeof(struct xcopy_pt_cmd), GFP_KERNEL);
+ if (!xpt_cmd) {
+ pr_err("Unable to allocate xcopy_pt_cmd\n");
+ return -ENOMEM;
+ }
+ init_completion(&xpt_cmd->xpt_passthrough_sem);
+ se_cmd = &xpt_cmd->se_cmd;
+
+ memset(&cdb[0], 0, 16);
+ cdb[0] = WRITE_16;
+ put_unaligned_be64(dst_lba, &cdb[2]);
+ put_unaligned_be32(dst_sectors, &cdb[10]);
+ pr_debug("XCOPY: Built WRITE_16: LBA: %llu Sectors: %u Length: %u\n",
+ (unsigned long long)dst_lba, dst_sectors, length);
+
+ transport_init_se_cmd(se_cmd, &xcopy_pt_tfo, NULL, length,
+ DMA_TO_DEVICE, 0, NULL);
+ xop->dst_pt_cmd = xpt_cmd;
+
+ rc = target_xcopy_setup_pt_cmd(xpt_cmd, xop, dst_dev, &cdb[0],
+ remote_port, false);
+ if (rc < 0) {
+ struct se_cmd *src_cmd = &xop->src_pt_cmd->se_cmd;
+ /*
+ * If the failure happened before the t_data_sg hand-off in
+ * target_xcopy_setup_pt_cmd(), reset the memory + clear the flag
+ * so that the core releases this memory on error during X-COPY
+ * WRITE I/O.
+ */
+ src_cmd->se_cmd_flags &= ~SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
+ src_cmd->t_data_sg = xop->xop_data_sg;
+ src_cmd->t_data_nents = xop->xop_data_nents;
+
+ transport_generic_free_cmd(se_cmd, 0);
+ return rc;
+ }
+
+ rc = target_xcopy_issue_pt_cmd(xpt_cmd);
+ if (rc < 0) {
+ se_cmd->se_cmd_flags &= ~SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
+ transport_generic_free_cmd(se_cmd, 0);
+ return rc;
+ }
+
+ return 0;
+}
+
+static void target_xcopy_do_work(struct work_struct *work)
+{
+ struct xcopy_op *xop = container_of(work, struct xcopy_op, xop_work);
+ struct se_device *src_dev = xop->src_dev, *dst_dev = xop->dst_dev;
+ struct se_cmd *ec_cmd = xop->xop_se_cmd;
+ sector_t src_lba = xop->src_lba, dst_lba = xop->dst_lba, end_lba;
+ unsigned int max_sectors;
+ int rc;
+ unsigned short nolb = xop->nolb, cur_nolb, max_nolb, copied_nolb = 0;
+
+ end_lba = src_lba + nolb;
+ /*
+ * Break up XCOPY I/O into hw_max_sectors-sized chunks based on
+ * the smaller max_sectors of src_dev + dst_dev, further capped
+ * at XCOPY_MAX_SECTORS.
+ */
+ max_sectors = min(src_dev->dev_attrib.hw_max_sectors,
+ dst_dev->dev_attrib.hw_max_sectors);
+ max_sectors = min_t(u32, max_sectors, XCOPY_MAX_SECTORS);
+
+ max_nolb = min_t(u16, max_sectors, ((u16)(~0U)));
+
+ pr_debug("target_xcopy_do_work: nolb: %hu, max_nolb: %hu end_lba: %llu\n",
+ nolb, max_nolb, (unsigned long long)end_lba);
+ pr_debug("target_xcopy_do_work: Starting src_lba: %llu, dst_lba: %llu\n",
+ (unsigned long long)src_lba, (unsigned long long)dst_lba);
+
+ while (src_lba < end_lba) {
+ cur_nolb = min(nolb, max_nolb);
+
+ pr_debug("target_xcopy_do_work: Calling read src_dev: %p src_lba: %llu,"
+ " cur_nolb: %hu\n", src_dev, (unsigned long long)src_lba, cur_nolb);
+
+ rc = target_xcopy_read_source(ec_cmd, xop, src_dev, src_lba, cur_nolb);
+ if (rc < 0)
+ goto out;
+
+ src_lba += cur_nolb;
+ pr_debug("target_xcopy_do_work: Incremented READ src_lba to %llu\n",
+ (unsigned long long)src_lba);
+
+ pr_debug("target_xcopy_do_work: Calling write dst_dev: %p dst_lba: %llu,"
+ " cur_nolb: %hu\n", dst_dev, (unsigned long long)dst_lba, cur_nolb);
+
+ rc = target_xcopy_write_destination(ec_cmd, xop, dst_dev,
+ dst_lba, cur_nolb);
+ if (rc < 0) {
+ transport_generic_free_cmd(&xop->src_pt_cmd->se_cmd, 0);
+ goto out;
+ }
+
+ dst_lba += cur_nolb;
+ pr_debug("target_xcopy_do_work: Incremented WRITE dst_lba to %llu\n",
+ (unsigned long long)dst_lba);
+
+ copied_nolb += cur_nolb;
+ nolb -= cur_nolb;
+
+ transport_generic_free_cmd(&xop->src_pt_cmd->se_cmd, 0);
+ xop->dst_pt_cmd->se_cmd.se_cmd_flags &= ~SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
+
+ transport_generic_free_cmd(&xop->dst_pt_cmd->se_cmd, 0);
+ }
+
+ xcopy_pt_undepend_remotedev(xop);
+ kfree(xop);
+
+ pr_debug("target_xcopy_do_work: Final src_lba: %llu, dst_lba: %llu\n",
+ (unsigned long long)src_lba, (unsigned long long)dst_lba);
+ pr_debug("target_xcopy_do_work: Blocks copied: %hu, Bytes Copied: %u\n",
+ copied_nolb, copied_nolb * dst_dev->dev_attrib.block_size);
+
+ pr_debug("target_xcopy_do_work: Setting X-COPY GOOD status -> sending response\n");
+ target_complete_cmd(ec_cmd, SAM_STAT_GOOD);
+ return;
+
+out:
+ xcopy_pt_undepend_remotedev(xop);
+ kfree(xop);
+
+ pr_warn("target_xcopy_do_work: Setting X-COPY CHECK_CONDITION -> sending response\n");
+ ec_cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
+ target_complete_cmd(ec_cmd, SAM_STAT_CHECK_CONDITION);
+}
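+
+/*
+ * A worked example of the chunking above, assuming 512 byte blocks
+ * and hw_max_sectors of 2048 on both devices: max_sectors is capped
+ * at XCOPY_MAX_SECTORS (1024), so a request with nolb = 3000 is
+ * carried out as three READ+WRITE round trips of 1024, 1024 and 952
+ * blocks, advancing src_lba and dst_lba by cur_nolb each iteration.
+ */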
+
+sense_reason_t target_do_xcopy(struct se_cmd *se_cmd)
+{
+ struct xcopy_op *xop = NULL;
+ unsigned char *p = NULL, *seg_desc;
+ unsigned int list_id, list_id_usage, sdll, inline_dl, sa;
+ int rc;
+ unsigned short tdll;
+
+ sa = se_cmd->t_task_cdb[1] & 0x1f;
+ if (sa != 0x00) {
+ pr_err("EXTENDED_COPY(LID4) not supported\n");
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+ }
+
+ p = transport_kmap_data_sg(se_cmd);
+ if (!p) {
+ pr_err("transport_kmap_data_sg() failed in target_do_xcopy\n");
+ return TCM_OUT_OF_RESOURCES;
+ }
+
+ list_id = p[0];
+ if (list_id != 0x00) {
+ pr_err("XCOPY with non zero list_id: 0x%02x\n", list_id);
+ goto out;
+ }
+ list_id_usage = (p[1] & 0x18);
+ /*
+ * Determine TARGET DESCRIPTOR LIST LENGTH + SEGMENT DESCRIPTOR LIST LENGTH
+ */
+ tdll = get_unaligned_be16(&p[2]);
+ sdll = get_unaligned_be32(&p[8]);
+
+ inline_dl = get_unaligned_be32(&p[12]);
+ if (inline_dl != 0) {
+ pr_err("XCOPY with non zero inline data length\n");
+ goto out;
+ }
+
+ xop = kzalloc(sizeof(struct xcopy_op), GFP_KERNEL);
+ if (!xop) {
+ pr_err("Unable to allocate xcopy_op\n");
+ goto out;
+ }
+ xop->xop_se_cmd = se_cmd;
+
+ pr_debug("Processing XCOPY with list_id: 0x%02x list_id_usage: 0x%02x"
+ " tdll: %hu sdll: %u inline_dl: %u\n", list_id, list_id_usage,
+ tdll, sdll, inline_dl);
+
+ rc = target_xcopy_parse_target_descriptors(se_cmd, xop, &p[16], tdll);
+ if (rc <= 0)
+ goto out;
+
+ pr_debug("XCOPY: Processed %d target descriptors, length: %u\n", rc,
+ rc * XCOPY_TARGET_DESC_LEN);
+ seg_desc = &p[16];
+ seg_desc += (rc * XCOPY_TARGET_DESC_LEN);
+
+ rc = target_xcopy_parse_segment_descriptors(se_cmd, xop, seg_desc, sdll);
+ if (rc <= 0) {
+ xcopy_pt_undepend_remotedev(xop);
+ goto out;
+ }
+ transport_kunmap_data_sg(se_cmd);
+
+ pr_debug("XCOPY: Processed %d segment descriptors, length: %u\n", rc,
+ rc * XCOPY_SEGMENT_DESC_LEN);
+ INIT_WORK(&xop->xop_work, target_xcopy_do_work);
+ queue_work(xcopy_wq, &xop->xop_work);
+ return TCM_NO_SENSE;
+
+out:
+ if (p)
+ transport_kunmap_data_sg(se_cmd);
+ kfree(xop);
+ return TCM_INVALID_CDB_FIELD;
+}
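+
+/*
+ * Byte layout of the EXTENDED_COPY(LID1) parameter list header parsed
+ * above, as a reading aid:
+ *
+ *   byte 0:      LIST IDENTIFIER (must be 0x00 here)
+ *   byte 1:      LIST ID USAGE (bits 3-4)
+ *   bytes 2-3:   target descriptor list length (tdll)
+ *   bytes 8-11:  segment descriptor list length (sdll)
+ *   bytes 12-15: inline data length (must be zero here)
+ *   bytes 16+:   target descriptors, then segment descriptors
+ */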
+
+static sense_reason_t target_rcr_operating_parameters(struct se_cmd *se_cmd)
+{
+ unsigned char *p;
+
+ p = transport_kmap_data_sg(se_cmd);
+ if (!p) {
+ pr_err("transport_kmap_data_sg failed in"
+ " target_rcr_operating_parameters\n");
+ return TCM_OUT_OF_RESOURCES;
+ }
+
+ if (se_cmd->data_length < 54) {
+ pr_err("Receive Copy Results Op Parameters length"
+ " too small: %u\n", se_cmd->data_length);
+ transport_kunmap_data_sg(se_cmd);
+ return TCM_INVALID_CDB_FIELD;
+ }
+ /*
+ * Set SNLID=1 (Supports no List ID)
+ */
+ p[4] = 0x1;
+ /*
+ * MAXIMUM TARGET DESCRIPTOR COUNT
+ */
+ put_unaligned_be16(RCR_OP_MAX_TARGET_DESC_COUNT, &p[8]);
+ /*
+ * MAXIMUM SEGMENT DESCRIPTOR COUNT
+ */
+ put_unaligned_be16(RCR_OP_MAX_SG_DESC_COUNT, &p[10]);
+ /*
+ * MAXIMUM DESCRIPTOR LIST LENGTH
+ */
+ put_unaligned_be32(RCR_OP_MAX_DESC_LIST_LEN, &p[12]);
+ /*
+ * MAXIMUM SEGMENT LENGTH
+ */
+ put_unaligned_be32(RCR_OP_MAX_SEGMENT_LEN, &p[16]);
+ /*
+ * MAXIMUM INLINE DATA LENGTH for SA 0x04 (NOT SUPPORTED)
+ */
+ put_unaligned_be32(0x0, &p[20]);
+ /*
+ * HELD DATA LIMIT
+ */
+ put_unaligned_be32(0x0, &p[24]);
+ /*
+ * MAXIMUM STREAM DEVICE TRANSFER SIZE
+ */
+ put_unaligned_be32(0x0, &p[28]);
+ /*
+ * TOTAL CONCURRENT COPIES
+ */
+ put_unaligned_be16(RCR_OP_TOTAL_CONCURR_COPIES, &p[34]);
+ /*
+ * MAXIMUM CONCURRENT COPIES
+ */
+ p[36] = RCR_OP_MAX_CONCURR_COPIES;
+ /*
+ * DATA SEGMENT GRANULARITY (log 2)
+ */
+ p[37] = RCR_OP_DATA_SEG_GRAN_LOG2;
+ /*
+ * INLINE DATA GRANULARITY (log 2)
+ */
+ p[38] = RCR_OP_INLINE_DATA_GRAN_LOG2;
+ /*
+ * HELD DATA GRANULARITY (log 2)
+ */
+ p[39] = RCR_OP_HELD_DATA_GRAN_LOG2;
+ /*
+ * IMPLEMENTED DESCRIPTOR LIST LENGTH
+ */
+ p[43] = 0x2;
+ /*
+ * List of implemented descriptor type codes (ordered)
+ */
+ p[44] = 0x02; /* Copy Block to Block device */
+ p[45] = 0xe4; /* Identification descriptor target descriptor */
+
+ /*
+ * AVAILABLE DATA (n-3)
+ */
+ put_unaligned_be32(42, &p[0]);
+
+ transport_kunmap_data_sg(se_cmd);
+ target_complete_cmd(se_cmd, GOOD);
+
+ return TCM_NO_SENSE;
+}
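+
+/*
+ * A note on the AVAILABLE DATA value above: the last byte written is
+ * the implemented descriptor list entry at offset 45 (n = 45), so
+ * AVAILABLE DATA = n - 3 = 42 for 46 bytes of valid payload.
+ */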
+
+sense_reason_t target_do_receive_copy_results(struct se_cmd *se_cmd)
+{
+ unsigned char *cdb = &se_cmd->t_task_cdb[0];
+ int sa = (cdb[1] & 0x1f), list_id = cdb[2];
+ sense_reason_t rc = TCM_NO_SENSE;
+
+ pr_debug("Entering target_do_receive_copy_results: SA: 0x%02x, List ID:"
+ " 0x%02x, AL: %u\n", sa, list_id, se_cmd->data_length);
+
+ if (list_id != 0) {
+ pr_err("Receive Copy Results with non zero list identifier"
+ " not supported\n");
+ return TCM_INVALID_CDB_FIELD;
+ }
+
+ switch (sa) {
+ case RCR_SA_OPERATING_PARAMETERS:
+ rc = target_rcr_operating_parameters(se_cmd);
+ break;
+ case RCR_SA_COPY_STATUS:
+ case RCR_SA_RECEIVE_DATA:
+ case RCR_SA_FAILED_SEGMENT_DETAILS:
+ default:
+ pr_err("Unsupported SA for receive copy results: 0x%02x\n", sa);
+ return TCM_INVALID_CDB_FIELD;
+ }
+
+ return rc;
+}
diff --git a/drivers/target/target_core_xcopy.h b/drivers/target/target_core_xcopy.h
new file mode 100644
index 0000000..700a981
--- /dev/null
+++ b/drivers/target/target_core_xcopy.h
@@ -0,0 +1,62 @@
+#define XCOPY_TARGET_DESC_LEN 32
+#define XCOPY_SEGMENT_DESC_LEN 28
+#define XCOPY_NAA_IEEE_REGEX_LEN 16
+#define XCOPY_MAX_SECTORS 1024
+
+enum xcopy_origin_list {
+ XCOL_SOURCE_RECV_OP = 0x01,
+ XCOL_DEST_RECV_OP = 0x02,
+};
+
+struct xcopy_pt_cmd;
+
+struct xcopy_op {
+ int op_origin;
+
+ struct se_cmd *xop_se_cmd;
+ struct se_device *src_dev;
+ unsigned char src_tid_wwn[XCOPY_NAA_IEEE_REGEX_LEN];
+ struct se_device *dst_dev;
+ unsigned char dst_tid_wwn[XCOPY_NAA_IEEE_REGEX_LEN];
+ unsigned char local_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN];
+
+ sector_t src_lba;
+ sector_t dst_lba;
+ unsigned short stdi;
+ unsigned short dtdi;
+ unsigned short nolb;
+ unsigned int dbl;
+
+ struct xcopy_pt_cmd *src_pt_cmd;
+ struct xcopy_pt_cmd *dst_pt_cmd;
+
+ u32 xop_data_nents;
+ struct scatterlist *xop_data_sg;
+ struct work_struct xop_work;
+};
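+
+/*
+ * Reading aid: stdi, dtdi, nolb, src_lba, dst_lba and dbl are filled
+ * from the 0x02 segment descriptor; op_origin records whether the
+ * EXTENDED_COPY CDB arrived on the source or the destination device;
+ * xop_data_sg/xop_data_nents carry the READ buffer over to the WRITE
+ * for zero-copy reuse (see target_core_xcopy.c).
+ */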
+
+/*
+ * Receive Copy Results Service Actions
+ */
+#define RCR_SA_COPY_STATUS 0x00
+#define RCR_SA_RECEIVE_DATA 0x01
+#define RCR_SA_OPERATING_PARAMETERS 0x03
+#define RCR_SA_FAILED_SEGMENT_DETAILS 0x04
+
+/*
+ * Receive Copy Results defs for Operating Parameters
+ */
+#define RCR_OP_MAX_TARGET_DESC_COUNT 0x2
+#define RCR_OP_MAX_SG_DESC_COUNT 0x1
+#define RCR_OP_MAX_DESC_LIST_LEN 1024
+#define RCR_OP_MAX_SEGMENT_LEN 268435456 /* 256 MB */
+#define RCR_OP_TOTAL_CONCURR_COPIES 0x1 /* Must be <= 16384 */
+#define RCR_OP_MAX_CONCURR_COPIES 0x1 /* Must be <= 255 */
+#define RCR_OP_DATA_SEG_GRAN_LOG2 9 /* 512 bytes in log 2 */
+#define RCR_OP_INLINE_DATA_GRAN_LOG2 9 /* 512 bytes in log 2 */
+#define RCR_OP_HELD_DATA_GRAN_LOG2 9 /* 512 bytes in log 2 */
+
+extern int target_xcopy_setup_pt(void);
+extern void target_xcopy_release_pt(void);
+extern sense_reason_t target_do_xcopy(struct se_cmd *);
+extern sense_reason_t target_do_receive_copy_results(struct se_cmd *);
diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c
index b74feb0..4e00508 100644
--- a/drivers/target/tcm_fc/tfc_conf.c
+++ b/drivers/target/tcm_fc/tfc_conf.c
@@ -311,7 +311,11 @@ static struct se_portal_group *ft_add_tpg(
*/
if (strstr(name, "tpgt_") != name)
return NULL;
- if (strict_strtoul(name + 5, 10, &index) || index > UINT_MAX)
+
+ ret = kstrtoul(name + 5, 10, &index);
+ if (ret)
+ return NULL;
+ if (index > UINT_MAX)
return NULL;
lacl = container_of(wwn, struct ft_lport_acl, fc_lport_wwn);
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index e988c81..dbfc390 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -17,8 +17,17 @@ if THERMAL
config THERMAL_HWMON
bool
+ prompt "Expose thermal sensors as hwmon device"
depends on HWMON=y || HWMON=THERMAL
default y
+ help
+ In case a sensor is registered with the thermal
+ framework, this option will also register it
+ as a hwmon device. The sensor will then have the
+ common hwmon sysfs interface.
+
+ Say 'Y' here if you want all thermal sensors to
+ have the hwmon sysfs interface too.
choice
prompt "Default Thermal governor"
@@ -91,6 +100,17 @@ config THERMAL_EMULATION
because userland can easily disable the thermal policy by simply
flooding this sysfs node with low temperature values.
+config IMX_THERMAL
+ tristate "Temperature sensor driver for Freescale i.MX SoCs"
+ depends on CPU_THERMAL
+ depends on MFD_SYSCON
+ depends on OF
+ help
+ Support for the Temperature Monitor (TEMPMON) found on Freescale
+ i.MX SoCs. It supports one critical trip point and one passive
+ trip point. cpufreq is used as the cooling device to throttle
+ CPUs when the passive trip is crossed.
+
config SPEAR_THERMAL
bool "SPEAr thermal sensor driver"
depends on PLAT_SPEAR
@@ -114,14 +134,6 @@ config KIRKWOOD_THERMAL
Support for the Kirkwood thermal sensor driver into the Linux thermal
framework. Only kirkwood 88F6282 and 88F6283 have this sensor.
-config EXYNOS_THERMAL
- tristate "Temperature sensor on Samsung EXYNOS"
- depends on (ARCH_EXYNOS4 || ARCH_EXYNOS5)
- depends on CPU_THERMAL
- help
- If you say yes here you get support for TMU (Thermal Management
- Unit) on SAMSUNG EXYNOS series of SoC.
-
config DOVE_THERMAL
tristate "Temperature sensor on Marvell Dove SoCs"
depends on ARCH_DOVE
@@ -184,4 +196,9 @@ menu "Texas Instruments thermal drivers"
source "drivers/thermal/ti-soc-thermal/Kconfig"
endmenu
+menu "Samsung thermal drivers"
+depends on PLAT_SAMSUNG
+source "drivers/thermal/samsung/Kconfig"
+endmenu
+
endif
diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile
index 67184a2..584b363 100644
--- a/drivers/thermal/Makefile
+++ b/drivers/thermal/Makefile
@@ -5,6 +5,9 @@
obj-$(CONFIG_THERMAL) += thermal_sys.o
thermal_sys-y += thermal_core.o
+# interface to/from other layers providing sensors
+thermal_sys-$(CONFIG_THERMAL_HWMON) += thermal_hwmon.o
+
# governors
thermal_sys-$(CONFIG_THERMAL_GOV_FAIR_SHARE) += fair_share.o
thermal_sys-$(CONFIG_THERMAL_GOV_STEP_WISE) += step_wise.o
@@ -17,10 +20,11 @@ thermal_sys-$(CONFIG_CPU_THERMAL) += cpu_cooling.o
obj-$(CONFIG_SPEAR_THERMAL) += spear_thermal.o
obj-$(CONFIG_RCAR_THERMAL) += rcar_thermal.o
obj-$(CONFIG_KIRKWOOD_THERMAL) += kirkwood_thermal.o
-obj-$(CONFIG_EXYNOS_THERMAL) += exynos_thermal.o
+obj-y += samsung/
obj-$(CONFIG_DOVE_THERMAL) += dove_thermal.o
obj-$(CONFIG_DB8500_THERMAL) += db8500_thermal.o
obj-$(CONFIG_ARMADA_THERMAL) += armada_thermal.o
+obj-$(CONFIG_IMX_THERMAL) += imx_thermal.o
obj-$(CONFIG_DB8500_CPUFREQ_COOLING) += db8500_cpufreq_cooling.o
obj-$(CONFIG_INTEL_POWERCLAMP) += intel_powerclamp.o
obj-$(CONFIG_X86_PKG_TEMP_THERMAL) += x86_pkg_temp_thermal.o
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index 82e15db..d179028 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -322,6 +322,8 @@ static int cpufreq_thermal_notifier(struct notifier_block *nb,
if (cpumask_test_cpu(policy->cpu, &notify_device->allowed_cpus))
max_freq = notify_device->cpufreq_val;
+ else
+ return 0;
/* Never exceed user_policy.max */
if (max_freq > policy->user_policy.max)
@@ -496,8 +498,12 @@ EXPORT_SYMBOL_GPL(cpufreq_cooling_register);
*/
void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
{
- struct cpufreq_cooling_device *cpufreq_dev = cdev->devdata;
+ struct cpufreq_cooling_device *cpufreq_dev;
+
+ if (!cdev)
+ return;
+ cpufreq_dev = cdev->devdata;
mutex_lock(&cooling_cpufreq_lock);
cpufreq_dev_count--;
diff --git a/drivers/thermal/exynos_thermal.c b/drivers/thermal/exynos_thermal.c
deleted file mode 100644
index 9af4b93..0000000
--- a/drivers/thermal/exynos_thermal.c
+++ /dev/null
@@ -1,1059 +0,0 @@
-/*
- * exynos_thermal.c - Samsung EXYNOS TMU (Thermal Management Unit)
- *
- * Copyright (C) 2011 Samsung Electronics
- * Donggeun Kim <dg77.kim@samsung.com>
- * Amit Daniel Kachhap <amit.kachhap@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- */
-
-#include <linux/module.h>
-#include <linux/err.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/platform_device.h>
-#include <linux/interrupt.h>
-#include <linux/clk.h>
-#include <linux/workqueue.h>
-#include <linux/sysfs.h>
-#include <linux/kobject.h>
-#include <linux/io.h>
-#include <linux/mutex.h>
-#include <linux/platform_data/exynos_thermal.h>
-#include <linux/thermal.h>
-#include <linux/cpufreq.h>
-#include <linux/cpu_cooling.h>
-#include <linux/of.h>
-
-/* Exynos generic registers */
-#define EXYNOS_TMU_REG_TRIMINFO 0x0
-#define EXYNOS_TMU_REG_CONTROL 0x20
-#define EXYNOS_TMU_REG_STATUS 0x28
-#define EXYNOS_TMU_REG_CURRENT_TEMP 0x40
-#define EXYNOS_TMU_REG_INTEN 0x70
-#define EXYNOS_TMU_REG_INTSTAT 0x74
-#define EXYNOS_TMU_REG_INTCLEAR 0x78
-
-#define EXYNOS_TMU_TRIM_TEMP_MASK 0xff
-#define EXYNOS_TMU_GAIN_SHIFT 8
-#define EXYNOS_TMU_REF_VOLTAGE_SHIFT 24
-#define EXYNOS_TMU_CORE_ON 3
-#define EXYNOS_TMU_CORE_OFF 2
-#define EXYNOS_TMU_DEF_CODE_TO_TEMP_OFFSET 50
-
-/* Exynos4210 specific registers */
-#define EXYNOS4210_TMU_REG_THRESHOLD_TEMP 0x44
-#define EXYNOS4210_TMU_REG_TRIG_LEVEL0 0x50
-#define EXYNOS4210_TMU_REG_TRIG_LEVEL1 0x54
-#define EXYNOS4210_TMU_REG_TRIG_LEVEL2 0x58
-#define EXYNOS4210_TMU_REG_TRIG_LEVEL3 0x5C
-#define EXYNOS4210_TMU_REG_PAST_TEMP0 0x60
-#define EXYNOS4210_TMU_REG_PAST_TEMP1 0x64
-#define EXYNOS4210_TMU_REG_PAST_TEMP2 0x68
-#define EXYNOS4210_TMU_REG_PAST_TEMP3 0x6C
-
-#define EXYNOS4210_TMU_TRIG_LEVEL0_MASK 0x1
-#define EXYNOS4210_TMU_TRIG_LEVEL1_MASK 0x10
-#define EXYNOS4210_TMU_TRIG_LEVEL2_MASK 0x100
-#define EXYNOS4210_TMU_TRIG_LEVEL3_MASK 0x1000
-#define EXYNOS4210_TMU_INTCLEAR_VAL 0x1111
-
-/* Exynos5250 and Exynos4412 specific registers */
-#define EXYNOS_TMU_TRIMINFO_CON 0x14
-#define EXYNOS_THD_TEMP_RISE 0x50
-#define EXYNOS_THD_TEMP_FALL 0x54
-#define EXYNOS_EMUL_CON 0x80
-
-#define EXYNOS_TRIMINFO_RELOAD 0x1
-#define EXYNOS_TMU_CLEAR_RISE_INT 0x111
-#define EXYNOS_TMU_CLEAR_FALL_INT (0x111 << 12)
-#define EXYNOS_MUX_ADDR_VALUE 6
-#define EXYNOS_MUX_ADDR_SHIFT 20
-#define EXYNOS_TMU_TRIP_MODE_SHIFT 13
-
-#define EFUSE_MIN_VALUE 40
-#define EFUSE_MAX_VALUE 100
-
-/* In-kernel thermal framework related macros & definations */
-#define SENSOR_NAME_LEN 16
-#define MAX_TRIP_COUNT 8
-#define MAX_COOLING_DEVICE 4
-#define MAX_THRESHOLD_LEVS 4
-
-#define ACTIVE_INTERVAL 500
-#define IDLE_INTERVAL 10000
-#define MCELSIUS 1000
-
-#ifdef CONFIG_THERMAL_EMULATION
-#define EXYNOS_EMUL_TIME 0x57F0
-#define EXYNOS_EMUL_TIME_SHIFT 16
-#define EXYNOS_EMUL_DATA_SHIFT 8
-#define EXYNOS_EMUL_DATA_MASK 0xFF
-#define EXYNOS_EMUL_ENABLE 0x1
-#endif /* CONFIG_THERMAL_EMULATION */
-
-/* CPU Zone information */
-#define PANIC_ZONE 4
-#define WARN_ZONE 3
-#define MONITOR_ZONE 2
-#define SAFE_ZONE 1
-
-#define GET_ZONE(trip) (trip + 2)
-#define GET_TRIP(zone) (zone - 2)
-
-#define EXYNOS_ZONE_COUNT 3
-
-struct exynos_tmu_data {
- struct exynos_tmu_platform_data *pdata;
- struct resource *mem;
- void __iomem *base;
- int irq;
- enum soc_type soc;
- struct work_struct irq_work;
- struct mutex lock;
- struct clk *clk;
- u8 temp_error1, temp_error2;
-};
-
-struct thermal_trip_point_conf {
- int trip_val[MAX_TRIP_COUNT];
- int trip_count;
- u8 trigger_falling;
-};
-
-struct thermal_cooling_conf {
- struct freq_clip_table freq_data[MAX_TRIP_COUNT];
- int freq_clip_count;
-};
-
-struct thermal_sensor_conf {
- char name[SENSOR_NAME_LEN];
- int (*read_temperature)(void *data);
- int (*write_emul_temp)(void *drv_data, unsigned long temp);
- struct thermal_trip_point_conf trip_data;
- struct thermal_cooling_conf cooling_data;
- void *private_data;
-};
-
-struct exynos_thermal_zone {
- enum thermal_device_mode mode;
- struct thermal_zone_device *therm_dev;
- struct thermal_cooling_device *cool_dev[MAX_COOLING_DEVICE];
- unsigned int cool_dev_size;
- struct platform_device *exynos4_dev;
- struct thermal_sensor_conf *sensor_conf;
- bool bind;
-};
-
-static struct exynos_thermal_zone *th_zone;
-static void exynos_unregister_thermal(void);
-static int exynos_register_thermal(struct thermal_sensor_conf *sensor_conf);
-
-/* Get mode callback functions for thermal zone */
-static int exynos_get_mode(struct thermal_zone_device *thermal,
- enum thermal_device_mode *mode)
-{
- if (th_zone)
- *mode = th_zone->mode;
- return 0;
-}
-
-/* Set mode callback functions for thermal zone */
-static int exynos_set_mode(struct thermal_zone_device *thermal,
- enum thermal_device_mode mode)
-{
- if (!th_zone->therm_dev) {
- pr_notice("thermal zone not registered\n");
- return 0;
- }
-
- mutex_lock(&th_zone->therm_dev->lock);
-
- if (mode == THERMAL_DEVICE_ENABLED &&
- !th_zone->sensor_conf->trip_data.trigger_falling)
- th_zone->therm_dev->polling_delay = IDLE_INTERVAL;
- else
- th_zone->therm_dev->polling_delay = 0;
-
- mutex_unlock(&th_zone->therm_dev->lock);
-
- th_zone->mode = mode;
- thermal_zone_device_update(th_zone->therm_dev);
- pr_info("thermal polling set for duration=%d msec\n",
- th_zone->therm_dev->polling_delay);
- return 0;
-}
-
-
-/* Get trip type callback functions for thermal zone */
-static int exynos_get_trip_type(struct thermal_zone_device *thermal, int trip,
- enum thermal_trip_type *type)
-{
- switch (GET_ZONE(trip)) {
- case MONITOR_ZONE:
- case WARN_ZONE:
- *type = THERMAL_TRIP_ACTIVE;
- break;
- case PANIC_ZONE:
- *type = THERMAL_TRIP_CRITICAL;
- break;
- default:
- return -EINVAL;
- }
- return 0;
-}
-
-/* Get trip temperature callback functions for thermal zone */
-static int exynos_get_trip_temp(struct thermal_zone_device *thermal, int trip,
- unsigned long *temp)
-{
- if (trip < GET_TRIP(MONITOR_ZONE) || trip > GET_TRIP(PANIC_ZONE))
- return -EINVAL;
-
- *temp = th_zone->sensor_conf->trip_data.trip_val[trip];
- /* convert the temperature into millicelsius */
- *temp = *temp * MCELSIUS;
-
- return 0;
-}
-
-/* Get critical temperature callback functions for thermal zone */
-static int exynos_get_crit_temp(struct thermal_zone_device *thermal,
- unsigned long *temp)
-{
- int ret;
- /* Panic zone */
- ret = exynos_get_trip_temp(thermal, GET_TRIP(PANIC_ZONE), temp);
- return ret;
-}
-
-/* Bind callback functions for thermal zone */
-static int exynos_bind(struct thermal_zone_device *thermal,
- struct thermal_cooling_device *cdev)
-{
- int ret = 0, i, tab_size, level;
- struct freq_clip_table *tab_ptr, *clip_data;
- struct thermal_sensor_conf *data = th_zone->sensor_conf;
-
- tab_ptr = (struct freq_clip_table *)data->cooling_data.freq_data;
- tab_size = data->cooling_data.freq_clip_count;
-
- if (tab_ptr == NULL || tab_size == 0)
- return -EINVAL;
-
- /* find the cooling device registered*/
- for (i = 0; i < th_zone->cool_dev_size; i++)
- if (cdev == th_zone->cool_dev[i])
- break;
-
- /* No matching cooling device */
- if (i == th_zone->cool_dev_size)
- return 0;
-
- /* Bind the thermal zone to the cpufreq cooling device */
- for (i = 0; i < tab_size; i++) {
- clip_data = (struct freq_clip_table *)&(tab_ptr[i]);
- level = cpufreq_cooling_get_level(0, clip_data->freq_clip_max);
- if (level == THERMAL_CSTATE_INVALID)
- return 0;
- switch (GET_ZONE(i)) {
- case MONITOR_ZONE:
- case WARN_ZONE:
- if (thermal_zone_bind_cooling_device(thermal, i, cdev,
- level, 0)) {
- pr_err("error binding cdev inst %d\n", i);
- ret = -EINVAL;
- }
- th_zone->bind = true;
- break;
- default:
- ret = -EINVAL;
- }
- }
-
- return ret;
-}
-
-/* Unbind callback functions for thermal zone */
-static int exynos_unbind(struct thermal_zone_device *thermal,
- struct thermal_cooling_device *cdev)
-{
- int ret = 0, i, tab_size;
- struct thermal_sensor_conf *data = th_zone->sensor_conf;
-
- if (th_zone->bind == false)
- return 0;
-
- tab_size = data->cooling_data.freq_clip_count;
-
- if (tab_size == 0)
- return -EINVAL;
-
- /* find the cooling device registered*/
- for (i = 0; i < th_zone->cool_dev_size; i++)
- if (cdev == th_zone->cool_dev[i])
- break;
-
- /* No matching cooling device */
- if (i == th_zone->cool_dev_size)
- return 0;
-
- /* Bind the thermal zone to the cpufreq cooling device */
- for (i = 0; i < tab_size; i++) {
- switch (GET_ZONE(i)) {
- case MONITOR_ZONE:
- case WARN_ZONE:
- if (thermal_zone_unbind_cooling_device(thermal, i,
- cdev)) {
- pr_err("error unbinding cdev inst=%d\n", i);
- ret = -EINVAL;
- }
- th_zone->bind = false;
- break;
- default:
- ret = -EINVAL;
- }
- }
- return ret;
-}
-
-/* Get temperature callback functions for thermal zone */
-static int exynos_get_temp(struct thermal_zone_device *thermal,
- unsigned long *temp)
-{
- void *data;
-
- if (!th_zone->sensor_conf) {
- pr_info("Temperature sensor not initialised\n");
- return -EINVAL;
- }
- data = th_zone->sensor_conf->private_data;
- *temp = th_zone->sensor_conf->read_temperature(data);
- /* convert the temperature into millicelsius */
- *temp = *temp * MCELSIUS;
- return 0;
-}
-
-/* Get temperature callback functions for thermal zone */
-static int exynos_set_emul_temp(struct thermal_zone_device *thermal,
- unsigned long temp)
-{
- void *data;
- int ret = -EINVAL;
-
- if (!th_zone->sensor_conf) {
- pr_info("Temperature sensor not initialised\n");
- return -EINVAL;
- }
- data = th_zone->sensor_conf->private_data;
- if (th_zone->sensor_conf->write_emul_temp)
- ret = th_zone->sensor_conf->write_emul_temp(data, temp);
- return ret;
-}
-
-/* Get the temperature trend */
-static int exynos_get_trend(struct thermal_zone_device *thermal,
- int trip, enum thermal_trend *trend)
-{
- int ret;
- unsigned long trip_temp;
-
- ret = exynos_get_trip_temp(thermal, trip, &trip_temp);
- if (ret < 0)
- return ret;
-
- if (thermal->temperature >= trip_temp)
- *trend = THERMAL_TREND_RAISE_FULL;
- else
- *trend = THERMAL_TREND_DROP_FULL;
-
- return 0;
-}
-/* Operation callback functions for thermal zone */
-static struct thermal_zone_device_ops const exynos_dev_ops = {
- .bind = exynos_bind,
- .unbind = exynos_unbind,
- .get_temp = exynos_get_temp,
- .set_emul_temp = exynos_set_emul_temp,
- .get_trend = exynos_get_trend,
- .get_mode = exynos_get_mode,
- .set_mode = exynos_set_mode,
- .get_trip_type = exynos_get_trip_type,
- .get_trip_temp = exynos_get_trip_temp,
- .get_crit_temp = exynos_get_crit_temp,
-};
-
-/*
- * This function may be called from interrupt based temperature sensor
- * when threshold is changed.
- */
-static void exynos_report_trigger(void)
-{
- unsigned int i;
- char data[10];
- char *envp[] = { data, NULL };
-
- if (!th_zone || !th_zone->therm_dev)
- return;
- if (th_zone->bind == false) {
- for (i = 0; i < th_zone->cool_dev_size; i++) {
- if (!th_zone->cool_dev[i])
- continue;
- exynos_bind(th_zone->therm_dev,
- th_zone->cool_dev[i]);
- }
- }
-
- thermal_zone_device_update(th_zone->therm_dev);
-
- mutex_lock(&th_zone->therm_dev->lock);
- /* Find the level for which trip happened */
- for (i = 0; i < th_zone->sensor_conf->trip_data.trip_count; i++) {
- if (th_zone->therm_dev->last_temperature <
- th_zone->sensor_conf->trip_data.trip_val[i] * MCELSIUS)
- break;
- }
-
- if (th_zone->mode == THERMAL_DEVICE_ENABLED &&
- !th_zone->sensor_conf->trip_data.trigger_falling) {
- if (i > 0)
- th_zone->therm_dev->polling_delay = ACTIVE_INTERVAL;
- else
- th_zone->therm_dev->polling_delay = IDLE_INTERVAL;
- }
-
- snprintf(data, sizeof(data), "%u", i);
- kobject_uevent_env(&th_zone->therm_dev->device.kobj, KOBJ_CHANGE, envp);
- mutex_unlock(&th_zone->therm_dev->lock);
-}
-
-/* Register with the in-kernel thermal management */
-static int exynos_register_thermal(struct thermal_sensor_conf *sensor_conf)
-{
- int ret;
- struct cpumask mask_val;
-
- if (!sensor_conf || !sensor_conf->read_temperature) {
- pr_err("Temperature sensor not initialised\n");
- return -EINVAL;
- }
-
- th_zone = kzalloc(sizeof(struct exynos_thermal_zone), GFP_KERNEL);
- if (!th_zone)
- return -ENOMEM;
-
- th_zone->sensor_conf = sensor_conf;
- cpumask_set_cpu(0, &mask_val);
- th_zone->cool_dev[0] = cpufreq_cooling_register(&mask_val);
- if (IS_ERR(th_zone->cool_dev[0])) {
- pr_err("Failed to register cpufreq cooling device\n");
- ret = -EINVAL;
- goto err_unregister;
- }
- th_zone->cool_dev_size++;
-
- th_zone->therm_dev = thermal_zone_device_register(sensor_conf->name,
- EXYNOS_ZONE_COUNT, 0, NULL, &exynos_dev_ops, NULL, 0,
- sensor_conf->trip_data.trigger_falling ?
- 0 : IDLE_INTERVAL);
-
- if (IS_ERR(th_zone->therm_dev)) {
- pr_err("Failed to register thermal zone device\n");
- ret = PTR_ERR(th_zone->therm_dev);
- goto err_unregister;
- }
- th_zone->mode = THERMAL_DEVICE_ENABLED;
-
- pr_info("Exynos: Kernel Thermal management registered\n");
-
- return 0;
-
-err_unregister:
- exynos_unregister_thermal();
- return ret;
-}
-
-/* Un-Register with the in-kernel thermal management */
-static void exynos_unregister_thermal(void)
-{
- int i;
-
- if (!th_zone)
- return;
-
- if (th_zone->therm_dev)
- thermal_zone_device_unregister(th_zone->therm_dev);
-
- for (i = 0; i < th_zone->cool_dev_size; i++) {
- if (th_zone->cool_dev[i])
- cpufreq_cooling_unregister(th_zone->cool_dev[i]);
- }
-
- kfree(th_zone);
- pr_info("Exynos: Kernel Thermal management unregistered\n");
-}
-
-/*
- * TMU treats temperature as a mapped temperature code.
- * The temperature is converted differently depending on the calibration type.
- */
-static int temp_to_code(struct exynos_tmu_data *data, u8 temp)
-{
- struct exynos_tmu_platform_data *pdata = data->pdata;
- int temp_code;
-
- if (data->soc == SOC_ARCH_EXYNOS4210)
- /* temp should range between 25 and 125 */
- if (temp < 25 || temp > 125) {
- temp_code = -EINVAL;
- goto out;
- }
-
- switch (pdata->cal_type) {
- case TYPE_TWO_POINT_TRIMMING:
- temp_code = (temp - 25) *
- (data->temp_error2 - data->temp_error1) /
- (85 - 25) + data->temp_error1;
- break;
- case TYPE_ONE_POINT_TRIMMING:
- temp_code = temp + data->temp_error1 - 25;
- break;
- default:
- temp_code = temp + EXYNOS_TMU_DEF_CODE_TO_TEMP_OFFSET;
- break;
- }
-out:
- return temp_code;
-}
-
-/*
- * Calculate a temperature value from a temperature code.
- * The unit of the temperature is degree Celsius.
- */
-static int code_to_temp(struct exynos_tmu_data *data, u8 temp_code)
-{
- struct exynos_tmu_platform_data *pdata = data->pdata;
- int temp;
-
- if (data->soc == SOC_ARCH_EXYNOS4210)
- /* temp_code should range between 75 and 175 */
- if (temp_code < 75 || temp_code > 175) {
- temp = -ENODATA;
- goto out;
- }
-
- switch (pdata->cal_type) {
- case TYPE_TWO_POINT_TRIMMING:
- temp = (temp_code - data->temp_error1) * (85 - 25) /
- (data->temp_error2 - data->temp_error1) + 25;
- break;
- case TYPE_ONE_POINT_TRIMMING:
- temp = temp_code - data->temp_error1 + 25;
- break;
- default:
- temp = temp_code - EXYNOS_TMU_DEF_CODE_TO_TEMP_OFFSET;
- break;
- }
-out:
- return temp;
-}
-
-static int exynos_tmu_initialize(struct platform_device *pdev)
-{
- struct exynos_tmu_data *data = platform_get_drvdata(pdev);
- struct exynos_tmu_platform_data *pdata = data->pdata;
- unsigned int status, trim_info;
- unsigned int rising_threshold = 0, falling_threshold = 0;
- int ret = 0, threshold_code, i, trigger_levs = 0;
-
- mutex_lock(&data->lock);
- clk_enable(data->clk);
-
- status = readb(data->base + EXYNOS_TMU_REG_STATUS);
- if (!status) {
- ret = -EBUSY;
- goto out;
- }
-
- if (data->soc == SOC_ARCH_EXYNOS) {
- __raw_writel(EXYNOS_TRIMINFO_RELOAD,
- data->base + EXYNOS_TMU_TRIMINFO_CON);
- }
- /* Save trimming info in order to perform calibration */
- trim_info = readl(data->base + EXYNOS_TMU_REG_TRIMINFO);
- data->temp_error1 = trim_info & EXYNOS_TMU_TRIM_TEMP_MASK;
- data->temp_error2 = ((trim_info >> 8) & EXYNOS_TMU_TRIM_TEMP_MASK);
-
- if ((EFUSE_MIN_VALUE > data->temp_error1) ||
- (data->temp_error1 > EFUSE_MAX_VALUE) ||
- (data->temp_error2 != 0))
- data->temp_error1 = pdata->efuse_value;
-
- /* Count trigger levels to be enabled */
- for (i = 0; i < MAX_THRESHOLD_LEVS; i++)
- if (pdata->trigger_levels[i])
- trigger_levs++;
-
- if (data->soc == SOC_ARCH_EXYNOS4210) {
- /* Write temperature code for threshold */
- threshold_code = temp_to_code(data, pdata->threshold);
- if (threshold_code < 0) {
- ret = threshold_code;
- goto out;
- }
- writeb(threshold_code,
- data->base + EXYNOS4210_TMU_REG_THRESHOLD_TEMP);
- for (i = 0; i < trigger_levs; i++)
- writeb(pdata->trigger_levels[i],
- data->base + EXYNOS4210_TMU_REG_TRIG_LEVEL0 + i * 4);
-
- writel(EXYNOS4210_TMU_INTCLEAR_VAL,
- data->base + EXYNOS_TMU_REG_INTCLEAR);
- } else if (data->soc == SOC_ARCH_EXYNOS) {
- /* Write temperature code for rising and falling threshold */
- for (i = 0; i < trigger_levs; i++) {
- threshold_code = temp_to_code(data,
- pdata->trigger_levels[i]);
- if (threshold_code < 0) {
- ret = threshold_code;
- goto out;
- }
- rising_threshold |= threshold_code << 8 * i;
- if (pdata->threshold_falling) {
- threshold_code = temp_to_code(data,
- pdata->trigger_levels[i] -
- pdata->threshold_falling);
- if (threshold_code > 0)
- falling_threshold |=
- threshold_code << 8 * i;
- }
- }
-
- writel(rising_threshold,
- data->base + EXYNOS_THD_TEMP_RISE);
- writel(falling_threshold,
- data->base + EXYNOS_THD_TEMP_FALL);
-
- writel(EXYNOS_TMU_CLEAR_RISE_INT | EXYNOS_TMU_CLEAR_FALL_INT,
- data->base + EXYNOS_TMU_REG_INTCLEAR);
- }
-out:
- clk_disable(data->clk);
- mutex_unlock(&data->lock);
-
- return ret;
-}
-
-static void exynos_tmu_control(struct platform_device *pdev, bool on)
-{
- struct exynos_tmu_data *data = platform_get_drvdata(pdev);
- struct exynos_tmu_platform_data *pdata = data->pdata;
- unsigned int con, interrupt_en;
-
- mutex_lock(&data->lock);
- clk_enable(data->clk);
-
- con = pdata->reference_voltage << EXYNOS_TMU_REF_VOLTAGE_SHIFT |
- pdata->gain << EXYNOS_TMU_GAIN_SHIFT;
-
- if (data->soc == SOC_ARCH_EXYNOS) {
- con |= pdata->noise_cancel_mode << EXYNOS_TMU_TRIP_MODE_SHIFT;
- con |= (EXYNOS_MUX_ADDR_VALUE << EXYNOS_MUX_ADDR_SHIFT);
- }
-
- if (on) {
- con |= EXYNOS_TMU_CORE_ON;
- interrupt_en = pdata->trigger_level3_en << 12 |
- pdata->trigger_level2_en << 8 |
- pdata->trigger_level1_en << 4 |
- pdata->trigger_level0_en;
- if (pdata->threshold_falling)
- interrupt_en |= interrupt_en << 16;
- } else {
- con |= EXYNOS_TMU_CORE_OFF;
- interrupt_en = 0; /* Disable all interrupts */
- }
- writel(interrupt_en, data->base + EXYNOS_TMU_REG_INTEN);
- writel(con, data->base + EXYNOS_TMU_REG_CONTROL);
-
- clk_disable(data->clk);
- mutex_unlock(&data->lock);
-}
-
-static int exynos_tmu_read(struct exynos_tmu_data *data)
-{
- u8 temp_code;
- int temp;
-
- mutex_lock(&data->lock);
- clk_enable(data->clk);
-
- temp_code = readb(data->base + EXYNOS_TMU_REG_CURRENT_TEMP);
- temp = code_to_temp(data, temp_code);
-
- clk_disable(data->clk);
- mutex_unlock(&data->lock);
-
- return temp;
-}
-
-#ifdef CONFIG_THERMAL_EMULATION
-static int exynos_tmu_set_emulation(void *drv_data, unsigned long temp)
-{
- struct exynos_tmu_data *data = drv_data;
- unsigned int reg;
- int ret = -EINVAL;
-
- if (data->soc == SOC_ARCH_EXYNOS4210)
- goto out;
-
- if (temp && temp < MCELSIUS)
- goto out;
-
- mutex_lock(&data->lock);
- clk_enable(data->clk);
-
- reg = readl(data->base + EXYNOS_EMUL_CON);
-
- if (temp) {
- temp /= MCELSIUS;
-
- reg = (EXYNOS_EMUL_TIME << EXYNOS_EMUL_TIME_SHIFT) |
- (temp_to_code(data, temp)
- << EXYNOS_EMUL_DATA_SHIFT) | EXYNOS_EMUL_ENABLE;
- } else {
- reg &= ~EXYNOS_EMUL_ENABLE;
- }
-
- writel(reg, data->base + EXYNOS_EMUL_CON);
-
- clk_disable(data->clk);
- mutex_unlock(&data->lock);
- return 0;
-out:
- return ret;
-}
-#else
-static int exynos_tmu_set_emulation(void *drv_data, unsigned long temp)
- { return -EINVAL; }
-#endif/*CONFIG_THERMAL_EMULATION*/
-
-static void exynos_tmu_work(struct work_struct *work)
-{
- struct exynos_tmu_data *data = container_of(work,
- struct exynos_tmu_data, irq_work);
-
- exynos_report_trigger();
- mutex_lock(&data->lock);
- clk_enable(data->clk);
- if (data->soc == SOC_ARCH_EXYNOS)
- writel(EXYNOS_TMU_CLEAR_RISE_INT |
- EXYNOS_TMU_CLEAR_FALL_INT,
- data->base + EXYNOS_TMU_REG_INTCLEAR);
- else
- writel(EXYNOS4210_TMU_INTCLEAR_VAL,
- data->base + EXYNOS_TMU_REG_INTCLEAR);
- clk_disable(data->clk);
- mutex_unlock(&data->lock);
-
- enable_irq(data->irq);
-}
-
-static irqreturn_t exynos_tmu_irq(int irq, void *id)
-{
- struct exynos_tmu_data *data = id;
-
- disable_irq_nosync(irq);
- schedule_work(&data->irq_work);
-
- return IRQ_HANDLED;
-}
-static struct thermal_sensor_conf exynos_sensor_conf = {
- .name = "exynos-therm",
- .read_temperature = (int (*)(void *))exynos_tmu_read,
- .write_emul_temp = exynos_tmu_set_emulation,
-};
-
-#if defined(CONFIG_CPU_EXYNOS4210)
-static struct exynos_tmu_platform_data const exynos4210_default_tmu_data = {
- .threshold = 80,
- .trigger_levels[0] = 5,
- .trigger_levels[1] = 20,
- .trigger_levels[2] = 30,
- .trigger_level0_en = 1,
- .trigger_level1_en = 1,
- .trigger_level2_en = 1,
- .trigger_level3_en = 0,
- .gain = 15,
- .reference_voltage = 7,
- .cal_type = TYPE_ONE_POINT_TRIMMING,
- .freq_tab[0] = {
- .freq_clip_max = 800 * 1000,
- .temp_level = 85,
- },
- .freq_tab[1] = {
- .freq_clip_max = 200 * 1000,
- .temp_level = 100,
- },
- .freq_tab_count = 2,
- .type = SOC_ARCH_EXYNOS4210,
-};
-#define EXYNOS4210_TMU_DRV_DATA (&exynos4210_default_tmu_data)
-#else
-#define EXYNOS4210_TMU_DRV_DATA (NULL)
-#endif
-
-#if defined(CONFIG_SOC_EXYNOS5250) || defined(CONFIG_SOC_EXYNOS4412) || \
- defined(CONFIG_SOC_EXYNOS4212)
-static struct exynos_tmu_platform_data const exynos_default_tmu_data = {
- .threshold_falling = 10,
- .trigger_levels[0] = 85,
- .trigger_levels[1] = 103,
- .trigger_levels[2] = 110,
- .trigger_level0_en = 1,
- .trigger_level1_en = 1,
- .trigger_level2_en = 1,
- .trigger_level3_en = 0,
- .gain = 8,
- .reference_voltage = 16,
- .noise_cancel_mode = 4,
- .cal_type = TYPE_ONE_POINT_TRIMMING,
- .efuse_value = 55,
- .freq_tab[0] = {
- .freq_clip_max = 800 * 1000,
- .temp_level = 85,
- },
- .freq_tab[1] = {
- .freq_clip_max = 200 * 1000,
- .temp_level = 103,
- },
- .freq_tab_count = 2,
- .type = SOC_ARCH_EXYNOS,
-};
-#define EXYNOS_TMU_DRV_DATA (&exynos_default_tmu_data)
-#else
-#define EXYNOS_TMU_DRV_DATA (NULL)
-#endif
-
-#ifdef CONFIG_OF
-static const struct of_device_id exynos_tmu_match[] = {
- {
- .compatible = "samsung,exynos4210-tmu",
- .data = (void *)EXYNOS4210_TMU_DRV_DATA,
- },
- {
- .compatible = "samsung,exynos4412-tmu",
- .data = (void *)EXYNOS_TMU_DRV_DATA,
- },
- {
- .compatible = "samsung,exynos5250-tmu",
- .data = (void *)EXYNOS_TMU_DRV_DATA,
- },
- {},
-};
-MODULE_DEVICE_TABLE(of, exynos_tmu_match);
-#endif
-
-static struct platform_device_id exynos_tmu_driver_ids[] = {
- {
- .name = "exynos4210-tmu",
- .driver_data = (kernel_ulong_t)EXYNOS4210_TMU_DRV_DATA,
- },
- {
- .name = "exynos5250-tmu",
- .driver_data = (kernel_ulong_t)EXYNOS_TMU_DRV_DATA,
- },
- { },
-};
-MODULE_DEVICE_TABLE(platform, exynos_tmu_driver_ids);
-
-static inline struct exynos_tmu_platform_data *exynos_get_driver_data(
- struct platform_device *pdev)
-{
-#ifdef CONFIG_OF
- if (pdev->dev.of_node) {
- const struct of_device_id *match;
- match = of_match_node(exynos_tmu_match, pdev->dev.of_node);
- if (!match)
- return NULL;
- return (struct exynos_tmu_platform_data *) match->data;
- }
-#endif
- return (struct exynos_tmu_platform_data *)
- platform_get_device_id(pdev)->driver_data;
-}
-
-static int exynos_tmu_probe(struct platform_device *pdev)
-{
- struct exynos_tmu_data *data;
- struct exynos_tmu_platform_data *pdata = pdev->dev.platform_data;
- int ret, i;
-
- if (!pdata)
- pdata = exynos_get_driver_data(pdev);
-
- if (!pdata) {
- dev_err(&pdev->dev, "No platform init data supplied.\n");
- return -ENODEV;
- }
- data = devm_kzalloc(&pdev->dev, sizeof(struct exynos_tmu_data),
- GFP_KERNEL);
- if (!data) {
- dev_err(&pdev->dev, "Failed to allocate driver structure\n");
- return -ENOMEM;
- }
-
- data->irq = platform_get_irq(pdev, 0);
- if (data->irq < 0) {
- dev_err(&pdev->dev, "Failed to get platform irq\n");
- return data->irq;
- }
-
- INIT_WORK(&data->irq_work, exynos_tmu_work);
-
- data->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- data->base = devm_ioremap_resource(&pdev->dev, data->mem);
- if (IS_ERR(data->base))
- return PTR_ERR(data->base);
-
- ret = devm_request_irq(&pdev->dev, data->irq, exynos_tmu_irq,
- IRQF_TRIGGER_RISING, "exynos-tmu", data);
- if (ret) {
- dev_err(&pdev->dev, "Failed to request irq: %d\n", data->irq);
- return ret;
- }
-
- data->clk = devm_clk_get(&pdev->dev, "tmu_apbif");
- if (IS_ERR(data->clk)) {
- dev_err(&pdev->dev, "Failed to get clock\n");
- return PTR_ERR(data->clk);
- }
-
- ret = clk_prepare(data->clk);
- if (ret)
- return ret;
-
- if (pdata->type == SOC_ARCH_EXYNOS ||
- pdata->type == SOC_ARCH_EXYNOS4210)
- data->soc = pdata->type;
- else {
- ret = -EINVAL;
- dev_err(&pdev->dev, "Platform not supported\n");
- goto err_clk;
- }
-
- data->pdata = pdata;
- platform_set_drvdata(pdev, data);
- mutex_init(&data->lock);
-
- ret = exynos_tmu_initialize(pdev);
- if (ret) {
- dev_err(&pdev->dev, "Failed to initialize TMU\n");
- goto err_clk;
- }
-
- exynos_tmu_control(pdev, true);
-
- /* Register the sensor with thermal management interface */
- (&exynos_sensor_conf)->private_data = data;
- exynos_sensor_conf.trip_data.trip_count = pdata->trigger_level0_en +
- pdata->trigger_level1_en + pdata->trigger_level2_en +
- pdata->trigger_level3_en;
-
- for (i = 0; i < exynos_sensor_conf.trip_data.trip_count; i++)
- exynos_sensor_conf.trip_data.trip_val[i] =
- pdata->threshold + pdata->trigger_levels[i];
-
- exynos_sensor_conf.trip_data.trigger_falling = pdata->threshold_falling;
-
- exynos_sensor_conf.cooling_data.freq_clip_count =
- pdata->freq_tab_count;
- for (i = 0; i < pdata->freq_tab_count; i++) {
- exynos_sensor_conf.cooling_data.freq_data[i].freq_clip_max =
- pdata->freq_tab[i].freq_clip_max;
- exynos_sensor_conf.cooling_data.freq_data[i].temp_level =
- pdata->freq_tab[i].temp_level;
- }
-
- ret = exynos_register_thermal(&exynos_sensor_conf);
- if (ret) {
- dev_err(&pdev->dev, "Failed to register thermal interface\n");
- goto err_clk;
- }
-
- return 0;
-err_clk:
- clk_unprepare(data->clk);
- return ret;
-}
-
-static int exynos_tmu_remove(struct platform_device *pdev)
-{
- struct exynos_tmu_data *data = platform_get_drvdata(pdev);
-
- exynos_tmu_control(pdev, false);
-
- exynos_unregister_thermal();
-
- clk_unprepare(data->clk);
-
- return 0;
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int exynos_tmu_suspend(struct device *dev)
-{
- exynos_tmu_control(to_platform_device(dev), false);
-
- return 0;
-}
-
-static int exynos_tmu_resume(struct device *dev)
-{
- struct platform_device *pdev = to_platform_device(dev);
-
- exynos_tmu_initialize(pdev);
- exynos_tmu_control(pdev, true);
-
- return 0;
-}
-
-static SIMPLE_DEV_PM_OPS(exynos_tmu_pm,
- exynos_tmu_suspend, exynos_tmu_resume);
-#define EXYNOS_TMU_PM (&exynos_tmu_pm)
-#else
-#define EXYNOS_TMU_PM NULL
-#endif
-
-static struct platform_driver exynos_tmu_driver = {
- .driver = {
- .name = "exynos-tmu",
- .owner = THIS_MODULE,
- .pm = EXYNOS_TMU_PM,
- .of_match_table = of_match_ptr(exynos_tmu_match),
- },
- .probe = exynos_tmu_probe,
- .remove = exynos_tmu_remove,
- .id_table = exynos_tmu_driver_ids,
-};
-
-module_platform_driver(exynos_tmu_driver);
-
-MODULE_DESCRIPTION("EXYNOS TMU Driver");
-MODULE_AUTHOR("Donggeun Kim <dg77.kim@samsung.com>");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:exynos-tmu");
diff --git a/drivers/thermal/imx_thermal.c b/drivers/thermal/imx_thermal.c
new file mode 100644
index 0000000..1d6c801
--- /dev/null
+++ b/drivers/thermal/imx_thermal.c
@@ -0,0 +1,541 @@
+/*
+ * Copyright 2013 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/cpu_cooling.h>
+#include <linux/cpufreq.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/thermal.h>
+#include <linux/types.h>
+
+#define REG_SET 0x4
+#define REG_CLR 0x8
+#define REG_TOG 0xc
+
+#define MISC0 0x0150
+#define MISC0_REFTOP_SELBIASOFF (1 << 3)
+
+#define TEMPSENSE0 0x0180
+#define TEMPSENSE0_ALARM_VALUE_SHIFT 20
+#define TEMPSENSE0_ALARM_VALUE_MASK (0xfff << TEMPSENSE0_ALARM_VALUE_SHIFT)
+#define TEMPSENSE0_TEMP_CNT_SHIFT 8
+#define TEMPSENSE0_TEMP_CNT_MASK (0xfff << TEMPSENSE0_TEMP_CNT_SHIFT)
+#define TEMPSENSE0_FINISHED (1 << 2)
+#define TEMPSENSE0_MEASURE_TEMP (1 << 1)
+#define TEMPSENSE0_POWER_DOWN (1 << 0)
+
+#define TEMPSENSE1 0x0190
+#define TEMPSENSE1_MEASURE_FREQ 0xffff
+
+#define OCOTP_ANA1 0x04e0
+
+/* The driver supports 1 passive trip point and 1 critical trip point */
+enum imx_thermal_trip {
+ IMX_TRIP_PASSIVE,
+ IMX_TRIP_CRITICAL,
+ IMX_TRIP_NUM,
+};
+
+/*
+ * This defines the temperature in millicelsius for the passive trip
+ * point that will trigger a cooling action when crossed.
+ */
+#define IMX_TEMP_PASSIVE 85000
+
+#define IMX_POLLING_DELAY 2000 /* millisecond */
+#define IMX_PASSIVE_DELAY 1000
+
+struct imx_thermal_data {
+ struct thermal_zone_device *tz;
+ struct thermal_cooling_device *cdev;
+ enum thermal_device_mode mode;
+ struct regmap *tempmon;
+ int c1, c2; /* See formula in imx_get_sensor_data() */
+ unsigned long temp_passive;
+ unsigned long temp_critical;
+ unsigned long alarm_temp;
+ unsigned long last_temp;
+ bool irq_enabled;
+ int irq;
+};
+
+static void imx_set_alarm_temp(struct imx_thermal_data *data,
+ signed long alarm_temp)
+{
+ struct regmap *map = data->tempmon;
+ int alarm_value;
+
+ data->alarm_temp = alarm_temp;
+ alarm_value = (alarm_temp - data->c2) / data->c1;
+ regmap_write(map, TEMPSENSE0 + REG_CLR, TEMPSENSE0_ALARM_VALUE_MASK);
+ regmap_write(map, TEMPSENSE0 + REG_SET, alarm_value <<
+ TEMPSENSE0_ALARM_VALUE_SHIFT);
+}
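+
+/*
+ * The conversion above is the inverse of the one in imx_get_temp():
+ * temperature = c2 + c1 * count, hence count = (alarm_temp - c2) / c1
+ * for a target alarm temperature (c1 and c2 are derived from the
+ * calibration data, see imx_get_sensor_data()).
+ */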
+
+static int imx_get_temp(struct thermal_zone_device *tz, unsigned long *temp)
+{
+ struct imx_thermal_data *data = tz->devdata;
+ struct regmap *map = data->tempmon;
+ unsigned int n_meas;
+ bool wait;
+ u32 val;
+
+ if (data->mode == THERMAL_DEVICE_ENABLED) {
+ /* Check if a measurement is currently in progress */
+ regmap_read(map, TEMPSENSE0, &val);
+ wait = !(val & TEMPSENSE0_FINISHED);
+ } else {
+ /*
+ * Every time we measure the temperature, we power on the
+ * temperature sensor, enable measurements, take a reading,
+ * disable measurements, and power the sensor off again.
+ */
+ regmap_write(map, TEMPSENSE0 + REG_CLR, TEMPSENSE0_POWER_DOWN);
+ regmap_write(map, TEMPSENSE0 + REG_SET, TEMPSENSE0_MEASURE_TEMP);
+
+ wait = true;
+ }
+
+ /*
+ * According to the temp sensor designers, it may require up to ~17us
+ * to complete a measurement.
+ */
+ if (wait)
+ usleep_range(20, 50);
+
+ regmap_read(map, TEMPSENSE0, &val);
+
+ if (data->mode != THERMAL_DEVICE_ENABLED) {
+ regmap_write(map, TEMPSENSE0 + REG_CLR, TEMPSENSE0_MEASURE_TEMP);
+ regmap_write(map, TEMPSENSE0 + REG_SET, TEMPSENSE0_POWER_DOWN);
+ }
+
+ if ((val & TEMPSENSE0_FINISHED) == 0) {
+ dev_dbg(&tz->device, "temp measurement never finished\n");
+ return -EAGAIN;
+ }
+
+ n_meas = (val & TEMPSENSE0_TEMP_CNT_MASK) >> TEMPSENSE0_TEMP_CNT_SHIFT;
+
+ /* See imx_get_sensor_data() for formula derivation */
+ *temp = data->c2 + data->c1 * n_meas;
+
+ /* Update alarm value to next higher trip point */
+ if (data->alarm_temp == data->temp_passive && *temp >= data->temp_passive)
+ imx_set_alarm_temp(data, data->temp_critical);
+ if (data->alarm_temp == data->temp_critical && *temp < data->temp_passive) {
+ imx_set_alarm_temp(data, data->temp_passive);
+ dev_dbg(&tz->device, "thermal alarm off: T < %lu\n",
+ data->alarm_temp / 1000);
+ }
+
+ if (*temp != data->last_temp) {
+ dev_dbg(&tz->device, "millicelsius: %ld\n", *temp);
+ data->last_temp = *temp;
+ }
+
+ /* Reenable alarm IRQ if temperature below alarm temperature */
+ if (!data->irq_enabled && *temp < data->alarm_temp) {
+ data->irq_enabled = true;
+ enable_irq(data->irq);
+ }
+
+ return 0;
+}
+
+static int imx_get_mode(struct thermal_zone_device *tz,
+ enum thermal_device_mode *mode)
+{
+ struct imx_thermal_data *data = tz->devdata;
+
+ *mode = data->mode;
+
+ return 0;
+}
+
+static int imx_set_mode(struct thermal_zone_device *tz,
+ enum thermal_device_mode mode)
+{
+ struct imx_thermal_data *data = tz->devdata;
+ struct regmap *map = data->tempmon;
+
+ if (mode == THERMAL_DEVICE_ENABLED) {
+ tz->polling_delay = IMX_POLLING_DELAY;
+ tz->passive_delay = IMX_PASSIVE_DELAY;
+
+ regmap_write(map, TEMPSENSE0 + REG_CLR, TEMPSENSE0_POWER_DOWN);
+ regmap_write(map, TEMPSENSE0 + REG_SET, TEMPSENSE0_MEASURE_TEMP);
+
+ if (!data->irq_enabled) {
+ data->irq_enabled = true;
+ enable_irq(data->irq);
+ }
+ } else {
+ regmap_write(map, TEMPSENSE0 + REG_CLR, TEMPSENSE0_MEASURE_TEMP);
+ regmap_write(map, TEMPSENSE0 + REG_SET, TEMPSENSE0_POWER_DOWN);
+
+ tz->polling_delay = 0;
+ tz->passive_delay = 0;
+
+ if (data->irq_enabled) {
+ disable_irq(data->irq);
+ data->irq_enabled = false;
+ }
+ }
+
+ data->mode = mode;
+ thermal_zone_device_update(tz);
+
+ return 0;
+}
+
+static int imx_get_trip_type(struct thermal_zone_device *tz, int trip,
+ enum thermal_trip_type *type)
+{
+ *type = (trip == IMX_TRIP_PASSIVE) ? THERMAL_TRIP_PASSIVE :
+ THERMAL_TRIP_CRITICAL;
+ return 0;
+}
+
+static int imx_get_crit_temp(struct thermal_zone_device *tz,
+ unsigned long *temp)
+{
+ struct imx_thermal_data *data = tz->devdata;
+
+ *temp = data->temp_critical;
+ return 0;
+}
+
+static int imx_get_trip_temp(struct thermal_zone_device *tz, int trip,
+ unsigned long *temp)
+{
+ struct imx_thermal_data *data = tz->devdata;
+
+ *temp = (trip == IMX_TRIP_PASSIVE) ? data->temp_passive :
+ data->temp_critical;
+ return 0;
+}
+
+static int imx_set_trip_temp(struct thermal_zone_device *tz, int trip,
+ unsigned long temp)
+{
+ struct imx_thermal_data *data = tz->devdata;
+
+ if (trip == IMX_TRIP_CRITICAL)
+ return -EPERM;
+
+ if (temp > IMX_TEMP_PASSIVE)
+ return -EINVAL;
+
+ data->temp_passive = temp;
+
+ imx_set_alarm_temp(data, temp);
+
+ return 0;
+}
+
+static int imx_bind(struct thermal_zone_device *tz,
+ struct thermal_cooling_device *cdev)
+{
+ int ret;
+
+ ret = thermal_zone_bind_cooling_device(tz, IMX_TRIP_PASSIVE, cdev,
+ THERMAL_NO_LIMIT,
+ THERMAL_NO_LIMIT);
+ if (ret) {
+ dev_err(&tz->device,
+ "binding zone %s with cdev %s failed:%d\n",
+ tz->type, cdev->type, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int imx_unbind(struct thermal_zone_device *tz,
+ struct thermal_cooling_device *cdev)
+{
+ int ret;
+
+ ret = thermal_zone_unbind_cooling_device(tz, IMX_TRIP_PASSIVE, cdev);
+ if (ret) {
+ dev_err(&tz->device,
+ "unbinding zone %s with cdev %s failed:%d\n",
+ tz->type, cdev->type, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct thermal_zone_device_ops imx_tz_ops = {
+ .bind = imx_bind,
+ .unbind = imx_unbind,
+ .get_temp = imx_get_temp,
+ .get_mode = imx_get_mode,
+ .set_mode = imx_set_mode,
+ .get_trip_type = imx_get_trip_type,
+ .get_trip_temp = imx_get_trip_temp,
+ .get_crit_temp = imx_get_crit_temp,
+ .set_trip_temp = imx_set_trip_temp,
+};
+
+static int imx_get_sensor_data(struct platform_device *pdev)
+{
+ struct imx_thermal_data *data = platform_get_drvdata(pdev);
+ struct regmap *map;
+ int t1, t2, n1, n2;
+ int ret;
+ u32 val;
+
+ map = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+ "fsl,tempmon-data");
+ if (IS_ERR(map)) {
+ ret = PTR_ERR(map);
+ dev_err(&pdev->dev, "failed to get sensor regmap: %d\n", ret);
+ return ret;
+ }
+
+ ret = regmap_read(map, OCOTP_ANA1, &val);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to read sensor data: %d\n", ret);
+ return ret;
+ }
+
+ if (val == 0 || val == ~0) {
+ dev_err(&pdev->dev, "invalid sensor calibration data\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Sensor data layout:
+ * [31:20] - sensor value @ 25C
+ * [19:8] - sensor value at the hot temperature
+ * [7:0] - hot temperature value, in degrees Celsius
+ */
+ n1 = val >> 20;
+ n2 = (val & 0xfff00) >> 8;
+ t2 = val & 0xff;
+ t1 = 25; /* t1 always 25C */
+
+ /*
+ * Derived from linear interpolation,
+ * Tmeas = T2 + (Nmeas - N2) * (T1 - T2) / (N1 - N2)
+ * We want to reduce this down to the minimum computation necessary
+ * for each temperature read. Also, we want Tmeas in millicelsius
+ * and we don't want to lose precision from integer division. So...
+ * milli_Tmeas = 1000 * T2 + 1000 * (Nmeas - N2) * (T1 - T2) / (N1 - N2)
+ * Let constant c1 = 1000 * (T1 - T2) / (N1 - N2)
+ * milli_Tmeas = (1000 * T2) + c1 * (Nmeas - N2)
+ * milli_Tmeas = (1000 * T2) + (c1 * Nmeas) - (c1 * N2)
+ * Let constant c2 = (1000 * T2) - (c1 * N2)
+ * milli_Tmeas = c2 + (c1 * Nmeas)
+ */
+ data->c1 = 1000 * (t1 - t2) / (n1 - n2);
+ data->c2 = 1000 * t2 - data->c1 * n2;
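+
+ /*
+ * Worked example with a hypothetical fuse word, for illustration
+ * only: val = 0x6404b055 decodes to n1 = 1600, n2 = 1200, t2 = 85,
+ * giving c1 = 1000 * (25 - 85) / (1600 - 1200) = -150 and
+ * c2 = 1000 * 85 - (-150 * 1200) = 265000. A count of n_meas = 1600
+ * then reads back as 265000 - 150 * 1600 = 25000 millicelsius (25 C).
+ */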
+
+ /*
+ * Set the default passive cooling trip point to 20 °C below the
+ * maximum die temperature. Can be changed from userspace.
+ */
+ data->temp_passive = 1000 * (t2 - 20);
+
+ /*
+ * The maximum die temperature is t2, let's give 5 °C cushion
+ * for noise and possible temperature rise between measurements.
+ */
+ data->temp_critical = 1000 * (t2 - 5);
+
+ return 0;
+}
+
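+/*
+ * The hard IRQ handler only masks the alarm interrupt, which stays
+ * asserted while the temperature is above the alarm value; it is
+ * re-enabled from imx_get_temp() once the temperature drops back
+ * below data->alarm_temp.
+ */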
+static irqreturn_t imx_thermal_alarm_irq(int irq, void *dev)
+{
+ struct imx_thermal_data *data = dev;
+
+ disable_irq_nosync(irq);
+ data->irq_enabled = false;
+
+ return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t imx_thermal_alarm_irq_thread(int irq, void *dev)
+{
+ struct imx_thermal_data *data = dev;
+
+ dev_dbg(&data->tz->device, "THERMAL ALARM: T > %lu\n",
+ data->alarm_temp / 1000);
+
+ thermal_zone_device_update(data->tz);
+
+ return IRQ_HANDLED;
+}
+
+static int imx_thermal_probe(struct platform_device *pdev)
+{
+ struct imx_thermal_data *data;
+ struct cpumask clip_cpus;
+ struct regmap *map;
+ int measure_freq;
+ int ret;
+
+ data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ map = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, "fsl,tempmon");
+ if (IS_ERR(map)) {
+ ret = PTR_ERR(map);
+ dev_err(&pdev->dev, "failed to get tempmon regmap: %d\n", ret);
+ return ret;
+ }
+ data->tempmon = map;
+
+ data->irq = platform_get_irq(pdev, 0);
+ if (data->irq < 0)
+ return data->irq;
+
+ ret = devm_request_threaded_irq(&pdev->dev, data->irq,
+ imx_thermal_alarm_irq, imx_thermal_alarm_irq_thread,
+ 0, "imx_thermal", data);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to request alarm irq: %d\n", ret);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, data);
+
+ ret = imx_get_sensor_data(pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to get sensor data\n");
+ return ret;
+ }
+
+ /* Make sure sensor is in known good state for measurements */
+ regmap_write(map, TEMPSENSE0 + REG_CLR, TEMPSENSE0_POWER_DOWN);
+ regmap_write(map, TEMPSENSE0 + REG_CLR, TEMPSENSE0_MEASURE_TEMP);
+ regmap_write(map, TEMPSENSE1 + REG_CLR, TEMPSENSE1_MEASURE_FREQ);
+ regmap_write(map, MISC0 + REG_SET, MISC0_REFTOP_SELBIASOFF);
+ regmap_write(map, TEMPSENSE0 + REG_SET, TEMPSENSE0_POWER_DOWN);
+
+ cpumask_set_cpu(0, &clip_cpus);
+ data->cdev = cpufreq_cooling_register(&clip_cpus);
+ if (IS_ERR(data->cdev)) {
+ ret = PTR_ERR(data->cdev);
+ dev_err(&pdev->dev,
+ "failed to register cpufreq cooling device: %d\n", ret);
+ return ret;
+ }
+
+ data->tz = thermal_zone_device_register("imx_thermal_zone",
+ IMX_TRIP_NUM,
+ BIT(IMX_TRIP_PASSIVE), data,
+ &imx_tz_ops, NULL,
+ IMX_PASSIVE_DELAY,
+ IMX_POLLING_DELAY);
+ if (IS_ERR(data->tz)) {
+ ret = PTR_ERR(data->tz);
+ dev_err(&pdev->dev,
+ "failed to register thermal zone device %d\n", ret);
+ cpufreq_cooling_unregister(data->cdev);
+ return ret;
+ }
+
+ /* Enable measurements at ~ 10 Hz */
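+ /*
+ * The measurement frequency is programmed in ticks of the 32.768 kHz
+ * clock (hence the 32768 constant below): DIV_ROUND_UP(32768, 10) =
+ * 3277 ticks is roughly one automatic reading every 100 ms.
+ */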
+ regmap_write(map, TEMPSENSE1 + REG_CLR, TEMPSENSE1_MEASURE_FREQ);
+ measure_freq = DIV_ROUND_UP(32768, 10); /* 10 Hz */
+ regmap_write(map, TEMPSENSE1 + REG_SET, measure_freq);
+ imx_set_alarm_temp(data, data->temp_passive);
+ regmap_write(map, TEMPSENSE0 + REG_CLR, TEMPSENSE0_POWER_DOWN);
+ regmap_write(map, TEMPSENSE0 + REG_SET, TEMPSENSE0_MEASURE_TEMP);
+
+ data->irq_enabled = true;
+ data->mode = THERMAL_DEVICE_ENABLED;
+
+ return 0;
+}
+
+static int imx_thermal_remove(struct platform_device *pdev)
+{
+ struct imx_thermal_data *data = platform_get_drvdata(pdev);
+ struct regmap *map = data->tempmon;
+
+ /* Disable measurements */
+ regmap_write(map, TEMPSENSE0 + REG_SET, TEMPSENSE0_POWER_DOWN);
+
+ thermal_zone_device_unregister(data->tz);
+ cpufreq_cooling_unregister(data->cdev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int imx_thermal_suspend(struct device *dev)
+{
+ struct imx_thermal_data *data = dev_get_drvdata(dev);
+ struct regmap *map = data->tempmon;
+ u32 val;
+
+ regmap_read(map, TEMPSENSE0, &val);
+ if ((val & TEMPSENSE0_POWER_DOWN) == 0) {
+ /*
+ * If a measurement is taking place, wait long enough for it to
+ * finish, then check again. If it still has not finished,
+ * something must have gone wrong.
+ */
+ udelay(50);
+ regmap_read(map, TEMPSENSE0, &val);
+ if ((val & TEMPSENSE0_POWER_DOWN) == 0)
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int imx_thermal_resume(struct device *dev)
+{
+ /* Nothing to do for now */
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(imx_thermal_pm_ops,
+ imx_thermal_suspend, imx_thermal_resume);
+
+static const struct of_device_id of_imx_thermal_match[] = {
+ { .compatible = "fsl,imx6q-tempmon", },
+ { /* end */ }
+};
+
+static struct platform_driver imx_thermal = {
+ .driver = {
+ .name = "imx_thermal",
+ .owner = THIS_MODULE,
+ .pm = &imx_thermal_pm_ops,
+ .of_match_table = of_imx_thermal_match,
+ },
+ .probe = imx_thermal_probe,
+ .remove = imx_thermal_remove,
+};
+module_platform_driver(imx_thermal);
+
+MODULE_AUTHOR("Freescale Semiconductor, Inc.");
+MODULE_DESCRIPTION("Thermal driver for Freescale i.MX SoCs");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:imx-thermal");
diff --git a/drivers/thermal/samsung/Kconfig b/drivers/thermal/samsung/Kconfig
new file mode 100644
index 0000000..f760389
--- /dev/null
+++ b/drivers/thermal/samsung/Kconfig
@@ -0,0 +1,18 @@
+config EXYNOS_THERMAL
+ tristate "Exynos thermal management unit driver"
+ depends on ARCH_HAS_BANDGAP && OF
+ help
+ If you say yes here you get support for the TMU (Thermal Management
+ Unit) driver for SAMSUNG EXYNOS series of SoCs. This driver initialises
+ the TMU, reports temperature and handles cooling action if defined.
+ This driver uses the Exynos core thermal APIs and TMU configuration
+ data from the supported SoCs.
+
+config EXYNOS_THERMAL_CORE
+ bool "Core thermal framework support for EXYNOS SOCs"
+ depends on EXYNOS_THERMAL
+ help
+ If you say yes here you get support for EXYNOS TMU
+ (Thermal Management Unit) common registration/unregistration
+ functions to the core thermal layer and also to use the generic
+ CPU cooling APIs.
diff --git a/drivers/thermal/samsung/Makefile b/drivers/thermal/samsung/Makefile
new file mode 100644
index 0000000..c09d830
--- /dev/null
+++ b/drivers/thermal/samsung/Makefile
@@ -0,0 +1,7 @@
+#
+# Samsung thermal specific Makefile
+#
+obj-$(CONFIG_EXYNOS_THERMAL) += exynos_thermal.o
+exynos_thermal-y := exynos_tmu.o
+exynos_thermal-y += exynos_tmu_data.o
+exynos_thermal-$(CONFIG_EXYNOS_THERMAL_CORE) += exynos_thermal_common.o
diff --git a/drivers/thermal/samsung/exynos_thermal_common.c b/drivers/thermal/samsung/exynos_thermal_common.c
new file mode 100644
index 0000000..f10a6ad
--- /dev/null
+++ b/drivers/thermal/samsung/exynos_thermal_common.c
@@ -0,0 +1,432 @@
+/*
+ * exynos_thermal_common.c - Samsung EXYNOS common thermal file
+ *
+ * Copyright (C) 2013 Samsung Electronics
+ * Amit Daniel Kachhap <amit.daniel@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/cpu_cooling.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/thermal.h>
+
+#include "exynos_thermal_common.h"
+
+struct exynos_thermal_zone {
+ enum thermal_device_mode mode;
+ struct thermal_zone_device *therm_dev;
+ struct thermal_cooling_device *cool_dev[MAX_COOLING_DEVICE];
+ unsigned int cool_dev_size;
+ struct platform_device *exynos4_dev;
+ struct thermal_sensor_conf *sensor_conf;
+ bool bind;
+};
+
+/* Get mode callback functions for thermal zone */
+static int exynos_get_mode(struct thermal_zone_device *thermal,
+ enum thermal_device_mode *mode)
+{
+ struct exynos_thermal_zone *th_zone = thermal->devdata;
+ if (th_zone)
+ *mode = th_zone->mode;
+ return 0;
+}
+
+/* Set mode callback functions for thermal zone */
+static int exynos_set_mode(struct thermal_zone_device *thermal,
+ enum thermal_device_mode mode)
+{
+ struct exynos_thermal_zone *th_zone = thermal->devdata;
+ if (!th_zone) {
+ dev_err(&thermal->device,
+ "thermal zone not registered\n");
+ return 0;
+ }
+
+ mutex_lock(&thermal->lock);
+
+ if (mode == THERMAL_DEVICE_ENABLED &&
+ !th_zone->sensor_conf->trip_data.trigger_falling)
+ thermal->polling_delay = IDLE_INTERVAL;
+ else
+ thermal->polling_delay = 0;
+
+ mutex_unlock(&thermal->lock);
+
+ th_zone->mode = mode;
+ thermal_zone_device_update(thermal);
+ dev_dbg(th_zone->sensor_conf->dev,
+ "thermal polling set for duration=%d msec\n",
+ thermal->polling_delay);
+ return 0;
+}
+
+
+/* Get trip type callback functions for thermal zone */
+static int exynos_get_trip_type(struct thermal_zone_device *thermal, int trip,
+ enum thermal_trip_type *type)
+{
+ struct exynos_thermal_zone *th_zone = thermal->devdata;
+ int max_trip = th_zone->sensor_conf->trip_data.trip_count;
+ int trip_type;
+
+ if (trip < 0 || trip >= max_trip)
+ return -EINVAL;
+
+ trip_type = th_zone->sensor_conf->trip_data.trip_type[trip];
+
+ if (trip_type == SW_TRIP)
+ *type = THERMAL_TRIP_CRITICAL;
+ else if (trip_type == THROTTLE_ACTIVE)
+ *type = THERMAL_TRIP_ACTIVE;
+ else if (trip_type == THROTTLE_PASSIVE)
+ *type = THERMAL_TRIP_PASSIVE;
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
+/* Get trip temperature callback functions for thermal zone */
+static int exynos_get_trip_temp(struct thermal_zone_device *thermal, int trip,
+ unsigned long *temp)
+{
+ struct exynos_thermal_zone *th_zone = thermal->devdata;
+ int max_trip = th_zone->sensor_conf->trip_data.trip_count;
+
+ if (trip < 0 || trip >= max_trip)
+ return -EINVAL;
+
+ *temp = th_zone->sensor_conf->trip_data.trip_val[trip];
+ /* convert the temperature into millicelsius */
+ *temp = *temp * MCELSIUS;
+
+ return 0;
+}
+
+/* Get critical temperature callback functions for thermal zone */
+static int exynos_get_crit_temp(struct thermal_zone_device *thermal,
+ unsigned long *temp)
+{
+ struct exynos_thermal_zone *th_zone = thermal->devdata;
+ int max_trip = th_zone->sensor_conf->trip_data.trip_count;
+ /* Get the temp of the highest trip */
+ return exynos_get_trip_temp(thermal, max_trip - 1, temp);
+}
+
+/* Bind callback functions for thermal zone */
+static int exynos_bind(struct thermal_zone_device *thermal,
+ struct thermal_cooling_device *cdev)
+{
+ int ret = 0, i, tab_size, level;
+ struct freq_clip_table *tab_ptr, *clip_data;
+ struct exynos_thermal_zone *th_zone = thermal->devdata;
+ struct thermal_sensor_conf *data = th_zone->sensor_conf;
+
+ tab_ptr = (struct freq_clip_table *)data->cooling_data.freq_data;
+ tab_size = data->cooling_data.freq_clip_count;
+
+ if (tab_ptr == NULL || tab_size == 0)
+ return 0;
+
+ /* find the cooling device registered */
+ for (i = 0; i < th_zone->cool_dev_size; i++)
+ if (cdev == th_zone->cool_dev[i])
+ break;
+
+ /* No matching cooling device */
+ if (i == th_zone->cool_dev_size)
+ return 0;
+
+ /* Bind the thermal zone to the cpufreq cooling device */
+ for (i = 0; i < tab_size; i++) {
+ clip_data = (struct freq_clip_table *)&(tab_ptr[i]);
+ level = cpufreq_cooling_get_level(0, clip_data->freq_clip_max);
+ if (level == THERMAL_CSTATE_INVALID)
+ return 0;
+ switch (GET_ZONE(i)) {
+ case MONITOR_ZONE:
+ case WARN_ZONE:
+ if (thermal_zone_bind_cooling_device(thermal, i, cdev,
+ level, 0)) {
+ dev_err(data->dev,
+ "error unbinding cdev inst=%d\n", i);
+ ret = -EINVAL;
+ }
+ th_zone->bind = true;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ }
+
+ return ret;
+}
+
+/* Unbind callback functions for thermal zone */
+static int exynos_unbind(struct thermal_zone_device *thermal,
+ struct thermal_cooling_device *cdev)
+{
+ int ret = 0, i, tab_size;
+ struct exynos_thermal_zone *th_zone = thermal->devdata;
+ struct thermal_sensor_conf *data = th_zone->sensor_conf;
+
+ if (th_zone->bind == false)
+ return 0;
+
+ tab_size = data->cooling_data.freq_clip_count;
+
+ if (tab_size == 0)
+ return 0;
+
+ /* find the cooling device registered */
+ for (i = 0; i < th_zone->cool_dev_size; i++)
+ if (cdev == th_zone->cool_dev[i])
+ break;
+
+ /* No matching cooling device */
+ if (i == th_zone->cool_dev_size)
+ return 0;
+
+ /* Unbind the thermal zone from the cpufreq cooling device */
+ for (i = 0; i < tab_size; i++) {
+ switch (GET_ZONE(i)) {
+ case MONITOR_ZONE:
+ case WARN_ZONE:
+ if (thermal_zone_unbind_cooling_device(thermal, i,
+ cdev)) {
+ dev_err(data->dev,
+ "error unbinding cdev inst=%d\n", i);
+ ret = -EINVAL;
+ }
+ th_zone->bind = false;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ }
+ return ret;
+}
+
+/* Get temperature callback functions for thermal zone */
+static int exynos_get_temp(struct thermal_zone_device *thermal,
+ unsigned long *temp)
+{
+ struct exynos_thermal_zone *th_zone = thermal->devdata;
+ void *data;
+
+ if (!th_zone->sensor_conf) {
+ dev_err(&thermal->device,
+ "Temperature sensor not initialised\n");
+ return -EINVAL;
+ }
+ data = th_zone->sensor_conf->driver_data;
+ *temp = th_zone->sensor_conf->read_temperature(data);
+ /* convert the temperature into millicelsius */
+ *temp = *temp * MCELSIUS;
+ return 0;
+}
+
+/* Set emulated temperature callback function for thermal zone */
+static int exynos_set_emul_temp(struct thermal_zone_device *thermal,
+ unsigned long temp)
+{
+ void *data;
+ int ret = -EINVAL;
+ struct exynos_thermal_zone *th_zone = thermal->devdata;
+
+ if (!th_zone->sensor_conf) {
+ dev_err(&thermal->device,
+ "Temperature sensor not initialised\n");
+ return -EINVAL;
+ }
+ data = th_zone->sensor_conf->driver_data;
+ if (th_zone->sensor_conf->write_emul_temp)
+ ret = th_zone->sensor_conf->write_emul_temp(data, temp);
+ return ret;
+}
+
+/* Get the temperature trend */
+static int exynos_get_trend(struct thermal_zone_device *thermal,
+ int trip, enum thermal_trend *trend)
+{
+ int ret;
+ unsigned long trip_temp;
+
+ ret = exynos_get_trip_temp(thermal, trip, &trip_temp);
+ if (ret < 0)
+ return ret;
+
+ if (thermal->temperature >= trip_temp)
+ *trend = THERMAL_TREND_RAISE_FULL;
+ else
+ *trend = THERMAL_TREND_DROP_FULL;
+
+ return 0;
+}
+/* Operation callback functions for thermal zone */
+static struct thermal_zone_device_ops const exynos_dev_ops = {
+ .bind = exynos_bind,
+ .unbind = exynos_unbind,
+ .get_temp = exynos_get_temp,
+ .set_emul_temp = exynos_set_emul_temp,
+ .get_trend = exynos_get_trend,
+ .get_mode = exynos_get_mode,
+ .set_mode = exynos_set_mode,
+ .get_trip_type = exynos_get_trip_type,
+ .get_trip_temp = exynos_get_trip_temp,
+ .get_crit_temp = exynos_get_crit_temp,
+};
+
+/*
+ * This function may be called from an interrupt-driven temperature
+ * sensor when a trip threshold is crossed.
+ */
+void exynos_report_trigger(struct thermal_sensor_conf *conf)
+{
+ unsigned int i;
+ char data[10];
+ char *envp[] = { data, NULL };
+ struct exynos_thermal_zone *th_zone;
+
+ if (!conf || !conf->pzone_data) {
+ pr_err("Invalid temperature sensor configuration data\n");
+ return;
+ }
+
+ th_zone = conf->pzone_data;
+ if (!th_zone->therm_dev)
+ return;
+
+ if (th_zone->bind == false) {
+ for (i = 0; i < th_zone->cool_dev_size; i++) {
+ if (!th_zone->cool_dev[i])
+ continue;
+ exynos_bind(th_zone->therm_dev,
+ th_zone->cool_dev[i]);
+ }
+ }
+
+ thermal_zone_device_update(th_zone->therm_dev);
+
+ mutex_lock(&th_zone->therm_dev->lock);
+ /* Find the level for which trip happened */
+ for (i = 0; i < th_zone->sensor_conf->trip_data.trip_count; i++) {
+ if (th_zone->therm_dev->last_temperature <
+ th_zone->sensor_conf->trip_data.trip_val[i] * MCELSIUS)
+ break;
+ }
+
+ if (th_zone->mode == THERMAL_DEVICE_ENABLED &&
+ !th_zone->sensor_conf->trip_data.trigger_falling) {
+ if (i > 0)
+ th_zone->therm_dev->polling_delay = ACTIVE_INTERVAL;
+ else
+ th_zone->therm_dev->polling_delay = IDLE_INTERVAL;
+ }
+
+ snprintf(data, sizeof(data), "%u", i);
+ kobject_uevent_env(&th_zone->therm_dev->device.kobj, KOBJ_CHANGE, envp);
+ mutex_unlock(&th_zone->therm_dev->lock);
+}
+
+/* Register with the in-kernel thermal management */
+int exynos_register_thermal(struct thermal_sensor_conf *sensor_conf)
+{
+ int ret;
+ struct cpumask mask_val;
+ struct exynos_thermal_zone *th_zone;
+
+ if (!sensor_conf || !sensor_conf->read_temperature) {
+ pr_err("Temperature sensor not initialised\n");
+ return -EINVAL;
+ }
+
+ th_zone = devm_kzalloc(sensor_conf->dev,
+ sizeof(struct exynos_thermal_zone), GFP_KERNEL);
+ if (!th_zone)
+ return -ENOMEM;
+
+ th_zone->sensor_conf = sensor_conf;
+ /*
+ * TODO: 1) Handle multiple cooling devices in a thermal zone
+ * 2) Add a flag/name in cooling info to map to specific
+ * sensor
+ */
+ if (sensor_conf->cooling_data.freq_clip_count > 0) {
+ cpumask_set_cpu(0, &mask_val);
+ th_zone->cool_dev[th_zone->cool_dev_size] =
+ cpufreq_cooling_register(&mask_val);
+ if (IS_ERR(th_zone->cool_dev[th_zone->cool_dev_size])) {
+ dev_err(sensor_conf->dev,
+ "Failed to register cpufreq cooling device\n");
+ ret = -EINVAL;
+ goto err_unregister;
+ }
+ th_zone->cool_dev_size++;
+ }
+
+ th_zone->therm_dev = thermal_zone_device_register(
+ sensor_conf->name, sensor_conf->trip_data.trip_count,
+ 0, th_zone, &exynos_dev_ops, NULL, 0,
+ sensor_conf->trip_data.trigger_falling ? 0 :
+ IDLE_INTERVAL);
+
+ if (IS_ERR(th_zone->therm_dev)) {
+ dev_err(sensor_conf->dev,
+ "Failed to register thermal zone device\n");
+ ret = PTR_ERR(th_zone->therm_dev);
+ goto err_unregister;
+ }
+ th_zone->mode = THERMAL_DEVICE_ENABLED;
+ sensor_conf->pzone_data = th_zone;
+
+ dev_info(sensor_conf->dev,
+ "Exynos: Thermal zone(%s) registered\n", sensor_conf->name);
+
+ return 0;
+
+err_unregister:
+ exynos_unregister_thermal(sensor_conf);
+ return ret;
+}
+
+/* Un-Register with the in-kernel thermal management */
+void exynos_unregister_thermal(struct thermal_sensor_conf *sensor_conf)
+{
+ int i;
+ struct exynos_thermal_zone *th_zone;
+
+ if (!sensor_conf || !sensor_conf->pzone_data) {
+ pr_err("Invalid temperature sensor configuration data\n");
+ return;
+ }
+
+ th_zone = sensor_conf->pzone_data;
+
+ if (th_zone->therm_dev)
+ thermal_zone_device_unregister(th_zone->therm_dev);
+
+ for (i = 0; i < th_zone->cool_dev_size; i++) {
+ if (th_zone->cool_dev[i])
+ cpufreq_cooling_unregister(th_zone->cool_dev[i]);
+ }
+
+ dev_info(sensor_conf->dev,
+ "Exynos: Kernel Thermal management unregistered\n");
+}
diff --git a/drivers/thermal/samsung/exynos_thermal_common.h b/drivers/thermal/samsung/exynos_thermal_common.h
new file mode 100644
index 0000000..3eb2ed9
--- /dev/null
+++ b/drivers/thermal/samsung/exynos_thermal_common.h
@@ -0,0 +1,107 @@
+/*
+ * exynos_thermal_common.h - Samsung EXYNOS common header file
+ *
+ * Copyright (C) 2013 Samsung Electronics
+ * Amit Daniel Kachhap <amit.daniel@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#ifndef _EXYNOS_THERMAL_COMMON_H
+#define _EXYNOS_THERMAL_COMMON_H
+
+/* In-kernel thermal framework related macros & definations */
+#define SENSOR_NAME_LEN 16
+#define MAX_TRIP_COUNT 8
+#define MAX_COOLING_DEVICE 4
+#define MAX_THRESHOLD_LEVS 5
+
+#define ACTIVE_INTERVAL 500
+#define IDLE_INTERVAL 10000
+#define MCELSIUS 1000
+
+/* CPU Zone information */
+#define PANIC_ZONE 4
+#define WARN_ZONE 3
+#define MONITOR_ZONE 2
+#define SAFE_ZONE 1
+
+#define GET_ZONE(trip) (trip + 2)
+#define GET_TRIP(zone) (zone - 2)
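+
+/*
+ * Trip indices and zones are offset by two, e.g. GET_ZONE(0) yields
+ * MONITOR_ZONE and GET_TRIP(WARN_ZONE) yields trip index 1.
+ */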
+
+enum trigger_type {
+ THROTTLE_ACTIVE = 1,
+ THROTTLE_PASSIVE,
+ SW_TRIP,
+ HW_TRIP,
+};
+
+/**
+ * struct freq_clip_table
+ * @freq_clip_max: maximum frequency allowed for this cooling state.
+ * @temp_level: Temperature level at which the temperature clipping will
+ * happen.
+ * @mask_val: cpumask of the CPUs where the clipping will take place.
+ *
+ * This structure is required to be filled and passed in via the cooling
+ * configuration when the sensor is registered.
+ */
+struct freq_clip_table {
+ unsigned int freq_clip_max;
+ unsigned int temp_level;
+ const struct cpumask *mask_val;
+};
+
+struct thermal_trip_point_conf {
+ int trip_val[MAX_TRIP_COUNT];
+ int trip_type[MAX_TRIP_COUNT];
+ int trip_count;
+ unsigned char trigger_falling;
+};
+
+struct thermal_cooling_conf {
+ struct freq_clip_table freq_data[MAX_TRIP_COUNT];
+ int freq_clip_count;
+};
+
+struct thermal_sensor_conf {
+ char name[SENSOR_NAME_LEN];
+ int (*read_temperature)(void *data);
+ int (*write_emul_temp)(void *drv_data, unsigned long temp);
+ struct thermal_trip_point_conf trip_data;
+ struct thermal_cooling_conf cooling_data;
+ void *driver_data;
+ void *pzone_data;
+ struct device *dev;
+};
+
+/* Functions used by the exynos based thermal sensor drivers */
+#ifdef CONFIG_EXYNOS_THERMAL_CORE
+void exynos_unregister_thermal(struct thermal_sensor_conf *sensor_conf);
+int exynos_register_thermal(struct thermal_sensor_conf *sensor_conf);
+void exynos_report_trigger(struct thermal_sensor_conf *sensor_conf);
+#else
+static inline void
+exynos_unregister_thermal(struct thermal_sensor_conf *sensor_conf) { return; }
+
+static inline int
+exynos_register_thermal(struct thermal_sensor_conf *sensor_conf) { return 0; }
+
+static inline void
+exynos_report_trigger(struct thermal_sensor_conf *sensor_conf) { return; }
+
+#endif /* CONFIG_EXYNOS_THERMAL_CORE */
+#endif /* _EXYNOS_THERMAL_COMMON_H */
diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c
new file mode 100644
index 0000000..b43afda
--- /dev/null
+++ b/drivers/thermal/samsung/exynos_tmu.c
@@ -0,0 +1,762 @@
+/*
+ * exynos_tmu.c - Samsung EXYNOS TMU (Thermal Management Unit)
+ *
+ * Copyright (C) 2011 Samsung Electronics
+ * Donggeun Kim <dg77.kim@samsung.com>
+ * Amit Daniel Kachhap <amit.kachhap@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+
+#include "exynos_thermal_common.h"
+#include "exynos_tmu.h"
+#include "exynos_tmu_data.h"
+
+/**
+ * struct exynos_tmu_data : A structure to hold the private data of the TMU
+ * driver.
+ * @id: identifier of the one instance of the TMU controller.
+ * @pdata: pointer to the tmu platform/configuration data
+ * @base: base address of the single instance of the TMU controller.
+ * @base_common: base address of the common registers of the TMU controller.
+ * @irq: irq number of the TMU controller.
+ * @soc: id of the SOC type.
+ * @irq_work: pointer to the irq work structure.
+ * @lock: lock to implement synchronization.
+ * @clk: pointer to the clock structure.
+ * @temp_error1: fused value of the first point trim.
+ * @temp_error2: fused value of the second point trim.
+ * @regulator: pointer to the TMU regulator structure.
+ * @reg_conf: pointer to structure to register with core thermal.
+ */
+struct exynos_tmu_data {
+ int id;
+ struct exynos_tmu_platform_data *pdata;
+ void __iomem *base;
+ void __iomem *base_common;
+ int irq;
+ enum soc_type soc;
+ struct work_struct irq_work;
+ struct mutex lock;
+ struct clk *clk;
+ u8 temp_error1, temp_error2;
+ struct regulator *regulator;
+ struct thermal_sensor_conf *reg_conf;
+};
+
+/*
+ * TMU treats temperature as a mapped temperature code.
+ * The temperature is converted differently depending on the calibration type.
+ */
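+/*
+ * Example with hypothetical trim values, for illustration only: with
+ * first_point_trim = 25, second_point_trim = 85, temp_error1 = 55 and
+ * temp_error2 = 125, two-point trimming maps temp = 60 to a code of
+ * (60 - 25) * (125 - 55) / (85 - 25) + 55 = 40 + 55 = 95.
+ */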
+static int temp_to_code(struct exynos_tmu_data *data, u8 temp)
+{
+ struct exynos_tmu_platform_data *pdata = data->pdata;
+ int temp_code;
+
+ if (pdata->cal_mode == HW_MODE)
+ return temp;
+
+ if (data->soc == SOC_ARCH_EXYNOS4210)
+ /* temp should range between 25 and 125 */
+ if (temp < 25 || temp > 125) {
+ temp_code = -EINVAL;
+ goto out;
+ }
+
+ switch (pdata->cal_type) {
+ case TYPE_TWO_POINT_TRIMMING:
+ temp_code = (temp - pdata->first_point_trim) *
+ (data->temp_error2 - data->temp_error1) /
+ (pdata->second_point_trim - pdata->first_point_trim) +
+ data->temp_error1;
+ break;
+ case TYPE_ONE_POINT_TRIMMING:
+ temp_code = temp + data->temp_error1 - pdata->first_point_trim;
+ break;
+ default:
+ temp_code = temp + pdata->default_temp_offset;
+ break;
+ }
+out:
+ return temp_code;
+}
+
+/*
+ * Calculate a temperature value from a temperature code.
+ * The unit of the temperature is degree Celsius.
+ */
+static int code_to_temp(struct exynos_tmu_data *data, u8 temp_code)
+{
+ struct exynos_tmu_platform_data *pdata = data->pdata;
+ int temp;
+
+ if (pdata->cal_mode == HW_MODE)
+ return temp_code;
+
+ if (data->soc == SOC_ARCH_EXYNOS4210)
+ /* temp_code should range between 75 and 175 */
+ if (temp_code < 75 || temp_code > 175) {
+ temp = -ENODATA;
+ goto out;
+ }
+
+ switch (pdata->cal_type) {
+ case TYPE_TWO_POINT_TRIMMING:
+ temp = (temp_code - data->temp_error1) *
+ (pdata->second_point_trim - pdata->first_point_trim) /
+ (data->temp_error2 - data->temp_error1) +
+ pdata->first_point_trim;
+ break;
+ case TYPE_ONE_POINT_TRIMMING:
+ temp = temp_code - data->temp_error1 + pdata->first_point_trim;
+ break;
+ default:
+ temp = temp_code - pdata->default_temp_offset;
+ break;
+ }
+out:
+ return temp;
+}
+
+static int exynos_tmu_initialize(struct platform_device *pdev)
+{
+ struct exynos_tmu_data *data = platform_get_drvdata(pdev);
+ struct exynos_tmu_platform_data *pdata = data->pdata;
+ const struct exynos_tmu_registers *reg = pdata->registers;
+ unsigned int status, trim_info = 0, con;
+ unsigned int rising_threshold = 0, falling_threshold = 0;
+ int ret = 0, threshold_code, i, trigger_levs = 0;
+
+ mutex_lock(&data->lock);
+ clk_enable(data->clk);
+
+ if (TMU_SUPPORTS(pdata, READY_STATUS)) {
+ status = readb(data->base + reg->tmu_status);
+ if (!status) {
+ ret = -EBUSY;
+ goto out;
+ }
+ }
+
+ if (TMU_SUPPORTS(pdata, TRIM_RELOAD))
+ __raw_writel(1, data->base + reg->triminfo_ctrl);
+
+ if (pdata->cal_mode == HW_MODE)
+ goto skip_calib_data;
+
+ /* Save trimming info in order to perform calibration */
+ if (data->soc == SOC_ARCH_EXYNOS5440) {
+ /*
+ * For the exynos5440 SoC, the triminfo value is swapped between
+ * TMU0 and TMU2, so the logic below is needed.
+ */
+ switch (data->id) {
+ case 0:
+ trim_info = readl(data->base +
+ EXYNOS5440_EFUSE_SWAP_OFFSET + reg->triminfo_data);
+ break;
+ case 1:
+ trim_info = readl(data->base + reg->triminfo_data);
+ break;
+ case 2:
+ trim_info = readl(data->base -
+ EXYNOS5440_EFUSE_SWAP_OFFSET + reg->triminfo_data);
+ }
+ } else {
+ trim_info = readl(data->base + reg->triminfo_data);
+ }
+ data->temp_error1 = trim_info & EXYNOS_TMU_TEMP_MASK;
+ data->temp_error2 = ((trim_info >> reg->triminfo_85_shift) &
+ EXYNOS_TMU_TEMP_MASK);
+
+ if (!data->temp_error1 ||
+ (pdata->min_efuse_value > data->temp_error1) ||
+ (data->temp_error1 > pdata->max_efuse_value))
+ data->temp_error1 = pdata->efuse_value & EXYNOS_TMU_TEMP_MASK;
+
+ if (!data->temp_error2)
+ data->temp_error2 =
+ (pdata->efuse_value >> reg->triminfo_85_shift) &
+ EXYNOS_TMU_TEMP_MASK;
+
+skip_calib_data:
+ if (pdata->max_trigger_level > MAX_THRESHOLD_LEVS) {
+ dev_err(&pdev->dev, "Invalid max trigger level\n");
+ goto out;
+ }
+
+ for (i = 0; i < pdata->max_trigger_level; i++) {
+ if (!pdata->trigger_levels[i])
+ continue;
+
+ if ((pdata->trigger_type[i] == HW_TRIP) &&
+ (!pdata->trigger_levels[pdata->max_trigger_level - 1])) {
+ dev_err(&pdev->dev, "Invalid hw trigger level\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* Count trigger levels except the HW trip */
+ if (!(pdata->trigger_type[i] == HW_TRIP))
+ trigger_levs++;
+ }
+
+ if (data->soc == SOC_ARCH_EXYNOS4210) {
+ /* Write temperature code for threshold */
+ threshold_code = temp_to_code(data, pdata->threshold);
+ if (threshold_code < 0) {
+ ret = threshold_code;
+ goto out;
+ }
+ writeb(threshold_code,
+ data->base + reg->threshold_temp);
+ for (i = 0; i < trigger_levs; i++)
+ writeb(pdata->trigger_levels[i], data->base +
+ reg->threshold_th0 + i * sizeof(reg->threshold_th0));
+
+ writel(reg->inten_rise_mask, data->base + reg->tmu_intclear);
+ } else {
+ /* Write temperature code for rising and falling threshold */
+ for (i = 0;
+ i < trigger_levs && i < EXYNOS_MAX_TRIGGER_PER_REG; i++) {
+ threshold_code = temp_to_code(data,
+ pdata->trigger_levels[i]);
+ if (threshold_code < 0) {
+ ret = threshold_code;
+ goto out;
+ }
+ rising_threshold |= threshold_code << 8 * i;
+ if (pdata->threshold_falling) {
+ threshold_code = temp_to_code(data,
+ pdata->trigger_levels[i] -
+ pdata->threshold_falling);
+ if (threshold_code > 0)
+ falling_threshold |=
+ threshold_code << 8 * i;
+ }
+ }
+
+ writel(rising_threshold,
+ data->base + reg->threshold_th0);
+ writel(falling_threshold,
+ data->base + reg->threshold_th1);
+
+ writel((reg->inten_rise_mask << reg->inten_rise_shift) |
+ (reg->inten_fall_mask << reg->inten_fall_shift),
+ data->base + reg->tmu_intclear);
+
+ /* if last threshold limit is also present */
+ i = pdata->max_trigger_level - 1;
+ if (pdata->trigger_levels[i] &&
+ (pdata->trigger_type[i] == HW_TRIP)) {
+ threshold_code = temp_to_code(data,
+ pdata->trigger_levels[i]);
+ if (threshold_code < 0) {
+ ret = threshold_code;
+ goto out;
+ }
+ if (i == EXYNOS_MAX_TRIGGER_PER_REG - 1) {
+ /* 1-4 level to be assigned in th0 reg */
+ rising_threshold |= threshold_code << 8 * i;
+ writel(rising_threshold,
+ data->base + reg->threshold_th0);
+ } else if (i == EXYNOS_MAX_TRIGGER_PER_REG) {
+ /* 5th level to be assigned in th2 reg */
+ rising_threshold =
+ threshold_code << reg->threshold_th3_l0_shift;
+ writel(rising_threshold,
+ data->base + reg->threshold_th2);
+ }
+ con = readl(data->base + reg->tmu_ctrl);
+ con |= (1 << reg->therm_trip_en_shift);
+ writel(con, data->base + reg->tmu_ctrl);
+ }
+ }
+ /* Clear the PMIN in the common TMU register */
+ if (reg->tmu_pmin && !data->id)
+ writel(0, data->base_common + reg->tmu_pmin);
+out:
+ clk_disable(data->clk);
+ mutex_unlock(&data->lock);
+
+ return ret;
+}
+
+static void exynos_tmu_control(struct platform_device *pdev, bool on)
+{
+ struct exynos_tmu_data *data = platform_get_drvdata(pdev);
+ struct exynos_tmu_platform_data *pdata = data->pdata;
+ const struct exynos_tmu_registers *reg = pdata->registers;
+ unsigned int con, interrupt_en, cal_val;
+
+ mutex_lock(&data->lock);
+ clk_enable(data->clk);
+
+ con = readl(data->base + reg->tmu_ctrl);
+
+ if (pdata->reference_voltage) {
+ con &= ~(reg->buf_vref_sel_mask << reg->buf_vref_sel_shift);
+ con |= pdata->reference_voltage << reg->buf_vref_sel_shift;
+ }
+
+ if (pdata->gain) {
+ con &= ~(reg->buf_slope_sel_mask << reg->buf_slope_sel_shift);
+ con |= (pdata->gain << reg->buf_slope_sel_shift);
+ }
+
+ if (pdata->noise_cancel_mode) {
+ con &= ~(reg->therm_trip_mode_mask <<
+ reg->therm_trip_mode_shift);
+ con |= (pdata->noise_cancel_mode << reg->therm_trip_mode_shift);
+ }
+
+ if (pdata->cal_mode == HW_MODE) {
+ con &= ~(reg->calib_mode_mask << reg->calib_mode_shift);
+ cal_val = 0;
+ switch (pdata->cal_type) {
+ case TYPE_TWO_POINT_TRIMMING:
+ cal_val = 3;
+ break;
+ case TYPE_ONE_POINT_TRIMMING_85:
+ cal_val = 2;
+ break;
+ case TYPE_ONE_POINT_TRIMMING_25:
+ cal_val = 1;
+ break;
+ case TYPE_NONE:
+ break;
+ default:
+ dev_err(&pdev->dev, "Invalid calibration type, using none\n");
+ }
+ con |= cal_val << reg->calib_mode_shift;
+ }
+
+ if (on) {
+ con |= (1 << reg->core_en_shift);
+ interrupt_en =
+ pdata->trigger_enable[3] << reg->inten_rise3_shift |
+ pdata->trigger_enable[2] << reg->inten_rise2_shift |
+ pdata->trigger_enable[1] << reg->inten_rise1_shift |
+ pdata->trigger_enable[0] << reg->inten_rise0_shift;
+ if (TMU_SUPPORTS(pdata, FALLING_TRIP))
+ interrupt_en |=
+ interrupt_en << reg->inten_fall0_shift;
+ } else {
+ con &= ~(1 << reg->core_en_shift);
+ interrupt_en = 0; /* Disable all interrupts */
+ }
+ writel(interrupt_en, data->base + reg->tmu_inten);
+ writel(con, data->base + reg->tmu_ctrl);
+
+ clk_disable(data->clk);
+ mutex_unlock(&data->lock);
+}
+
+static int exynos_tmu_read(struct exynos_tmu_data *data)
+{
+ struct exynos_tmu_platform_data *pdata = data->pdata;
+ const struct exynos_tmu_registers *reg = pdata->registers;
+ u8 temp_code;
+ int temp;
+
+ mutex_lock(&data->lock);
+ clk_enable(data->clk);
+
+ temp_code = readb(data->base + reg->tmu_cur_temp);
+ temp = code_to_temp(data, temp_code);
+
+ clk_disable(data->clk);
+ mutex_unlock(&data->lock);
+
+ return temp;
+}
+
+#ifdef CONFIG_THERMAL_EMULATION
+static int exynos_tmu_set_emulation(void *drv_data, unsigned long temp)
+{
+ struct exynos_tmu_data *data = drv_data;
+ struct exynos_tmu_platform_data *pdata = data->pdata;
+ const struct exynos_tmu_registers *reg = pdata->registers;
+ unsigned int val;
+ int ret = -EINVAL;
+
+ if (!TMU_SUPPORTS(pdata, EMULATION))
+ goto out;
+
+ if (temp && temp < MCELSIUS)
+ goto out;
+
+ mutex_lock(&data->lock);
+ clk_enable(data->clk);
+
+ val = readl(data->base + reg->emul_con);
+
+ if (temp) {
+ temp /= MCELSIUS;
+
+ if (TMU_SUPPORTS(pdata, EMUL_TIME)) {
+ val &= ~(EXYNOS_EMUL_TIME_MASK << reg->emul_time_shift);
+ val |= (EXYNOS_EMUL_TIME << reg->emul_time_shift);
+ }
+ val &= ~(EXYNOS_EMUL_DATA_MASK << reg->emul_temp_shift);
+ val |= (temp_to_code(data, temp) << reg->emul_temp_shift) |
+ EXYNOS_EMUL_ENABLE;
+ } else {
+ val &= ~EXYNOS_EMUL_ENABLE;
+ }
+
+ writel(val, data->base + reg->emul_con);
+
+ clk_disable(data->clk);
+ mutex_unlock(&data->lock);
+ return 0;
+out:
+ return ret;
+}
+#else
+static int exynos_tmu_set_emulation(void *drv_data, unsigned long temp)
+ { return -EINVAL; }
+#endif/*CONFIG_THERMAL_EMULATION*/
+
+static void exynos_tmu_work(struct work_struct *work)
+{
+ struct exynos_tmu_data *data = container_of(work,
+ struct exynos_tmu_data, irq_work);
+ struct exynos_tmu_platform_data *pdata = data->pdata;
+ const struct exynos_tmu_registers *reg = pdata->registers;
+ unsigned int val_irq, val_type;
+
+ /* Find which sensor generated this interrupt */
+ if (reg->tmu_irqstatus) {
+ val_type = readl(data->base_common + reg->tmu_irqstatus);
+ if (!((val_type >> data->id) & 0x1))
+ goto out;
+ }
+
+ exynos_report_trigger(data->reg_conf);
+ mutex_lock(&data->lock);
+ clk_enable(data->clk);
+
+ /* TODO: take action based on particular interrupt */
+ val_irq = readl(data->base + reg->tmu_intstat);
+ /* clear the interrupts */
+ writel(val_irq, data->base + reg->tmu_intclear);
+
+ clk_disable(data->clk);
+ mutex_unlock(&data->lock);
+out:
+ enable_irq(data->irq);
+}
+
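+/*
+ * Hard IRQ handler: mask the line and defer interrupt clearing and
+ * trigger reporting to exynos_tmu_work(), which re-enables the IRQ
+ * once it has finished.
+ */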
+static irqreturn_t exynos_tmu_irq(int irq, void *id)
+{
+ struct exynos_tmu_data *data = id;
+
+ disable_irq_nosync(irq);
+ schedule_work(&data->irq_work);
+
+ return IRQ_HANDLED;
+}
+
+static const struct of_device_id exynos_tmu_match[] = {
+ {
+ .compatible = "samsung,exynos4210-tmu",
+ .data = (void *)EXYNOS4210_TMU_DRV_DATA,
+ },
+ {
+ .compatible = "samsung,exynos4412-tmu",
+ .data = (void *)EXYNOS5250_TMU_DRV_DATA,
+ },
+ {
+ .compatible = "samsung,exynos5250-tmu",
+ .data = (void *)EXYNOS5250_TMU_DRV_DATA,
+ },
+ {
+ .compatible = "samsung,exynos5440-tmu",
+ .data = (void *)EXYNOS5440_TMU_DRV_DATA,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, exynos_tmu_match);
+
+static inline struct exynos_tmu_platform_data *exynos_get_driver_data(
+ struct platform_device *pdev, int id)
+{
+ struct exynos_tmu_init_data *data_table;
+ struct exynos_tmu_platform_data *tmu_data;
+ const struct of_device_id *match;
+
+ match = of_match_node(exynos_tmu_match, pdev->dev.of_node);
+ if (!match)
+ return NULL;
+ data_table = (struct exynos_tmu_init_data *) match->data;
+ if (!data_table || id >= data_table->tmu_count)
+ return NULL;
+ tmu_data = data_table->tmu_data;
+ return (struct exynos_tmu_platform_data *) (tmu_data + id);
+}
+
+static int exynos_map_dt_data(struct platform_device *pdev)
+{
+ struct exynos_tmu_data *data = platform_get_drvdata(pdev);
+ struct exynos_tmu_platform_data *pdata;
+ struct resource res;
+ int ret;
+
+ if (!data || !pdev->dev.of_node)
+ return -ENODEV;
+
+ /*
+ * Try enabling the regulator if found
+ * TODO: Add regulator as an SOC feature, so that regulator enable
+ * is a compulsory call.
+ */
+ data->regulator = devm_regulator_get(&pdev->dev, "vtmu");
+ if (!IS_ERR(data->regulator)) {
+ ret = regulator_enable(data->regulator);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to enable vtmu\n");
+ return ret;
+ }
+ } else {
+ dev_info(&pdev->dev, "Regulator node (vtmu) not found\n");
+ }
+
+ data->id = of_alias_get_id(pdev->dev.of_node, "tmuctrl");
+ if (data->id < 0)
+ data->id = 0;
+
+ data->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
+ if (data->irq <= 0) {
+ dev_err(&pdev->dev, "failed to get IRQ\n");
+ return -ENODEV;
+ }
+
+ if (of_address_to_resource(pdev->dev.of_node, 0, &res)) {
+ dev_err(&pdev->dev, "failed to get Resource 0\n");
+ return -ENODEV;
+ }
+
+ data->base = devm_ioremap(&pdev->dev, res.start, resource_size(&res));
+ if (!data->base) {
+ dev_err(&pdev->dev, "Failed to ioremap memory\n");
+ return -EADDRNOTAVAIL;
+ }
+
+ pdata = exynos_get_driver_data(pdev, data->id);
+ if (!pdata) {
+ dev_err(&pdev->dev, "No platform init data supplied.\n");
+ return -ENODEV;
+ }
+ data->pdata = pdata;
+ /*
+ * Check if the TMU shares some registers and then try to map the
+ * memory of common registers.
+ */
+ if (!TMU_SUPPORTS(pdata, SHARED_MEMORY))
+ return 0;
+
+ if (of_address_to_resource(pdev->dev.of_node, 1, &res)) {
+ dev_err(&pdev->dev, "failed to get Resource 1\n");
+ return -ENODEV;
+ }
+
+ data->base_common = devm_ioremap(&pdev->dev, res.start,
+ resource_size(&res));
+ if (!data->base_common) {
+ dev_err(&pdev->dev, "Failed to ioremap memory\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int exynos_tmu_probe(struct platform_device *pdev)
+{
+ struct exynos_tmu_data *data;
+ struct exynos_tmu_platform_data *pdata;
+ struct thermal_sensor_conf *sensor_conf;
+ int ret, i;
+
+ data = devm_kzalloc(&pdev->dev, sizeof(struct exynos_tmu_data),
+ GFP_KERNEL);
+ if (!data) {
+ dev_err(&pdev->dev, "Failed to allocate driver structure\n");
+ return -ENOMEM;
+ }
+
+ platform_set_drvdata(pdev, data);
+ mutex_init(&data->lock);
+
+ ret = exynos_map_dt_data(pdev);
+ if (ret)
+ return ret;
+
+ pdata = data->pdata;
+
+ INIT_WORK(&data->irq_work, exynos_tmu_work);
+
+ data->clk = devm_clk_get(&pdev->dev, "tmu_apbif");
+ if (IS_ERR(data->clk)) {
+ dev_err(&pdev->dev, "Failed to get clock\n");
+ return PTR_ERR(data->clk);
+ }
+
+ ret = clk_prepare(data->clk);
+ if (ret)
+ return ret;
+
+ if (pdata->type == SOC_ARCH_EXYNOS ||
+ pdata->type == SOC_ARCH_EXYNOS4210 ||
+ pdata->type == SOC_ARCH_EXYNOS5440) {
+ data->soc = pdata->type;
+ } else {
+ ret = -EINVAL;
+ dev_err(&pdev->dev, "Platform not supported\n");
+ goto err_clk;
+ }
+
+ ret = exynos_tmu_initialize(pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to initialize TMU\n");
+ goto err_clk;
+ }
+
+ exynos_tmu_control(pdev, true);
+
+ /* Allocate a structure to register with the exynos core thermal */
+ sensor_conf = devm_kzalloc(&pdev->dev,
+ sizeof(struct thermal_sensor_conf), GFP_KERNEL);
+ if (!sensor_conf) {
+ dev_err(&pdev->dev, "Failed to allocate registration struct\n");
+ ret = -ENOMEM;
+ goto err_clk;
+ }
+ sprintf(sensor_conf->name, "therm_zone%d", data->id);
+ sensor_conf->read_temperature = (int (*)(void *))exynos_tmu_read;
+ sensor_conf->write_emul_temp =
+ (int (*)(void *, unsigned long))exynos_tmu_set_emulation;
+ sensor_conf->driver_data = data;
+ sensor_conf->trip_data.trip_count = pdata->trigger_enable[0] +
+ pdata->trigger_enable[1] + pdata->trigger_enable[2] +
+ pdata->trigger_enable[3];
+
+ for (i = 0; i < sensor_conf->trip_data.trip_count; i++) {
+ sensor_conf->trip_data.trip_val[i] =
+ pdata->threshold + pdata->trigger_levels[i];
+ sensor_conf->trip_data.trip_type[i] =
+ pdata->trigger_type[i];
+ }
+
+ sensor_conf->trip_data.trigger_falling = pdata->threshold_falling;
+
+ sensor_conf->cooling_data.freq_clip_count = pdata->freq_tab_count;
+ for (i = 0; i < pdata->freq_tab_count; i++) {
+ sensor_conf->cooling_data.freq_data[i].freq_clip_max =
+ pdata->freq_tab[i].freq_clip_max;
+ sensor_conf->cooling_data.freq_data[i].temp_level =
+ pdata->freq_tab[i].temp_level;
+ }
+ sensor_conf->dev = &pdev->dev;
+ /* Register the sensor with thermal management interface */
+ ret = exynos_register_thermal(sensor_conf);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register thermal interface\n");
+ goto err_clk;
+ }
+ data->reg_conf = sensor_conf;
+
+ ret = devm_request_irq(&pdev->dev, data->irq, exynos_tmu_irq,
+ IRQF_TRIGGER_RISING | IRQF_SHARED, dev_name(&pdev->dev), data);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to request irq: %d\n", data->irq);
+ goto err_clk;
+ }
+
+ return 0;
+err_clk:
+ clk_unprepare(data->clk);
+ return ret;
+}
+
+static int exynos_tmu_remove(struct platform_device *pdev)
+{
+ struct exynos_tmu_data *data = platform_get_drvdata(pdev);
+
+ exynos_tmu_control(pdev, false);
+
+ exynos_unregister_thermal(data->reg_conf);
+
+ clk_unprepare(data->clk);
+
+ if (!IS_ERR(data->regulator))
+ regulator_disable(data->regulator);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int exynos_tmu_suspend(struct device *dev)
+{
+ exynos_tmu_control(to_platform_device(dev), false);
+
+ return 0;
+}
+
+static int exynos_tmu_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+
+ exynos_tmu_initialize(pdev);
+ exynos_tmu_control(pdev, true);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(exynos_tmu_pm,
+ exynos_tmu_suspend, exynos_tmu_resume);
+#define EXYNOS_TMU_PM (&exynos_tmu_pm)
+#else
+#define EXYNOS_TMU_PM NULL
+#endif
+
+static struct platform_driver exynos_tmu_driver = {
+ .driver = {
+ .name = "exynos-tmu",
+ .owner = THIS_MODULE,
+ .pm = EXYNOS_TMU_PM,
+ .of_match_table = exynos_tmu_match,
+ },
+ .probe = exynos_tmu_probe,
+ .remove = exynos_tmu_remove,
+};
+
+module_platform_driver(exynos_tmu_driver);
+
+MODULE_DESCRIPTION("EXYNOS TMU Driver");
+MODULE_AUTHOR("Donggeun Kim <dg77.kim@samsung.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:exynos-tmu");
diff --git a/drivers/thermal/samsung/exynos_tmu.h b/drivers/thermal/samsung/exynos_tmu.h
new file mode 100644
index 0000000..b364c9e
--- /dev/null
+++ b/drivers/thermal/samsung/exynos_tmu.h
@@ -0,0 +1,311 @@
+/*
+ * exynos_tmu.h - Samsung EXYNOS TMU (Thermal Management Unit)
+ *
+ * Copyright (C) 2011 Samsung Electronics
+ * Donggeun Kim <dg77.kim@samsung.com>
+ * Amit Daniel Kachhap <amit.daniel@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _EXYNOS_TMU_H
+#define _EXYNOS_TMU_H
+#include <linux/cpu_cooling.h>
+
+#include "exynos_thermal_common.h"
+
+enum calibration_type {
+ TYPE_ONE_POINT_TRIMMING,
+ TYPE_ONE_POINT_TRIMMING_25,
+ TYPE_ONE_POINT_TRIMMING_85,
+ TYPE_TWO_POINT_TRIMMING,
+ TYPE_NONE,
+};
+
+enum calibration_mode {
+ SW_MODE,
+ HW_MODE,
+};
+
+enum soc_type {
+ SOC_ARCH_EXYNOS4210 = 1,
+ SOC_ARCH_EXYNOS,
+ SOC_ARCH_EXYNOS5440,
+};
+
+/**
+ * EXYNOS TMU supported features.
+ * TMU_SUPPORT_EMULATION - This feature is used to set a user defined
+ * temperature in the TMU controller.
+ * TMU_SUPPORT_MULTI_INST - This feature denotes that the SoC
+ * has multiple instances of the TMU.
+ * TMU_SUPPORT_TRIM_RELOAD - This feature shows that trimming can
+ * be reloaded.
+ * TMU_SUPPORT_FALLING_TRIP - This feature shows that an interrupt can
+ * be registered for falling trips as well.
+ * TMU_SUPPORT_READY_STATUS - This feature tells that the current TMU
+ * state (active/idle) can be checked.
+ * TMU_SUPPORT_EMUL_TIME - This feature allows setting the next temperature
+ * emulation sample time.
+ * TMU_SUPPORT_SHARED_MEMORY - This feature tells that the different TMU
+ * sensors share some common registers.
+ * TMU_SUPPORTS - macro to check whether a given feature is supported.
+ */
+#define TMU_SUPPORT_EMULATION BIT(0)
+#define TMU_SUPPORT_MULTI_INST BIT(1)
+#define TMU_SUPPORT_TRIM_RELOAD BIT(2)
+#define TMU_SUPPORT_FALLING_TRIP BIT(3)
+#define TMU_SUPPORT_READY_STATUS BIT(4)
+#define TMU_SUPPORT_EMUL_TIME BIT(5)
+#define TMU_SUPPORT_SHARED_MEMORY BIT(6)
+
+#define TMU_SUPPORTS(a, b) (a->features & TMU_SUPPORT_ ## b)
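+
+/*
+ * For example, TMU_SUPPORTS(pdata, EMULATION) expands to
+ * (pdata->features & TMU_SUPPORT_EMULATION).
+ */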
+
+/**
+ * struct exynos_tmu_register - register descriptors to access registers and
+ * bitfields. The register validity, offsets and bitfield values may vary
+ * slightly across different exynos SOC's.
+ * @triminfo_data: register containing two-point trimming data.
+ * @triminfo_25_shift: shift bit of the 25 C trim value in triminfo_data reg.
+ * @triminfo_85_shift: shift bit of the 85 C trim value in triminfo_data reg.
+ * @triminfo_ctrl: trim info controller register.
+ * @triminfo_reload_shift: shift of triminfo reload enable bit in triminfo_ctrl
+ * reg.
+ * @tmu_ctrl: TMU main controller register.
+ * @buf_vref_sel_shift: shift bits of reference voltage in tmu_ctrl register.
+ * @buf_vref_sel_mask: mask bits of reference voltage in tmu_ctrl register.
+ * @therm_trip_mode_shift: shift bits of tripping mode in tmu_ctrl register.
+ * @therm_trip_mode_mask: mask bits of tripping mode in tmu_ctrl register.
+ * @therm_trip_en_shift: shift bits of tripping enable in tmu_ctrl register.
+ * @buf_slope_sel_shift: shift bits of amplifier gain value in tmu_ctrl
+ * register.
+ * @buf_slope_sel_mask: mask bits of amplifier gain value in tmu_ctrl register.
+ * @calib_mode_shift: shift bits of calibration mode value in tmu_ctrl
+ * register.
+ * @calib_mode_mask: mask bits of calibration mode value in tmu_ctrl
+ * register.
+ * @therm_trip_tq_en_shift: shift bits of thermal trip enable by TQ pin in
+ * tmu_ctrl register.
+ * @core_en_shift: shift bits of TMU core enable bit in tmu_ctrl register.
+ * @tmu_status: register describing the TMU status.
+ * @tmu_cur_temp: register containing the current temperature of the TMU.
+ * @tmu_cur_temp_shift: shift bits of current temp value in tmu_cur_temp
+ * register.
+ * @threshold_temp: register containing the base threshold level.
+ * @threshold_th0: Register containing first set of rising levels.
+ * @threshold_th0_l0_shift: shift bits of level0 threshold temperature.
+ * @threshold_th0_l1_shift: shift bits of level1 threshold temperature.
+ * @threshold_th0_l2_shift: shift bits of level2 threshold temperature.
+ * @threshold_th0_l3_shift: shift bits of level3 threshold temperature.
+ * @threshold_th1: Register containing second set of rising levels.
+ * @threshold_th1_l0_shift: shift bits of level0 threshold temperature.
+ * @threshold_th1_l1_shift: shift bits of level1 threshold temperature.
+ * @threshold_th1_l2_shift: shift bits of level2 threshold temperature.
+ * @threshold_th1_l3_shift: shift bits of level3 threshold temperature.
+ * @threshold_th2: Register containing third set of rising levels.
+ * @threshold_th2_l0_shift: shift bits of level0 threshold temperature.
+ * @threshold_th3: Register containing fourth set of rising levels.
+ * @threshold_th3_l0_shift: shift bits of level0 threshold temperature.
+ * @tmu_inten: register containing the different threshold interrupt
+ * enable bits.
+ * @inten_rise_shift: shift bits of all rising interrupt bits.
+ * @inten_rise_mask: mask bits of all rising interrupt bits.
+ * @inten_fall_shift: shift bits of all falling interrupt bits.
+ * @inten_fall_mask: mask bits of all falling interrupt bits.
+ * @inten_rise0_shift: shift bits of rising 0 interrupt bits.
+ * @inten_rise1_shift: shift bits of rising 1 interrupt bits.
+ * @inten_rise2_shift: shift bits of rising 2 interrupt bits.
+ * @inten_rise3_shift: shift bits of rising 3 interrupt bits.
+ * @inten_fall0_shift: shift bits of falling 0 interrupt bits.
+ * @inten_fall1_shift: shift bits of falling 1 interrupt bits.
+ * @inten_fall2_shift: shift bits of falling 2 interrupt bits.
+ * @inten_fall3_shift: shift bits of falling 3 interrupt bits.
+ * @tmu_intstat: Register containing the interrupt status values.
+ * @tmu_intclear: Register for clearing the raised interrupt status.
+ * @emul_con: TMU emulation controller register.
+ * @emul_temp_shift: shift bits of emulation temperature.
+ * @emul_time_shift: shift bits of emulation time.
+ * @emul_time_mask: mask bits of emulation time.
+ * @tmu_irqstatus: register to find which TMU generated interrupts.
+ * @tmu_pmin: register to get/set the Pmin value.
+ */
+struct exynos_tmu_registers {
+ u32 triminfo_data;
+ u32 triminfo_25_shift;
+ u32 triminfo_85_shift;
+
+ u32 triminfo_ctrl;
+ u32 triminfo_reload_shift;
+
+ u32 tmu_ctrl;
+ u32 buf_vref_sel_shift;
+ u32 buf_vref_sel_mask;
+ u32 therm_trip_mode_shift;
+ u32 therm_trip_mode_mask;
+ u32 therm_trip_en_shift;
+ u32 buf_slope_sel_shift;
+ u32 buf_slope_sel_mask;
+ u32 calib_mode_shift;
+ u32 calib_mode_mask;
+ u32 therm_trip_tq_en_shift;
+ u32 core_en_shift;
+
+ u32 tmu_status;
+
+ u32 tmu_cur_temp;
+ u32 tmu_cur_temp_shift;
+
+ u32 threshold_temp;
+
+ u32 threshold_th0;
+ u32 threshold_th0_l0_shift;
+ u32 threshold_th0_l1_shift;
+ u32 threshold_th0_l2_shift;
+ u32 threshold_th0_l3_shift;
+
+ u32 threshold_th1;
+ u32 threshold_th1_l0_shift;
+ u32 threshold_th1_l1_shift;
+ u32 threshold_th1_l2_shift;
+ u32 threshold_th1_l3_shift;
+
+ u32 threshold_th2;
+ u32 threshold_th2_l0_shift;
+
+ u32 threshold_th3;
+ u32 threshold_th3_l0_shift;
+
+ u32 tmu_inten;
+ u32 inten_rise_shift;
+ u32 inten_rise_mask;
+ u32 inten_fall_shift;
+ u32 inten_fall_mask;
+ u32 inten_rise0_shift;
+ u32 inten_rise1_shift;
+ u32 inten_rise2_shift;
+ u32 inten_rise3_shift;
+ u32 inten_fall0_shift;
+ u32 inten_fall1_shift;
+ u32 inten_fall2_shift;
+ u32 inten_fall3_shift;
+
+ u32 tmu_intstat;
+
+ u32 tmu_intclear;
+
+ u32 emul_con;
+ u32 emul_temp_shift;
+ u32 emul_time_shift;
+ u32 emul_time_mask;
+
+ u32 tmu_irqstatus;
+ u32 tmu_pmin;
+};
+
+/**
+ * struct exynos_tmu_platform_data
+ * @threshold: basic temperature for generating interrupt
+ * 25 <= threshold <= 125 [unit: degree Celsius]
+ * @threshold_falling: differential value for setting the threshold
+ * of the temperature falling interrupt.
+ * @trigger_levels: array of temperatures for each interrupt level
+ * [unit: degree Celsius]
+ * 0: temperature for trigger_level0 interrupt
+ * condition for trigger_level0 interrupt:
+ * current temperature > threshold + trigger_levels[0]
+ * 1: temperature for trigger_level1 interrupt
+ * condition for trigger_level1 interrupt:
+ * current temperature > threshold + trigger_levels[1]
+ * 2: temperature for trigger_level2 interrupt
+ * condition for trigger_level2 interrupt:
+ * current temperature > threshold + trigger_levels[2]
+ * 3: temperature for trigger_level3 interrupt
+ * condition for trigger_level3 interrupt:
+ * current temperature > threshold + trigger_levels[3]
+ * @trigger_type: defines the type of trigger. Possible values are:
+ * THROTTLE_ACTIVE trigger type
+ * THROTTLE_PASSIVE trigger type
+ * SW_TRIP trigger type
+ * HW_TRIP trigger type
+ * @trigger_enable[]: array to denote which trigger levels are enabled.
+ * 1 = enable trigger_level[] interrupt,
+ * 0 = disable trigger_level[] interrupt
+ * @max_trigger_level: max trigger level supported by the TMU
+ * @gain: gain of amplifier in the positive-TC generator block
+ * 0 <= gain <= 15
+ * @reference_voltage: reference voltage of amplifier
+ * in the positive-TC generator block
+ * 0 <= reference_voltage <= 31
+ * @noise_cancel_mode: noise cancellation mode
+ * valid modes are 000, 100, 101, 110 and 111 (binary)
+ * @type: determines the type of SOC
+ * @efuse_value: platform defined fuse value
+ * @min_efuse_value: minimum valid trimming data
+ * @max_efuse_value: maximum valid trimming data
+ * @first_point_trim: temp value of the first point trimming
+ * @second_point_trim: temp value of the second point trimming
+ * @default_temp_offset: default temperature offset in case of no trimming
+ * @cal_type: calibration type for temperature
+ * @cal_mode: calibration mode for temperature
+ * @freq_tab: table of freq_clip_table entries representing the frequency
+ * reduction percentage at each trigger level.
+ * @freq_tab_count: count of the above table, as frequency reduction may be
+ * applicable to only some of the trigger levels.
+ * @registers: pointer to the structure containing all the TMU controller
+ * registers and bitfield shifts and masks.
+ * @features: a bitfield value indicating the features supported by the SoC,
+ * such as emulation, multiple instances, etc.
+ *
+ * This structure is required for configuration of exynos_tmu driver.
+ */
+struct exynos_tmu_platform_data {
+ u8 threshold;
+ u8 threshold_falling;
+ u8 trigger_levels[MAX_TRIP_COUNT];
+ enum trigger_type trigger_type[MAX_TRIP_COUNT];
+ bool trigger_enable[MAX_TRIP_COUNT];
+ u8 max_trigger_level;
+ u8 gain;
+ u8 reference_voltage;
+ u8 noise_cancel_mode;
+
+ u32 efuse_value;
+ u32 min_efuse_value;
+ u32 max_efuse_value;
+ u8 first_point_trim;
+ u8 second_point_trim;
+ u8 default_temp_offset;
+
+ enum calibration_type cal_type;
+ enum calibration_mode cal_mode;
+ enum soc_type type;
+ struct freq_clip_table freq_tab[4];
+ unsigned int freq_tab_count;
+ const struct exynos_tmu_registers *registers;
+ unsigned int features;
+};
+
+/**
+ * struct exynos_tmu_init_data
+ * @tmu_count: number of TMU instances.
+ * @tmu_data: platform data of all TMU instances.
+ *
+ * This structure is required to store data for the multi-instance exynos
+ * TMU driver.
+ */
+struct exynos_tmu_init_data {
+ int tmu_count;
+ struct exynos_tmu_platform_data tmu_data[];
+};
+
+#endif /* _EXYNOS_TMU_H */
diff --git a/drivers/thermal/samsung/exynos_tmu_data.c b/drivers/thermal/samsung/exynos_tmu_data.c
new file mode 100644
index 0000000..9002499
--- /dev/null
+++ b/drivers/thermal/samsung/exynos_tmu_data.c
@@ -0,0 +1,250 @@
+/*
+ * exynos_tmu_data.c - Samsung EXYNOS TMU data file
+ *
+ * Copyright (C) 2013 Samsung Electronics
+ * Amit Daniel Kachhap <amit.daniel@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include "exynos_thermal_common.h"
+#include "exynos_tmu.h"
+#include "exynos_tmu_data.h"
+
+#if defined(CONFIG_CPU_EXYNOS4210)
+static const struct exynos_tmu_registers exynos4210_tmu_registers = {
+ .triminfo_data = EXYNOS_TMU_REG_TRIMINFO,
+ .triminfo_25_shift = EXYNOS_TRIMINFO_25_SHIFT,
+ .triminfo_85_shift = EXYNOS_TRIMINFO_85_SHIFT,
+ .tmu_ctrl = EXYNOS_TMU_REG_CONTROL,
+ .buf_vref_sel_shift = EXYNOS_TMU_REF_VOLTAGE_SHIFT,
+ .buf_vref_sel_mask = EXYNOS_TMU_REF_VOLTAGE_MASK,
+ .buf_slope_sel_shift = EXYNOS_TMU_BUF_SLOPE_SEL_SHIFT,
+ .buf_slope_sel_mask = EXYNOS_TMU_BUF_SLOPE_SEL_MASK,
+ .core_en_shift = EXYNOS_TMU_CORE_EN_SHIFT,
+ .tmu_status = EXYNOS_TMU_REG_STATUS,
+ .tmu_cur_temp = EXYNOS_TMU_REG_CURRENT_TEMP,
+ .threshold_temp = EXYNOS4210_TMU_REG_THRESHOLD_TEMP,
+ .threshold_th0 = EXYNOS4210_TMU_REG_TRIG_LEVEL0,
+ .tmu_inten = EXYNOS_TMU_REG_INTEN,
+ .inten_rise_mask = EXYNOS4210_TMU_TRIG_LEVEL_MASK,
+ .inten_rise0_shift = EXYNOS_TMU_INTEN_RISE0_SHIFT,
+ .inten_rise1_shift = EXYNOS_TMU_INTEN_RISE1_SHIFT,
+ .inten_rise2_shift = EXYNOS_TMU_INTEN_RISE2_SHIFT,
+ .inten_rise3_shift = EXYNOS_TMU_INTEN_RISE3_SHIFT,
+ .tmu_intstat = EXYNOS_TMU_REG_INTSTAT,
+ .tmu_intclear = EXYNOS_TMU_REG_INTCLEAR,
+};
+
+struct exynos_tmu_init_data const exynos4210_default_tmu_data = {
+ .tmu_data = {
+ {
+ .threshold = 80,
+ .trigger_levels[0] = 5,
+ .trigger_levels[1] = 20,
+ .trigger_levels[2] = 30,
+ .trigger_enable[0] = true,
+ .trigger_enable[1] = true,
+ .trigger_enable[2] = true,
+ .trigger_enable[3] = false,
+ .trigger_type[0] = THROTTLE_ACTIVE,
+ .trigger_type[1] = THROTTLE_ACTIVE,
+ .trigger_type[2] = SW_TRIP,
+ .max_trigger_level = 4,
+ .gain = 15,
+ .reference_voltage = 7,
+ .cal_type = TYPE_ONE_POINT_TRIMMING,
+ .min_efuse_value = 40,
+ .max_efuse_value = 100,
+ .first_point_trim = 25,
+ .second_point_trim = 85,
+ .default_temp_offset = 50,
+ .freq_tab[0] = {
+ .freq_clip_max = 800 * 1000,
+ .temp_level = 85,
+ },
+ .freq_tab[1] = {
+ .freq_clip_max = 200 * 1000,
+ .temp_level = 100,
+ },
+ .freq_tab_count = 2,
+ .type = SOC_ARCH_EXYNOS4210,
+ .registers = &exynos4210_tmu_registers,
+ .features = TMU_SUPPORT_READY_STATUS,
+ },
+ },
+ .tmu_count = 1,
+};
+#endif
+
+#if defined(CONFIG_SOC_EXYNOS5250) || defined(CONFIG_SOC_EXYNOS4412)
+static const struct exynos_tmu_registers exynos5250_tmu_registers = {
+ .triminfo_data = EXYNOS_TMU_REG_TRIMINFO,
+ .triminfo_25_shift = EXYNOS_TRIMINFO_25_SHIFT,
+ .triminfo_85_shift = EXYNOS_TRIMINFO_85_SHIFT,
+ .triminfo_ctrl = EXYNOS_TMU_TRIMINFO_CON,
+ .triminfo_reload_shift = EXYNOS_TRIMINFO_RELOAD_SHIFT,
+ .tmu_ctrl = EXYNOS_TMU_REG_CONTROL,
+ .buf_vref_sel_shift = EXYNOS_TMU_REF_VOLTAGE_SHIFT,
+ .buf_vref_sel_mask = EXYNOS_TMU_REF_VOLTAGE_MASK,
+ .therm_trip_mode_shift = EXYNOS_TMU_TRIP_MODE_SHIFT,
+ .therm_trip_mode_mask = EXYNOS_TMU_TRIP_MODE_MASK,
+ .therm_trip_en_shift = EXYNOS_TMU_THERM_TRIP_EN_SHIFT,
+ .buf_slope_sel_shift = EXYNOS_TMU_BUF_SLOPE_SEL_SHIFT,
+ .buf_slope_sel_mask = EXYNOS_TMU_BUF_SLOPE_SEL_MASK,
+ .core_en_shift = EXYNOS_TMU_CORE_EN_SHIFT,
+ .tmu_status = EXYNOS_TMU_REG_STATUS,
+ .tmu_cur_temp = EXYNOS_TMU_REG_CURRENT_TEMP,
+ .threshold_th0 = EXYNOS_THD_TEMP_RISE,
+ .threshold_th1 = EXYNOS_THD_TEMP_FALL,
+ .tmu_inten = EXYNOS_TMU_REG_INTEN,
+ .inten_rise_mask = EXYNOS_TMU_RISE_INT_MASK,
+ .inten_rise_shift = EXYNOS_TMU_RISE_INT_SHIFT,
+ .inten_fall_mask = EXYNOS_TMU_FALL_INT_MASK,
+ .inten_fall_shift = EXYNOS_TMU_FALL_INT_SHIFT,
+ .inten_rise0_shift = EXYNOS_TMU_INTEN_RISE0_SHIFT,
+ .inten_rise1_shift = EXYNOS_TMU_INTEN_RISE1_SHIFT,
+ .inten_rise2_shift = EXYNOS_TMU_INTEN_RISE2_SHIFT,
+ .inten_rise3_shift = EXYNOS_TMU_INTEN_RISE3_SHIFT,
+ .inten_fall0_shift = EXYNOS_TMU_INTEN_FALL0_SHIFT,
+ .tmu_intstat = EXYNOS_TMU_REG_INTSTAT,
+ .tmu_intclear = EXYNOS_TMU_REG_INTCLEAR,
+ .emul_con = EXYNOS_EMUL_CON,
+ .emul_temp_shift = EXYNOS_EMUL_DATA_SHIFT,
+ .emul_time_shift = EXYNOS_EMUL_TIME_SHIFT,
+ .emul_time_mask = EXYNOS_EMUL_TIME_MASK,
+};
+
+#define EXYNOS5250_TMU_DATA \
+ .threshold_falling = 10, \
+ .trigger_levels[0] = 85, \
+ .trigger_levels[1] = 103, \
+ .trigger_levels[2] = 110, \
+ .trigger_levels[3] = 120, \
+ .trigger_enable[0] = true, \
+ .trigger_enable[1] = true, \
+ .trigger_enable[2] = true, \
+ .trigger_enable[3] = false, \
+ .trigger_type[0] = THROTTLE_ACTIVE, \
+ .trigger_type[1] = THROTTLE_ACTIVE, \
+ .trigger_type[2] = SW_TRIP, \
+ .trigger_type[3] = HW_TRIP, \
+ .max_trigger_level = 4, \
+ .gain = 8, \
+ .reference_voltage = 16, \
+ .noise_cancel_mode = 4, \
+ .cal_type = TYPE_ONE_POINT_TRIMMING, \
+ .efuse_value = 55, \
+ .min_efuse_value = 40, \
+ .max_efuse_value = 100, \
+ .first_point_trim = 25, \
+ .second_point_trim = 85, \
+ .default_temp_offset = 50, \
+ .freq_tab[0] = { \
+ .freq_clip_max = 800 * 1000, \
+ .temp_level = 85, \
+ }, \
+ .freq_tab[1] = { \
+ .freq_clip_max = 200 * 1000, \
+ .temp_level = 103, \
+ }, \
+ .freq_tab_count = 2, \
+ .type = SOC_ARCH_EXYNOS, \
+ .registers = &exynos5250_tmu_registers, \
+ .features = (TMU_SUPPORT_EMULATION | TMU_SUPPORT_TRIM_RELOAD | \
+ TMU_SUPPORT_FALLING_TRIP | TMU_SUPPORT_READY_STATUS | \
+ TMU_SUPPORT_EMUL_TIME)
+
+struct exynos_tmu_init_data const exynos5250_default_tmu_data = {
+ .tmu_data = {
+ { EXYNOS5250_TMU_DATA },
+ },
+ .tmu_count = 1,
+};
+#endif
+
+#if defined(CONFIG_SOC_EXYNOS5440)
+static const struct exynos_tmu_registers exynos5440_tmu_registers = {
+ .triminfo_data = EXYNOS5440_TMU_S0_7_TRIM,
+ .triminfo_25_shift = EXYNOS_TRIMINFO_25_SHIFT,
+ .triminfo_85_shift = EXYNOS_TRIMINFO_85_SHIFT,
+ .tmu_ctrl = EXYNOS5440_TMU_S0_7_CTRL,
+ .buf_vref_sel_shift = EXYNOS_TMU_REF_VOLTAGE_SHIFT,
+ .buf_vref_sel_mask = EXYNOS_TMU_REF_VOLTAGE_MASK,
+ .therm_trip_mode_shift = EXYNOS_TMU_TRIP_MODE_SHIFT,
+ .therm_trip_mode_mask = EXYNOS_TMU_TRIP_MODE_MASK,
+ .therm_trip_en_shift = EXYNOS_TMU_THERM_TRIP_EN_SHIFT,
+ .buf_slope_sel_shift = EXYNOS_TMU_BUF_SLOPE_SEL_SHIFT,
+ .buf_slope_sel_mask = EXYNOS_TMU_BUF_SLOPE_SEL_MASK,
+ .calib_mode_shift = EXYNOS_TMU_CALIB_MODE_SHIFT,
+ .calib_mode_mask = EXYNOS_TMU_CALIB_MODE_MASK,
+ .core_en_shift = EXYNOS_TMU_CORE_EN_SHIFT,
+ .tmu_status = EXYNOS5440_TMU_S0_7_STATUS,
+ .tmu_cur_temp = EXYNOS5440_TMU_S0_7_TEMP,
+ .threshold_th0 = EXYNOS5440_TMU_S0_7_TH0,
+ .threshold_th1 = EXYNOS5440_TMU_S0_7_TH1,
+ .threshold_th2 = EXYNOS5440_TMU_S0_7_TH2,
+ .threshold_th3_l0_shift = EXYNOS5440_TMU_TH_RISE4_SHIFT,
+ .tmu_inten = EXYNOS5440_TMU_S0_7_IRQEN,
+ .inten_rise_mask = EXYNOS5440_TMU_RISE_INT_MASK,
+ .inten_rise_shift = EXYNOS5440_TMU_RISE_INT_SHIFT,
+ .inten_fall_mask = EXYNOS5440_TMU_FALL_INT_MASK,
+ .inten_fall_shift = EXYNOS5440_TMU_FALL_INT_SHIFT,
+ .inten_rise0_shift = EXYNOS5440_TMU_INTEN_RISE0_SHIFT,
+ .inten_rise1_shift = EXYNOS5440_TMU_INTEN_RISE1_SHIFT,
+ .inten_rise2_shift = EXYNOS5440_TMU_INTEN_RISE2_SHIFT,
+ .inten_rise3_shift = EXYNOS5440_TMU_INTEN_RISE3_SHIFT,
+ .inten_fall0_shift = EXYNOS5440_TMU_INTEN_FALL0_SHIFT,
+ .tmu_intstat = EXYNOS5440_TMU_S0_7_IRQ,
+ .tmu_intclear = EXYNOS5440_TMU_S0_7_IRQ,
+ .tmu_irqstatus = EXYNOS5440_TMU_IRQ_STATUS,
+ .emul_con = EXYNOS5440_TMU_S0_7_DEBUG,
+ .emul_temp_shift = EXYNOS_EMUL_DATA_SHIFT,
+ .tmu_pmin = EXYNOS5440_TMU_PMIN,
+};
+
+#define EXYNOS5440_TMU_DATA \
+ .trigger_levels[0] = 100, \
+ .trigger_levels[4] = 105, \
+ .trigger_enable[0] = true, \
+ .trigger_type[0] = SW_TRIP, \
+ .trigger_type[4] = HW_TRIP, \
+ .max_trigger_level = 5, \
+ .gain = 5, \
+ .reference_voltage = 16, \
+ .noise_cancel_mode = 4, \
+ .cal_type = TYPE_ONE_POINT_TRIMMING, \
+ .cal_mode = 0, \
+ .efuse_value = 0x5b2d, \
+ .min_efuse_value = 16, \
+ .max_efuse_value = 76, \
+ .first_point_trim = 25, \
+ .second_point_trim = 70, \
+ .default_temp_offset = 25, \
+ .type = SOC_ARCH_EXYNOS5440, \
+ .registers = &exynos5440_tmu_registers, \
+ .features = (TMU_SUPPORT_EMULATION | TMU_SUPPORT_FALLING_TRIP | \
+ TMU_SUPPORT_MULTI_INST | TMU_SUPPORT_SHARED_MEMORY),
+
+struct exynos_tmu_init_data const exynos5440_default_tmu_data = {
+ .tmu_data = {
+ { EXYNOS5440_TMU_DATA },
+ { EXYNOS5440_TMU_DATA },
+ { EXYNOS5440_TMU_DATA },
+ },
+ .tmu_count = 3,
+};
+#endif
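For the Exynos4210 data above, the rising trip points follow the rule documented
for exynos_tmu_platform_data: interrupt i fires once the current temperature
exceeds threshold + trigger_levels[i]. A minimal sketch of the resulting trip
temperatures (values taken from the table above, variable names illustrative):

    u8 trip0 = 80 + 5;   /* 85 C, THROTTLE_ACTIVE; matches freq_tab[0].temp_level */
    u8 trip1 = 80 + 20;  /* 100 C, THROTTLE_ACTIVE; matches freq_tab[1].temp_level */
    u8 trip2 = 80 + 30;  /* 110 C, SW_TRIP */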
diff --git a/drivers/thermal/samsung/exynos_tmu_data.h b/drivers/thermal/samsung/exynos_tmu_data.h
new file mode 100644
index 0000000..dc7feb5
--- /dev/null
+++ b/drivers/thermal/samsung/exynos_tmu_data.h
@@ -0,0 +1,155 @@
+/*
+ * exynos_tmu_data.h - Samsung EXYNOS TMU data header file
+ *
+ * Copyright (C) 2013 Samsung Electronics
+ * Amit Daniel Kachhap <amit.daniel@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#ifndef _EXYNOS_TMU_DATA_H
+#define _EXYNOS_TMU_DATA_H
+
+/* Exynos generic registers */
+#define EXYNOS_TMU_REG_TRIMINFO 0x0
+#define EXYNOS_TMU_REG_CONTROL 0x20
+#define EXYNOS_TMU_REG_STATUS 0x28
+#define EXYNOS_TMU_REG_CURRENT_TEMP 0x40
+#define EXYNOS_TMU_REG_INTEN 0x70
+#define EXYNOS_TMU_REG_INTSTAT 0x74
+#define EXYNOS_TMU_REG_INTCLEAR 0x78
+
+#define EXYNOS_TMU_TEMP_MASK 0xff
+#define EXYNOS_TMU_REF_VOLTAGE_SHIFT 24
+#define EXYNOS_TMU_REF_VOLTAGE_MASK 0x1f
+#define EXYNOS_TMU_BUF_SLOPE_SEL_MASK 0xf
+#define EXYNOS_TMU_BUF_SLOPE_SEL_SHIFT 8
+#define EXYNOS_TMU_CORE_EN_SHIFT 0
+
+/* Exynos4210 specific registers */
+#define EXYNOS4210_TMU_REG_THRESHOLD_TEMP 0x44
+#define EXYNOS4210_TMU_REG_TRIG_LEVEL0 0x50
+#define EXYNOS4210_TMU_REG_TRIG_LEVEL1 0x54
+#define EXYNOS4210_TMU_REG_TRIG_LEVEL2 0x58
+#define EXYNOS4210_TMU_REG_TRIG_LEVEL3 0x5C
+#define EXYNOS4210_TMU_REG_PAST_TEMP0 0x60
+#define EXYNOS4210_TMU_REG_PAST_TEMP1 0x64
+#define EXYNOS4210_TMU_REG_PAST_TEMP2 0x68
+#define EXYNOS4210_TMU_REG_PAST_TEMP3 0x6C
+
+#define EXYNOS4210_TMU_TRIG_LEVEL0_MASK 0x1
+#define EXYNOS4210_TMU_TRIG_LEVEL1_MASK 0x10
+#define EXYNOS4210_TMU_TRIG_LEVEL2_MASK 0x100
+#define EXYNOS4210_TMU_TRIG_LEVEL3_MASK 0x1000
+#define EXYNOS4210_TMU_TRIG_LEVEL_MASK 0x1111
+#define EXYNOS4210_TMU_INTCLEAR_VAL 0x1111
+
+/* Exynos5250 and Exynos4412 specific registers */
+#define EXYNOS_TMU_TRIMINFO_CON 0x14
+#define EXYNOS_THD_TEMP_RISE 0x50
+#define EXYNOS_THD_TEMP_FALL 0x54
+#define EXYNOS_EMUL_CON 0x80
+
+#define EXYNOS_TRIMINFO_RELOAD_SHIFT 1
+#define EXYNOS_TRIMINFO_25_SHIFT 0
+#define EXYNOS_TRIMINFO_85_SHIFT 8
+#define EXYNOS_TMU_RISE_INT_MASK 0x111
+#define EXYNOS_TMU_RISE_INT_SHIFT 0
+#define EXYNOS_TMU_FALL_INT_MASK 0x111
+#define EXYNOS_TMU_FALL_INT_SHIFT 12
+#define EXYNOS_TMU_CLEAR_RISE_INT 0x111
+#define EXYNOS_TMU_CLEAR_FALL_INT (0x111 << 12)
+#define EXYNOS_TMU_TRIP_MODE_SHIFT 13
+#define EXYNOS_TMU_TRIP_MODE_MASK 0x7
+#define EXYNOS_TMU_THERM_TRIP_EN_SHIFT 12
+#define EXYNOS_TMU_CALIB_MODE_SHIFT 4
+#define EXYNOS_TMU_CALIB_MODE_MASK 0x3
+
+#define EXYNOS_TMU_INTEN_RISE0_SHIFT 0
+#define EXYNOS_TMU_INTEN_RISE1_SHIFT 4
+#define EXYNOS_TMU_INTEN_RISE2_SHIFT 8
+#define EXYNOS_TMU_INTEN_RISE3_SHIFT 12
+#define EXYNOS_TMU_INTEN_FALL0_SHIFT 16
+#define EXYNOS_TMU_INTEN_FALL1_SHIFT 20
+#define EXYNOS_TMU_INTEN_FALL2_SHIFT 24
+
+#define EXYNOS_EMUL_TIME 0x57F0
+#define EXYNOS_EMUL_TIME_MASK 0xffff
+#define EXYNOS_EMUL_TIME_SHIFT 16
+#define EXYNOS_EMUL_DATA_SHIFT 8
+#define EXYNOS_EMUL_DATA_MASK 0xFF
+#define EXYNOS_EMUL_ENABLE 0x1
+
+#define EXYNOS_MAX_TRIGGER_PER_REG 4
+
+/* Exynos5440 specific registers */
+#define EXYNOS5440_TMU_S0_7_TRIM 0x000
+#define EXYNOS5440_TMU_S0_7_CTRL 0x020
+#define EXYNOS5440_TMU_S0_7_DEBUG 0x040
+#define EXYNOS5440_TMU_S0_7_STATUS 0x060
+#define EXYNOS5440_TMU_S0_7_TEMP 0x0f0
+#define EXYNOS5440_TMU_S0_7_TH0 0x110
+#define EXYNOS5440_TMU_S0_7_TH1 0x130
+#define EXYNOS5440_TMU_S0_7_TH2 0x150
+#define EXYNOS5440_TMU_S0_7_EVTEN 0x1F0
+#define EXYNOS5440_TMU_S0_7_IRQEN 0x210
+#define EXYNOS5440_TMU_S0_7_IRQ 0x230
+/* exynos5440 common registers */
+#define EXYNOS5440_TMU_IRQ_STATUS 0x000
+#define EXYNOS5440_TMU_PMIN 0x004
+#define EXYNOS5440_TMU_TEMP 0x008
+
+#define EXYNOS5440_TMU_RISE_INT_MASK 0xf
+#define EXYNOS5440_TMU_RISE_INT_SHIFT 0
+#define EXYNOS5440_TMU_FALL_INT_MASK 0xf
+#define EXYNOS5440_TMU_FALL_INT_SHIFT 4
+#define EXYNOS5440_TMU_INTEN_RISE0_SHIFT 0
+#define EXYNOS5440_TMU_INTEN_RISE1_SHIFT 1
+#define EXYNOS5440_TMU_INTEN_RISE2_SHIFT 2
+#define EXYNOS5440_TMU_INTEN_RISE3_SHIFT 3
+#define EXYNOS5440_TMU_INTEN_FALL0_SHIFT 4
+#define EXYNOS5440_TMU_INTEN_FALL1_SHIFT 5
+#define EXYNOS5440_TMU_INTEN_FALL2_SHIFT 6
+#define EXYNOS5440_TMU_INTEN_FALL3_SHIFT 7
+#define EXYNOS5440_TMU_TH_RISE0_SHIFT 0
+#define EXYNOS5440_TMU_TH_RISE1_SHIFT 8
+#define EXYNOS5440_TMU_TH_RISE2_SHIFT 16
+#define EXYNOS5440_TMU_TH_RISE3_SHIFT 24
+#define EXYNOS5440_TMU_TH_RISE4_SHIFT 24
+#define EXYNOS5440_EFUSE_SWAP_OFFSET 8
+
+#if defined(CONFIG_CPU_EXYNOS4210)
+extern struct exynos_tmu_init_data const exynos4210_default_tmu_data;
+#define EXYNOS4210_TMU_DRV_DATA (&exynos4210_default_tmu_data)
+#else
+#define EXYNOS4210_TMU_DRV_DATA (NULL)
+#endif
+
+#if (defined(CONFIG_SOC_EXYNOS5250) || defined(CONFIG_SOC_EXYNOS4412))
+extern struct exynos_tmu_init_data const exynos5250_default_tmu_data;
+#define EXYNOS5250_TMU_DRV_DATA (&exynos5250_default_tmu_data)
+#else
+#define EXYNOS5250_TMU_DRV_DATA (NULL)
+#endif
+
+#if defined(CONFIG_SOC_EXYNOS5440)
+extern struct exynos_tmu_init_data const exynos5440_default_tmu_data;
+#define EXYNOS5440_TMU_DRV_DATA (&exynos5440_default_tmu_data)
+#else
+#define EXYNOS5440_TMU_DRV_DATA (NULL)
+#endif
+
+#endif /* _EXYNOS_TMU_DATA_H */
diff --git a/drivers/thermal/step_wise.c b/drivers/thermal/step_wise.c
index 4d4ddae..d89e781 100644
--- a/drivers/thermal/step_wise.c
+++ b/drivers/thermal/step_wise.c
@@ -51,44 +51,51 @@ static unsigned long get_target_state(struct thermal_instance *instance,
{
struct thermal_cooling_device *cdev = instance->cdev;
unsigned long cur_state;
+ unsigned long next_target;
+ /*
+ * next_target defaults to the instance's current target; the
+ * cases below derive a new one from the current state of the
+ * cooling device in use.
+ */
cdev->ops->get_cur_state(cdev, &cur_state);
+ next_target = instance->target;
switch (trend) {
case THERMAL_TREND_RAISING:
if (throttle) {
- cur_state = cur_state < instance->upper ?
+ next_target = cur_state < instance->upper ?
(cur_state + 1) : instance->upper;
- if (cur_state < instance->lower)
- cur_state = instance->lower;
+ if (next_target < instance->lower)
+ next_target = instance->lower;
}
break;
case THERMAL_TREND_RAISE_FULL:
if (throttle)
- cur_state = instance->upper;
+ next_target = instance->upper;
break;
case THERMAL_TREND_DROPPING:
if (cur_state == instance->lower) {
if (!throttle)
- cur_state = -1;
+ next_target = THERMAL_NO_TARGET;
} else {
- cur_state -= 1;
- if (cur_state > instance->upper)
- cur_state = instance->upper;
+ next_target = cur_state - 1;
+ if (next_target > instance->upper)
+ next_target = instance->upper;
}
break;
case THERMAL_TREND_DROP_FULL:
if (cur_state == instance->lower) {
if (!throttle)
- cur_state = -1;
+ next_target = THERMAL_NO_TARGET;
} else
- cur_state = instance->lower;
+ next_target = instance->lower;
break;
default:
break;
}
- return cur_state;
+ return next_target;
}
static void update_passive_instance(struct thermal_zone_device *tz,
@@ -133,6 +140,9 @@ static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip)
old_target = instance->target;
instance->target = get_target_state(instance, trend, throttle);
+ if (old_target == instance->target)
+ continue;
+
/* Activate a passive thermal instance */
if (old_target == THERMAL_NO_TARGET &&
instance->target != THERMAL_NO_TARGET)
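A worked example of the rewritten get_target_state() above, assuming
cur_state = 2, instance->lower = 1 and instance->upper = 5:

    /*
     * THERMAL_TREND_RAISING, throttle    -> next_target = 3 (cur_state + 1)
     * THERMAL_TREND_RAISE_FULL, throttle -> next_target = 5 (instance->upper)
     * THERMAL_TREND_DROPPING             -> next_target = 1 (cur_state - 1)
     * THERMAL_TREND_DROP_FULL, !throttle and cur_state == instance->lower
     *                                    -> next_target = THERMAL_NO_TARGET
     * unknown trend                      -> next_target = instance->target
     */

The subsequent old_target check in thermal_zone_trip_update() then skips
cooling devices whose target did not change.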
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 1f02e8e..4962a6a 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -38,6 +38,7 @@
#include <net/genetlink.h>
#include "thermal_core.h"
+#include "thermal_hwmon.h"
MODULE_AUTHOR("Zhang Rui");
MODULE_DESCRIPTION("Generic thermal management sysfs support");
@@ -201,14 +202,23 @@ static void print_bind_err_msg(struct thermal_zone_device *tz,
}
static void __bind(struct thermal_zone_device *tz, int mask,
- struct thermal_cooling_device *cdev)
+ struct thermal_cooling_device *cdev,
+ unsigned long *limits)
{
int i, ret;
for (i = 0; i < tz->trips; i++) {
if (mask & (1 << i)) {
+ unsigned long upper, lower;
+
+ upper = THERMAL_NO_LIMIT;
+ lower = THERMAL_NO_LIMIT;
+ if (limits) {
+ lower = limits[i * 2];
+ upper = limits[i * 2 + 1];
+ }
ret = thermal_zone_bind_cooling_device(tz, i, cdev,
- THERMAL_NO_LIMIT, THERMAL_NO_LIMIT);
+ upper, lower);
if (ret)
print_bind_err_msg(tz, cdev, ret);
}
@@ -253,7 +263,8 @@ static void bind_cdev(struct thermal_cooling_device *cdev)
if (tzp->tbp[i].match(pos, cdev))
continue;
tzp->tbp[i].cdev = cdev;
- __bind(pos, tzp->tbp[i].trip_mask, cdev);
+ __bind(pos, tzp->tbp[i].trip_mask, cdev,
+ tzp->tbp[i].binding_limits);
}
}
@@ -291,7 +302,8 @@ static void bind_tz(struct thermal_zone_device *tz)
if (tzp->tbp[i].match(tz, pos))
continue;
tzp->tbp[i].cdev = pos;
- __bind(tz, tzp->tbp[i].trip_mask, pos);
+ __bind(tz, tzp->tbp[i].trip_mask, pos,
+ tzp->tbp[i].binding_limits);
}
}
exit:
@@ -859,260 +871,6 @@ thermal_cooling_device_trip_point_show(struct device *dev,
/* Device management */
-#if defined(CONFIG_THERMAL_HWMON)
-
-/* hwmon sys I/F */
-#include <linux/hwmon.h>
-
-/* thermal zone devices with the same type share one hwmon device */
-struct thermal_hwmon_device {
- char type[THERMAL_NAME_LENGTH];
- struct device *device;
- int count;
- struct list_head tz_list;
- struct list_head node;
-};
-
-struct thermal_hwmon_attr {
- struct device_attribute attr;
- char name[16];
-};
-
-/* one temperature input for each thermal zone */
-struct thermal_hwmon_temp {
- struct list_head hwmon_node;
- struct thermal_zone_device *tz;
- struct thermal_hwmon_attr temp_input; /* hwmon sys attr */
- struct thermal_hwmon_attr temp_crit; /* hwmon sys attr */
-};
-
-static LIST_HEAD(thermal_hwmon_list);
-
-static ssize_t
-name_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct thermal_hwmon_device *hwmon = dev_get_drvdata(dev);
- return sprintf(buf, "%s\n", hwmon->type);
-}
-static DEVICE_ATTR(name, 0444, name_show, NULL);
-
-static ssize_t
-temp_input_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
- long temperature;
- int ret;
- struct thermal_hwmon_attr *hwmon_attr
- = container_of(attr, struct thermal_hwmon_attr, attr);
- struct thermal_hwmon_temp *temp
- = container_of(hwmon_attr, struct thermal_hwmon_temp,
- temp_input);
- struct thermal_zone_device *tz = temp->tz;
-
- ret = thermal_zone_get_temp(tz, &temperature);
-
- if (ret)
- return ret;
-
- return sprintf(buf, "%ld\n", temperature);
-}
-
-static ssize_t
-temp_crit_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct thermal_hwmon_attr *hwmon_attr
- = container_of(attr, struct thermal_hwmon_attr, attr);
- struct thermal_hwmon_temp *temp
- = container_of(hwmon_attr, struct thermal_hwmon_temp,
- temp_crit);
- struct thermal_zone_device *tz = temp->tz;
- long temperature;
- int ret;
-
- ret = tz->ops->get_trip_temp(tz, 0, &temperature);
- if (ret)
- return ret;
-
- return sprintf(buf, "%ld\n", temperature);
-}
-
-
-static struct thermal_hwmon_device *
-thermal_hwmon_lookup_by_type(const struct thermal_zone_device *tz)
-{
- struct thermal_hwmon_device *hwmon;
-
- mutex_lock(&thermal_list_lock);
- list_for_each_entry(hwmon, &thermal_hwmon_list, node)
- if (!strcmp(hwmon->type, tz->type)) {
- mutex_unlock(&thermal_list_lock);
- return hwmon;
- }
- mutex_unlock(&thermal_list_lock);
-
- return NULL;
-}
-
-/* Find the temperature input matching a given thermal zone */
-static struct thermal_hwmon_temp *
-thermal_hwmon_lookup_temp(const struct thermal_hwmon_device *hwmon,
- const struct thermal_zone_device *tz)
-{
- struct thermal_hwmon_temp *temp;
-
- mutex_lock(&thermal_list_lock);
- list_for_each_entry(temp, &hwmon->tz_list, hwmon_node)
- if (temp->tz == tz) {
- mutex_unlock(&thermal_list_lock);
- return temp;
- }
- mutex_unlock(&thermal_list_lock);
-
- return NULL;
-}
-
-static int
-thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
-{
- struct thermal_hwmon_device *hwmon;
- struct thermal_hwmon_temp *temp;
- int new_hwmon_device = 1;
- int result;
-
- hwmon = thermal_hwmon_lookup_by_type(tz);
- if (hwmon) {
- new_hwmon_device = 0;
- goto register_sys_interface;
- }
-
- hwmon = kzalloc(sizeof(struct thermal_hwmon_device), GFP_KERNEL);
- if (!hwmon)
- return -ENOMEM;
-
- INIT_LIST_HEAD(&hwmon->tz_list);
- strlcpy(hwmon->type, tz->type, THERMAL_NAME_LENGTH);
- hwmon->device = hwmon_device_register(NULL);
- if (IS_ERR(hwmon->device)) {
- result = PTR_ERR(hwmon->device);
- goto free_mem;
- }
- dev_set_drvdata(hwmon->device, hwmon);
- result = device_create_file(hwmon->device, &dev_attr_name);
- if (result)
- goto free_mem;
-
- register_sys_interface:
- temp = kzalloc(sizeof(struct thermal_hwmon_temp), GFP_KERNEL);
- if (!temp) {
- result = -ENOMEM;
- goto unregister_name;
- }
-
- temp->tz = tz;
- hwmon->count++;
-
- snprintf(temp->temp_input.name, sizeof(temp->temp_input.name),
- "temp%d_input", hwmon->count);
- temp->temp_input.attr.attr.name = temp->temp_input.name;
- temp->temp_input.attr.attr.mode = 0444;
- temp->temp_input.attr.show = temp_input_show;
- sysfs_attr_init(&temp->temp_input.attr.attr);
- result = device_create_file(hwmon->device, &temp->temp_input.attr);
- if (result)
- goto free_temp_mem;
-
- if (tz->ops->get_crit_temp) {
- unsigned long temperature;
- if (!tz->ops->get_crit_temp(tz, &temperature)) {
- snprintf(temp->temp_crit.name,
- sizeof(temp->temp_crit.name),
- "temp%d_crit", hwmon->count);
- temp->temp_crit.attr.attr.name = temp->temp_crit.name;
- temp->temp_crit.attr.attr.mode = 0444;
- temp->temp_crit.attr.show = temp_crit_show;
- sysfs_attr_init(&temp->temp_crit.attr.attr);
- result = device_create_file(hwmon->device,
- &temp->temp_crit.attr);
- if (result)
- goto unregister_input;
- }
- }
-
- mutex_lock(&thermal_list_lock);
- if (new_hwmon_device)
- list_add_tail(&hwmon->node, &thermal_hwmon_list);
- list_add_tail(&temp->hwmon_node, &hwmon->tz_list);
- mutex_unlock(&thermal_list_lock);
-
- return 0;
-
- unregister_input:
- device_remove_file(hwmon->device, &temp->temp_input.attr);
- free_temp_mem:
- kfree(temp);
- unregister_name:
- if (new_hwmon_device) {
- device_remove_file(hwmon->device, &dev_attr_name);
- hwmon_device_unregister(hwmon->device);
- }
- free_mem:
- if (new_hwmon_device)
- kfree(hwmon);
-
- return result;
-}
-
-static void
-thermal_remove_hwmon_sysfs(struct thermal_zone_device *tz)
-{
- struct thermal_hwmon_device *hwmon;
- struct thermal_hwmon_temp *temp;
-
- hwmon = thermal_hwmon_lookup_by_type(tz);
- if (unlikely(!hwmon)) {
- /* Should never happen... */
- dev_dbg(&tz->device, "hwmon device lookup failed!\n");
- return;
- }
-
- temp = thermal_hwmon_lookup_temp(hwmon, tz);
- if (unlikely(!temp)) {
- /* Should never happen... */
- dev_dbg(&tz->device, "temperature input lookup failed!\n");
- return;
- }
-
- device_remove_file(hwmon->device, &temp->temp_input.attr);
- if (tz->ops->get_crit_temp)
- device_remove_file(hwmon->device, &temp->temp_crit.attr);
-
- mutex_lock(&thermal_list_lock);
- list_del(&temp->hwmon_node);
- kfree(temp);
- if (!list_empty(&hwmon->tz_list)) {
- mutex_unlock(&thermal_list_lock);
- return;
- }
- list_del(&hwmon->node);
- mutex_unlock(&thermal_list_lock);
-
- device_remove_file(hwmon->device, &dev_attr_name);
- hwmon_device_unregister(hwmon->device);
- kfree(hwmon);
-}
-#else
-static int
-thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
-{
- return 0;
-}
-
-static void
-thermal_remove_hwmon_sysfs(struct thermal_zone_device *tz)
-{
-}
-#endif
-
/**
* thermal_zone_bind_cooling_device() - bind a cooling device to a thermal zone
* @tz: pointer to struct thermal_zone_device
@@ -1715,9 +1473,11 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type,
mutex_unlock(&thermal_governor_lock);
- result = thermal_add_hwmon_sysfs(tz);
- if (result)
- goto unregister;
+ if (!tz->tzp || !tz->tzp->no_hwmon) {
+ result = thermal_add_hwmon_sysfs(tz);
+ if (result)
+ goto unregister;
+ }
mutex_lock(&thermal_list_lock);
list_add_tail(&tz->node, &thermal_tz_list);
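The limits argument threaded through __bind() above is consumed as a flat array
of lower/upper cooling-state pairs, one pair per trip point (limits[i * 2] is
the lower bound, limits[i * 2 + 1] the upper). A hypothetical platform table,
not part of this patch, could look like:

    static unsigned long board_binding_limits[] = {
        0, 2,                                   /* trip 0: cooling states 0..2 */
        THERMAL_NO_LIMIT, THERMAL_NO_LIMIT,     /* trip 1: unrestricted */
    };  /* assigned to struct thermal_bind_params .binding_limits */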
diff --git a/drivers/thermal/thermal_hwmon.c b/drivers/thermal/thermal_hwmon.c
new file mode 100644
index 0000000..eeef0e2
--- /dev/null
+++ b/drivers/thermal/thermal_hwmon.c
@@ -0,0 +1,269 @@
+/*
+ * thermal_hwmon.c - Generic Thermal Management hwmon support.
+ *
+ * Code based on Intel thermal_core.c. Copyrights of the original code:
+ * Copyright (C) 2008 Intel Corp
+ * Copyright (C) 2008 Zhang Rui <rui.zhang@intel.com>
+ * Copyright (C) 2008 Sujith Thomas <sujith.thomas@intel.com>
+ *
+ * Copyright (C) 2013 Texas Instruments
+ * Copyright (C) 2013 Eduardo Valentin <eduardo.valentin@ti.com>
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+#include <linux/hwmon.h>
+#include <linux/thermal.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include "thermal_hwmon.h"
+
+/* hwmon sys I/F */
+/* thermal zone devices with the same type share one hwmon device */
+struct thermal_hwmon_device {
+ char type[THERMAL_NAME_LENGTH];
+ struct device *device;
+ int count;
+ struct list_head tz_list;
+ struct list_head node;
+};
+
+struct thermal_hwmon_attr {
+ struct device_attribute attr;
+ char name[16];
+};
+
+/* one temperature input for each thermal zone */
+struct thermal_hwmon_temp {
+ struct list_head hwmon_node;
+ struct thermal_zone_device *tz;
+ struct thermal_hwmon_attr temp_input; /* hwmon sys attr */
+ struct thermal_hwmon_attr temp_crit; /* hwmon sys attr */
+};
+
+static LIST_HEAD(thermal_hwmon_list);
+
+static DEFINE_MUTEX(thermal_hwmon_list_lock);
+
+static ssize_t
+name_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct thermal_hwmon_device *hwmon = dev_get_drvdata(dev);
+ return sprintf(buf, "%s\n", hwmon->type);
+}
+static DEVICE_ATTR(name, 0444, name_show, NULL);
+
+static ssize_t
+temp_input_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ long temperature;
+ int ret;
+ struct thermal_hwmon_attr *hwmon_attr
+ = container_of(attr, struct thermal_hwmon_attr, attr);
+ struct thermal_hwmon_temp *temp
+ = container_of(hwmon_attr, struct thermal_hwmon_temp,
+ temp_input);
+ struct thermal_zone_device *tz = temp->tz;
+
+ ret = thermal_zone_get_temp(tz, &temperature);
+
+ if (ret)
+ return ret;
+
+ return sprintf(buf, "%ld\n", temperature);
+}
+
+static ssize_t
+temp_crit_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct thermal_hwmon_attr *hwmon_attr
+ = container_of(attr, struct thermal_hwmon_attr, attr);
+ struct thermal_hwmon_temp *temp
+ = container_of(hwmon_attr, struct thermal_hwmon_temp,
+ temp_crit);
+ struct thermal_zone_device *tz = temp->tz;
+ long temperature;
+ int ret;
+
+ ret = tz->ops->get_trip_temp(tz, 0, &temperature);
+ if (ret)
+ return ret;
+
+ return sprintf(buf, "%ld\n", temperature);
+}
+
+
+static struct thermal_hwmon_device *
+thermal_hwmon_lookup_by_type(const struct thermal_zone_device *tz)
+{
+ struct thermal_hwmon_device *hwmon;
+
+ mutex_lock(&thermal_hwmon_list_lock);
+ list_for_each_entry(hwmon, &thermal_hwmon_list, node)
+ if (!strcmp(hwmon->type, tz->type)) {
+ mutex_unlock(&thermal_hwmon_list_lock);
+ return hwmon;
+ }
+ mutex_unlock(&thermal_hwmon_list_lock);
+
+ return NULL;
+}
+
+/* Find the temperature input matching a given thermal zone */
+static struct thermal_hwmon_temp *
+thermal_hwmon_lookup_temp(const struct thermal_hwmon_device *hwmon,
+ const struct thermal_zone_device *tz)
+{
+ struct thermal_hwmon_temp *temp;
+
+ mutex_lock(&thermal_hwmon_list_lock);
+ list_for_each_entry(temp, &hwmon->tz_list, hwmon_node)
+ if (temp->tz == tz) {
+ mutex_unlock(&thermal_hwmon_list_lock);
+ return temp;
+ }
+ mutex_unlock(&thermal_hwmon_list_lock);
+
+ return NULL;
+}
+
+int thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
+{
+ struct thermal_hwmon_device *hwmon;
+ struct thermal_hwmon_temp *temp;
+ int new_hwmon_device = 1;
+ int result;
+
+ hwmon = thermal_hwmon_lookup_by_type(tz);
+ if (hwmon) {
+ new_hwmon_device = 0;
+ goto register_sys_interface;
+ }
+
+ hwmon = kzalloc(sizeof(*hwmon), GFP_KERNEL);
+ if (!hwmon)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&hwmon->tz_list);
+ strlcpy(hwmon->type, tz->type, THERMAL_NAME_LENGTH);
+ hwmon->device = hwmon_device_register(&tz->device);
+ if (IS_ERR(hwmon->device)) {
+ result = PTR_ERR(hwmon->device);
+ goto free_mem;
+ }
+ dev_set_drvdata(hwmon->device, hwmon);
+ result = device_create_file(hwmon->device, &dev_attr_name);
+ if (result)
+ goto free_mem;
+
+ register_sys_interface:
+ temp = kzalloc(sizeof(*temp), GFP_KERNEL);
+ if (!temp) {
+ result = -ENOMEM;
+ goto unregister_name;
+ }
+
+ temp->tz = tz;
+ hwmon->count++;
+
+ snprintf(temp->temp_input.name, sizeof(temp->temp_input.name),
+ "temp%d_input", hwmon->count);
+ temp->temp_input.attr.attr.name = temp->temp_input.name;
+ temp->temp_input.attr.attr.mode = 0444;
+ temp->temp_input.attr.show = temp_input_show;
+ sysfs_attr_init(&temp->temp_input.attr.attr);
+ result = device_create_file(hwmon->device, &temp->temp_input.attr);
+ if (result)
+ goto free_temp_mem;
+
+ if (tz->ops->get_crit_temp) {
+ unsigned long temperature;
+ if (!tz->ops->get_crit_temp(tz, &temperature)) {
+ snprintf(temp->temp_crit.name,
+ sizeof(temp->temp_crit.name),
+ "temp%d_crit", hwmon->count);
+ temp->temp_crit.attr.attr.name = temp->temp_crit.name;
+ temp->temp_crit.attr.attr.mode = 0444;
+ temp->temp_crit.attr.show = temp_crit_show;
+ sysfs_attr_init(&temp->temp_crit.attr.attr);
+ result = device_create_file(hwmon->device,
+ &temp->temp_crit.attr);
+ if (result)
+ goto unregister_input;
+ }
+ }
+
+ mutex_lock(&thermal_hwmon_list_lock);
+ if (new_hwmon_device)
+ list_add_tail(&hwmon->node, &thermal_hwmon_list);
+ list_add_tail(&temp->hwmon_node, &hwmon->tz_list);
+ mutex_unlock(&thermal_hwmon_list_lock);
+
+ return 0;
+
+ unregister_input:
+ device_remove_file(hwmon->device, &temp->temp_input.attr);
+ free_temp_mem:
+ kfree(temp);
+ unregister_name:
+ if (new_hwmon_device) {
+ device_remove_file(hwmon->device, &dev_attr_name);
+ hwmon_device_unregister(hwmon->device);
+ }
+ free_mem:
+ if (new_hwmon_device)
+ kfree(hwmon);
+
+ return result;
+}
+
+void thermal_remove_hwmon_sysfs(struct thermal_zone_device *tz)
+{
+ struct thermal_hwmon_device *hwmon;
+ struct thermal_hwmon_temp *temp;
+
+ hwmon = thermal_hwmon_lookup_by_type(tz);
+ if (unlikely(!hwmon)) {
+ /* Should never happen... */
+ dev_dbg(&tz->device, "hwmon device lookup failed!\n");
+ return;
+ }
+
+ temp = thermal_hwmon_lookup_temp(hwmon, tz);
+ if (unlikely(!temp)) {
+ /* Should never happen... */
+ dev_dbg(&tz->device, "temperature input lookup failed!\n");
+ return;
+ }
+
+ device_remove_file(hwmon->device, &temp->temp_input.attr);
+ if (tz->ops->get_crit_temp)
+ device_remove_file(hwmon->device, &temp->temp_crit.attr);
+
+ mutex_lock(&thermal_hwmon_list_lock);
+ list_del(&temp->hwmon_node);
+ kfree(temp);
+ if (!list_empty(&hwmon->tz_list)) {
+ mutex_unlock(&thermal_hwmon_list_lock);
+ return;
+ }
+ list_del(&hwmon->node);
+ mutex_unlock(&thermal_hwmon_list_lock);
+
+ device_remove_file(hwmon->device, &dev_attr_name);
+ hwmon_device_unregister(hwmon->device);
+ kfree(hwmon);
+}
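Since thermal zones sharing a type string share one hwmon device, registering
two zones of the same type through the code above yields a single hwmon node
with one temperature input per zone, roughly (hwmon index assumed):

    /sys/class/hwmon/hwmon0/name         -> the shared zone type
    /sys/class/hwmon/hwmon0/temp1_input  -> first zone's temperature
    /sys/class/hwmon/hwmon0/temp1_crit   -> first zone's critical trip, if any
    /sys/class/hwmon/hwmon0/temp2_input  -> second zone's temperature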
diff --git a/drivers/thermal/thermal_hwmon.h b/drivers/thermal/thermal_hwmon.h
new file mode 100644
index 0000000..c798fdb
--- /dev/null
+++ b/drivers/thermal/thermal_hwmon.h
@@ -0,0 +1,49 @@
+/*
+ * thermal_hwmon.h - Generic Thermal Management hwmon support.
+ *
+ * Code based on Intel thermal_core.c. Copyrights of the original code:
+ * Copyright (C) 2008 Intel Corp
+ * Copyright (C) 2008 Zhang Rui <rui.zhang@intel.com>
+ * Copyright (C) 2008 Sujith Thomas <sujith.thomas@intel.com>
+ *
+ * Copyright (C) 2013 Texas Instruments
+ * Copyright (C) 2013 Eduardo Valentin <eduardo.valentin@ti.com>
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+#ifndef __THERMAL_HWMON_H__
+#define __THERMAL_HWMON_H__
+
+#include <linux/thermal.h>
+
+#ifdef CONFIG_THERMAL_HWMON
+int thermal_add_hwmon_sysfs(struct thermal_zone_device *tz);
+void thermal_remove_hwmon_sysfs(struct thermal_zone_device *tz);
+#else
+static inline int
+thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
+{
+ return 0;
+}
+
+static inline void
+thermal_remove_hwmon_sysfs(struct thermal_zone_device *tz)
+{
+}
+#endif
+
+#endif /* __THERMAL_HWMON_H__ */
diff --git a/drivers/thermal/ti-soc-thermal/dra752-thermal-data.c b/drivers/thermal/ti-soc-thermal/dra752-thermal-data.c
index e5d8326..a492927 100644
--- a/drivers/thermal/ti-soc-thermal/dra752-thermal-data.c
+++ b/drivers/thermal/ti-soc-thermal/dra752-thermal-data.c
@@ -42,6 +42,7 @@ dra752_core_temp_sensor_registers = {
.mask_hot_mask = DRA752_BANDGAP_CTRL_1_MASK_HOT_CORE_MASK,
.mask_cold_mask = DRA752_BANDGAP_CTRL_1_MASK_COLD_CORE_MASK,
.mask_sidlemode_mask = DRA752_BANDGAP_CTRL_1_SIDLEMODE_MASK,
+ .mask_counter_delay_mask = DRA752_BANDGAP_CTRL_1_COUNTER_DELAY_MASK,
.mask_freeze_mask = DRA752_BANDGAP_CTRL_1_FREEZE_CORE_MASK,
.mask_clear_mask = DRA752_BANDGAP_CTRL_1_CLEAR_CORE_MASK,
.mask_clear_accum_mask = DRA752_BANDGAP_CTRL_1_CLEAR_ACCUM_CORE_MASK,
@@ -77,6 +78,7 @@ dra752_iva_temp_sensor_registers = {
.mask_hot_mask = DRA752_BANDGAP_CTRL_2_MASK_HOT_IVA_MASK,
.mask_cold_mask = DRA752_BANDGAP_CTRL_2_MASK_COLD_IVA_MASK,
.mask_sidlemode_mask = DRA752_BANDGAP_CTRL_1_SIDLEMODE_MASK,
+ .mask_counter_delay_mask = DRA752_BANDGAP_CTRL_1_COUNTER_DELAY_MASK,
.mask_freeze_mask = DRA752_BANDGAP_CTRL_2_FREEZE_IVA_MASK,
.mask_clear_mask = DRA752_BANDGAP_CTRL_2_CLEAR_IVA_MASK,
.mask_clear_accum_mask = DRA752_BANDGAP_CTRL_2_CLEAR_ACCUM_IVA_MASK,
@@ -112,6 +114,7 @@ dra752_mpu_temp_sensor_registers = {
.mask_hot_mask = DRA752_BANDGAP_CTRL_1_MASK_HOT_MPU_MASK,
.mask_cold_mask = DRA752_BANDGAP_CTRL_1_MASK_COLD_MPU_MASK,
.mask_sidlemode_mask = DRA752_BANDGAP_CTRL_1_SIDLEMODE_MASK,
+ .mask_counter_delay_mask = DRA752_BANDGAP_CTRL_1_COUNTER_DELAY_MASK,
.mask_freeze_mask = DRA752_BANDGAP_CTRL_1_FREEZE_MPU_MASK,
.mask_clear_mask = DRA752_BANDGAP_CTRL_1_CLEAR_MPU_MASK,
.mask_clear_accum_mask = DRA752_BANDGAP_CTRL_1_CLEAR_ACCUM_MPU_MASK,
@@ -147,6 +150,7 @@ dra752_dspeve_temp_sensor_registers = {
.mask_hot_mask = DRA752_BANDGAP_CTRL_2_MASK_HOT_DSPEVE_MASK,
.mask_cold_mask = DRA752_BANDGAP_CTRL_2_MASK_COLD_DSPEVE_MASK,
.mask_sidlemode_mask = DRA752_BANDGAP_CTRL_1_SIDLEMODE_MASK,
+ .mask_counter_delay_mask = DRA752_BANDGAP_CTRL_1_COUNTER_DELAY_MASK,
.mask_freeze_mask = DRA752_BANDGAP_CTRL_2_FREEZE_DSPEVE_MASK,
.mask_clear_mask = DRA752_BANDGAP_CTRL_2_CLEAR_DSPEVE_MASK,
.mask_clear_accum_mask = DRA752_BANDGAP_CTRL_2_CLEAR_ACCUM_DSPEVE_MASK,
@@ -182,6 +186,7 @@ dra752_gpu_temp_sensor_registers = {
.mask_hot_mask = DRA752_BANDGAP_CTRL_1_MASK_HOT_GPU_MASK,
.mask_cold_mask = DRA752_BANDGAP_CTRL_1_MASK_COLD_GPU_MASK,
.mask_sidlemode_mask = DRA752_BANDGAP_CTRL_1_SIDLEMODE_MASK,
+ .mask_counter_delay_mask = DRA752_BANDGAP_CTRL_1_COUNTER_DELAY_MASK,
.mask_freeze_mask = DRA752_BANDGAP_CTRL_1_FREEZE_GPU_MASK,
.mask_clear_mask = DRA752_BANDGAP_CTRL_1_CLEAR_GPU_MASK,
.mask_clear_accum_mask = DRA752_BANDGAP_CTRL_1_CLEAR_ACCUM_GPU_MASK,
diff --git a/drivers/thermal/ti-soc-thermal/ti-bandgap.c b/drivers/thermal/ti-soc-thermal/ti-bandgap.c
index 9dfd471..74c0e34 100644
--- a/drivers/thermal/ti-soc-thermal/ti-bandgap.c
+++ b/drivers/thermal/ti-soc-thermal/ti-bandgap.c
@@ -1020,9 +1020,13 @@ int ti_bandgap_get_trend(struct ti_bandgap *bgp, int id, int *trend)
/* Fetch the update interval */
ret = ti_bandgap_read_update_interval(bgp, id, &interval);
- if (ret || !interval)
+ if (ret)
goto unfreeze;
+ /* Set the interval to 1 ms if bandgap counter delay is not set */
+ if (interval == 0)
+ interval = 1;
+
*trend = (t1 - t2) / interval;
dev_dbg(bgp->dev, "The temperatures are t1 = %d and t2 = %d and trend =%d\n",
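With the clamp above, a bandgap whose counter delay was never programmed no
longer aborts the trend computation. For example (assumed readings):

    /* t1 = 45000, t2 = 44500, interval read back as 0 */
    interval = 1;                     /* clamped */
    *trend = (45000 - 44500) / 1;     /* 500, instead of bailing out early */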
diff --git a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
index 4c5f55c37..4f8b9af 100644
--- a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
+++ b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
@@ -174,6 +174,9 @@ static int ti_thermal_set_mode(struct thermal_zone_device *thermal,
enum thermal_device_mode mode)
{
struct ti_thermal_data *data = thermal->devdata;
+ struct ti_bandgap *bgp;
+
+ bgp = data->bgp;
if (!data->ti_thermal) {
dev_notice(&thermal->device, "thermal zone not registered\n");
@@ -190,6 +193,8 @@ static int ti_thermal_set_mode(struct thermal_zone_device *thermal,
mutex_unlock(&data->ti_thermal->lock);
data->mode = mode;
+ ti_bandgap_write_update_interval(bgp, data->sensor_id,
+ data->ti_thermal->polling_delay);
thermal_zone_device_update(data->ti_thermal);
dev_dbg(&thermal->device, "thermal polling set for duration=%d msec\n",
data->ti_thermal->polling_delay);
@@ -313,6 +318,8 @@ int ti_thermal_expose_sensor(struct ti_bandgap *bgp, int id,
}
data->ti_thermal->polling_delay = FAST_TEMP_MONITORING_RATE;
ti_bandgap_set_sensor_data(bgp, id, data);
+ ti_bandgap_write_update_interval(bgp, data->sensor_id,
+ data->ti_thermal->polling_delay);
return 0;
}
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 47c6e7b..febd45c 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -5,7 +5,7 @@
if TTY
menu "Serial drivers"
- depends on HAS_IOMEM && GENERIC_HARDIRQS
+ depends on HAS_IOMEM
source "drivers/tty/serial/8250/Kconfig"
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index a9355ce..3a1a01a 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -854,7 +854,8 @@ void disassociate_ctty(int on_exit)
struct pid *tty_pgrp = tty_get_pgrp(tty);
if (tty_pgrp) {
kill_pgrp(tty_pgrp, SIGHUP, on_exit);
- kill_pgrp(tty_pgrp, SIGCONT, on_exit);
+ if (!on_exit)
+ kill_pgrp(tty_pgrp, SIGCONT, on_exit);
put_pid(tty_pgrp);
}
}
diff --git a/drivers/usb/dwc3/Kconfig b/drivers/usb/dwc3/Kconfig
index f969ea2..b870872 100644
--- a/drivers/usb/dwc3/Kconfig
+++ b/drivers/usb/dwc3/Kconfig
@@ -1,6 +1,6 @@
config USB_DWC3
tristate "DesignWare USB3 DRD Core Support"
- depends on (USB || USB_GADGET) && GENERIC_HARDIRQS && HAS_DMA
+ depends on (USB || USB_GADGET) && HAS_DMA
depends on EXTCON
select USB_XHCI_PLATFORM if USB_SUPPORT && USB_XHCI_HCD
help
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index 30e2dd8..48cddf3 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -313,7 +313,7 @@ config USB_S3C_HSUDC
config USB_MV_UDC
tristate "Marvell USB2.0 Device Controller"
- depends on GENERIC_HARDIRQS && HAS_DMA
+ depends on HAS_DMA
help
Marvell Socs (including PXA and MMP series) include a high speed
USB2.0 OTG controller, which can be configured as high speed or
@@ -425,7 +425,7 @@ config USB_GOKU
config USB_EG20T
tristate "Intel EG20T PCH/LAPIS Semiconductor IOH(ML7213/ML7831) UDC"
- depends on PCI && GENERIC_HARDIRQS
+ depends on PCI
help
This is a USB device driver for EG20T PCH.
EG20T PCH is the platform controller hub that is used in Intel's
diff --git a/drivers/usb/gadget/inode.c b/drivers/usb/gadget/inode.c
index 465ef8e..b94c049 100644
--- a/drivers/usb/gadget/inode.c
+++ b/drivers/usb/gadget/inode.c
@@ -524,7 +524,7 @@ struct kiocb_priv {
unsigned actual;
};
-static int ep_aio_cancel(struct kiocb *iocb, struct io_event *e)
+static int ep_aio_cancel(struct kiocb *iocb)
{
struct kiocb_priv *priv = iocb->private;
struct ep_data *epdata;
@@ -540,7 +540,6 @@ static int ep_aio_cancel(struct kiocb *iocb, struct io_event *e)
// spin_unlock(&epdata->dev->lock);
local_irq_enable();
- aio_put_req(iocb);
return value;
}
@@ -709,11 +708,11 @@ ep_aio_read(struct kiocb *iocb, const struct iovec *iov,
if (unlikely(usb_endpoint_dir_in(&epdata->desc)))
return -EINVAL;
- buf = kmalloc(iocb->ki_left, GFP_KERNEL);
+ buf = kmalloc(iocb->ki_nbytes, GFP_KERNEL);
if (unlikely(!buf))
return -ENOMEM;
- return ep_aio_rwtail(iocb, buf, iocb->ki_left, epdata, iov, nr_segs);
+ return ep_aio_rwtail(iocb, buf, iocb->ki_nbytes, epdata, iov, nr_segs);
}
static ssize_t
@@ -728,7 +727,7 @@ ep_aio_write(struct kiocb *iocb, const struct iovec *iov,
if (unlikely(!usb_endpoint_dir_in(&epdata->desc)))
return -EINVAL;
- buf = kmalloc(iocb->ki_left, GFP_KERNEL);
+ buf = kmalloc(iocb->ki_nbytes, GFP_KERNEL);
if (unlikely(!buf))
return -ENOMEM;
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index 5be0326..b3f20d7 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -278,7 +278,6 @@ endif # USB_EHCI_HCD
config USB_OXU210HP_HCD
tristate "OXU210HP HCD support"
- depends on GENERIC_HARDIRQS
---help---
The OXU210HP is an USB host/OTG/device controller. Enable this
option if your board has this chip. If unsure, say N.
diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig
index c64ee09a7..c258a97 100644
--- a/drivers/usb/musb/Kconfig
+++ b/drivers/usb/musb/Kconfig
@@ -71,7 +71,6 @@ config USB_MUSB_DA8XX
config USB_MUSB_TUSB6010
tristate "TUSB6010"
- depends on GENERIC_HARDIRQS
config USB_MUSB_OMAP2PLUS
tristate "OMAP2430 and onwards"
diff --git a/drivers/usb/renesas_usbhs/Kconfig b/drivers/usb/renesas_usbhs/Kconfig
index 019bf7e..1c4195a 100644
--- a/drivers/usb/renesas_usbhs/Kconfig
+++ b/drivers/usb/renesas_usbhs/Kconfig
@@ -4,7 +4,7 @@
config USB_RENESAS_USBHS
tristate 'Renesas USBHS controller'
- depends on USB_GADGET && GENERIC_HARDIRQS
+ depends on USB_GADGET
default n
help
Renesas USBHS is a discrete USB host and peripheral controller chip
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index cef6002..6ab71b9 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -13,6 +13,7 @@
#include <linux/device.h>
#include <linux/eventfd.h>
+#include <linux/file.h>
#include <linux/interrupt.h>
#include <linux/iommu.h>
#include <linux/module.h>
@@ -227,6 +228,110 @@ static int vfio_pci_get_irq_count(struct vfio_pci_device *vdev, int irq_type)
return 0;
}
+static int vfio_pci_count_devs(struct pci_dev *pdev, void *data)
+{
+ (*(int *)data)++;
+ return 0;
+}
+
+struct vfio_pci_fill_info {
+ int max;
+ int cur;
+ struct vfio_pci_dependent_device *devices;
+};
+
+static int vfio_pci_fill_devs(struct pci_dev *pdev, void *data)
+{
+ struct vfio_pci_fill_info *fill = data;
+ struct iommu_group *iommu_group;
+
+ if (fill->cur == fill->max)
+ return -EAGAIN; /* Something changed, try again */
+
+ iommu_group = iommu_group_get(&pdev->dev);
+ if (!iommu_group)
+ return -EPERM; /* Cannot reset non-isolated devices */
+
+ fill->devices[fill->cur].group_id = iommu_group_id(iommu_group);
+ fill->devices[fill->cur].segment = pci_domain_nr(pdev->bus);
+ fill->devices[fill->cur].bus = pdev->bus->number;
+ fill->devices[fill->cur].devfn = pdev->devfn;
+ fill->cur++;
+ iommu_group_put(iommu_group);
+ return 0;
+}
+
+struct vfio_pci_group_entry {
+ struct vfio_group *group;
+ int id;
+};
+
+struct vfio_pci_group_info {
+ int count;
+ struct vfio_pci_group_entry *groups;
+};
+
+static int vfio_pci_validate_devs(struct pci_dev *pdev, void *data)
+{
+ struct vfio_pci_group_info *info = data;
+ struct iommu_group *group;
+ int id, i;
+
+ group = iommu_group_get(&pdev->dev);
+ if (!group)
+ return -EPERM;
+
+ id = iommu_group_id(group);
+
+ for (i = 0; i < info->count; i++)
+ if (info->groups[i].id == id)
+ break;
+
+ iommu_group_put(group);
+
+ return (i == info->count) ? -EINVAL : 0;
+}
+
+static bool vfio_pci_dev_below_slot(struct pci_dev *pdev, struct pci_slot *slot)
+{
+ for (; pdev; pdev = pdev->bus->self)
+ if (pdev->bus == slot->bus)
+ return (pdev->slot == slot);
+ return false;
+}
+
+struct vfio_pci_walk_info {
+ int (*fn)(struct pci_dev *, void *data);
+ void *data;
+ struct pci_dev *pdev;
+ bool slot;
+ int ret;
+};
+
+static int vfio_pci_walk_wrapper(struct pci_dev *pdev, void *data)
+{
+ struct vfio_pci_walk_info *walk = data;
+
+ if (!walk->slot || vfio_pci_dev_below_slot(pdev, walk->pdev->slot))
+ walk->ret = walk->fn(pdev, walk->data);
+
+ return walk->ret;
+}
+
+static int vfio_pci_for_each_slot_or_bus(struct pci_dev *pdev,
+ int (*fn)(struct pci_dev *,
+ void *data), void *data,
+ bool slot)
+{
+ struct vfio_pci_walk_info walk = {
+ .fn = fn, .data = data, .pdev = pdev, .slot = slot, .ret = 0,
+ };
+
+ pci_walk_bus(pdev->bus, vfio_pci_walk_wrapper, &walk);
+
+ return walk.ret;
+}
+
static long vfio_pci_ioctl(void *device_data,
unsigned int cmd, unsigned long arg)
{
@@ -407,10 +512,189 @@ static long vfio_pci_ioctl(void *device_data,
return ret;
- } else if (cmd == VFIO_DEVICE_RESET)
+ } else if (cmd == VFIO_DEVICE_RESET) {
return vdev->reset_works ?
pci_reset_function(vdev->pdev) : -EINVAL;
+ } else if (cmd == VFIO_DEVICE_GET_PCI_HOT_RESET_INFO) {
+ struct vfio_pci_hot_reset_info hdr;
+ struct vfio_pci_fill_info fill = { 0 };
+ struct vfio_pci_dependent_device *devices = NULL;
+ bool slot = false;
+ int ret = 0;
+
+ minsz = offsetofend(struct vfio_pci_hot_reset_info, count);
+
+ if (copy_from_user(&hdr, (void __user *)arg, minsz))
+ return -EFAULT;
+
+ if (hdr.argsz < minsz)
+ return -EINVAL;
+
+ hdr.flags = 0;
+
+ /* Can we do a slot or bus reset or neither? */
+ if (!pci_probe_reset_slot(vdev->pdev->slot))
+ slot = true;
+ else if (pci_probe_reset_bus(vdev->pdev->bus))
+ return -ENODEV;
+
+ /* How many devices are affected? */
+ ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
+ vfio_pci_count_devs,
+ &fill.max, slot);
+ if (ret)
+ return ret;
+
+ WARN_ON(!fill.max); /* Should always be at least one */
+
+ /*
+ * If there's enough space, fill it now, otherwise return
+ * -ENOSPC and the number of devices affected.
+ */
+ if (hdr.argsz < sizeof(hdr) + (fill.max * sizeof(*devices))) {
+ ret = -ENOSPC;
+ hdr.count = fill.max;
+ goto reset_info_exit;
+ }
+
+ devices = kcalloc(fill.max, sizeof(*devices), GFP_KERNEL);
+ if (!devices)
+ return -ENOMEM;
+
+ fill.devices = devices;
+
+ ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
+ vfio_pci_fill_devs,
+ &fill, slot);
+
+ /*
+ * If a device was removed between counting and filling,
+ * we may come up short of fill.max. If a device was
+ * added, we'll have a return of -EAGAIN above.
+ */
+ if (!ret)
+ hdr.count = fill.cur;
+
+reset_info_exit:
+ if (copy_to_user((void __user *)arg, &hdr, minsz))
+ ret = -EFAULT;
+
+ if (!ret) {
+ if (copy_to_user((void __user *)(arg + minsz), devices,
+ hdr.count * sizeof(*devices)))
+ ret = -EFAULT;
+ }
+
+ kfree(devices);
+ return ret;
+
+ } else if (cmd == VFIO_DEVICE_PCI_HOT_RESET) {
+ struct vfio_pci_hot_reset hdr;
+ int32_t *group_fds;
+ struct vfio_pci_group_entry *groups;
+ struct vfio_pci_group_info info;
+ bool slot = false;
+ int i, count = 0, ret = 0;
+
+ minsz = offsetofend(struct vfio_pci_hot_reset, count);
+
+ if (copy_from_user(&hdr, (void __user *)arg, minsz))
+ return -EFAULT;
+
+ if (hdr.argsz < minsz || hdr.flags)
+ return -EINVAL;
+
+ /* Can we do a slot or bus reset or neither? */
+ if (!pci_probe_reset_slot(vdev->pdev->slot))
+ slot = true;
+ else if (pci_probe_reset_bus(vdev->pdev->bus))
+ return -ENODEV;
+
+ /*
+ * We can't let userspace give us an arbitrarily large
+ * buffer to copy, so verify how many we think there
+ * could be. Note groups can have multiple devices so
+ * one group per device is the max.
+ */
+ ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
+ vfio_pci_count_devs,
+ &count, slot);
+ if (ret)
+ return ret;
+
+ /* Somewhere between 1 and count is OK */
+ if (!hdr.count || hdr.count > count)
+ return -EINVAL;
+
+ group_fds = kcalloc(hdr.count, sizeof(*group_fds), GFP_KERNEL);
+ groups = kcalloc(hdr.count, sizeof(*groups), GFP_KERNEL);
+ if (!group_fds || !groups) {
+ kfree(group_fds);
+ kfree(groups);
+ return -ENOMEM;
+ }
+
+ if (copy_from_user(group_fds, (void __user *)(arg + minsz),
+ hdr.count * sizeof(*group_fds))) {
+ kfree(group_fds);
+ kfree(groups);
+ return -EFAULT;
+ }
+
+ /*
+ * For each group_fd, get the group through the vfio external
+ * user interface and store the group and iommu ID. This
+ * ensures the group is held across the reset.
+ */
+ for (i = 0; i < hdr.count; i++) {
+ struct vfio_group *group;
+ struct fd f = fdget(group_fds[i]);
+ if (!f.file) {
+ ret = -EBADF;
+ break;
+ }
+
+ group = vfio_group_get_external_user(f.file);
+ fdput(f);
+ if (IS_ERR(group)) {
+ ret = PTR_ERR(group);
+ break;
+ }
+
+ groups[i].group = group;
+ groups[i].id = vfio_external_user_iommu_id(group);
+ }
+
+ kfree(group_fds);
+
+ /* release reference to groups on error */
+ if (ret)
+ goto hot_reset_release;
+
+ info.count = hdr.count;
+ info.groups = groups;
+
+ /*
+ * Test whether all the affected devices are contained
+ * by the set of groups provided by the user.
+ */
+ ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
+ vfio_pci_validate_devs,
+ &info, slot);
+ if (!ret)
+ /* User has access, do the reset */
+ ret = slot ? pci_reset_slot(vdev->pdev->slot) :
+ pci_reset_bus(vdev->pdev->bus);
+
+hot_reset_release:
+ for (i--; i >= 0; i--)
+ vfio_group_put_external_user(groups[i].group);
+
+ kfree(groups);
+ return ret;
+ }
+
return -ENOTTY;
}
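A minimal userspace sketch of the two-call protocol implemented above for
VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, using the structures as defined in the
VFIO uAPI (includes and error handling trimmed; device_fd is assumed to be an
open VFIO device fd):

    struct vfio_pci_hot_reset_info hdr = { .argsz = sizeof(hdr) };

    if (ioctl(device_fd, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, &hdr) < 0 &&
        errno == ENOSPC) {
        /* hdr.count holds the number of affected devices */
        size_t sz = sizeof(hdr) +
                    hdr.count * sizeof(struct vfio_pci_dependent_device);
        struct vfio_pci_hot_reset_info *info = calloc(1, sz);

        info->argsz = sz;
        if (!ioctl(device_fd, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, info)) {
            /* info->devices[0..info->count - 1] describe the set */
        }
        free(info);
    }

VFIO_DEVICE_PCI_HOT_RESET then takes the group fds covering every group_id
reported here; the reset is refused unless the user's groups contain all
affected devices.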
diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c
index affa347..ffd0632 100644
--- a/drivers/vfio/pci/vfio_pci_config.c
+++ b/drivers/vfio/pci/vfio_pci_config.c
@@ -1012,6 +1012,7 @@ static int vfio_vc_cap_len(struct vfio_pci_device *vdev, u16 pos)
static int vfio_cap_len(struct vfio_pci_device *vdev, u8 cap, u8 pos)
{
struct pci_dev *pdev = vdev->pdev;
+ u32 dword;
u16 word;
u8 byte;
int ret;
@@ -1025,7 +1026,9 @@ static int vfio_cap_len(struct vfio_pci_device *vdev, u8 cap, u8 pos)
return pcibios_err_to_errno(ret);
if (PCI_X_CMD_VERSION(word)) {
- vdev->extended_caps = true;
+ /* Test for extended capabilities */
+ pci_read_config_dword(pdev, PCI_CFG_SPACE_SIZE, &dword);
+ vdev->extended_caps = (dword != 0);
return PCI_CAP_PCIX_SIZEOF_V2;
} else
return PCI_CAP_PCIX_SIZEOF_V0;
@@ -1037,9 +1040,11 @@ static int vfio_cap_len(struct vfio_pci_device *vdev, u8 cap, u8 pos)
return byte;
case PCI_CAP_ID_EXP:
- /* length based on version */
- vdev->extended_caps = true;
+ /* Test for extended capabilities */
+ pci_read_config_dword(pdev, PCI_CFG_SPACE_SIZE, &dword);
+ vdev->extended_caps = (dword != 0);
+ /* length based on version */
if ((pcie_caps_reg(pdev) & PCI_EXP_FLAGS_VERS) == 1)
return PCI_CAP_EXP_ENDPOINT_SIZEOF_V1;
else
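
The rationale for the two hunks above: a PCIe or PCI-X v2 capability alone does not prove that extended config space is reachable, for instance when the device sits behind a conventional PCI bridge, so the code probes the first dword at offset 0x100 (PCI_CFG_SPACE_SIZE). That dword would hold the first extended capability header and cannot legitimately be all zeros, so a zero read means the space is absent or truncated. A sketch of the same test from userspace, against an fd opened on the device's config space (the helper name is illustrative):

    #include <stdbool.h>
    #include <stdint.h>
    #include <unistd.h>

    /* Probe for extended PCI config space: offset 0x100 holds the first
     * extended capability header when present and tends to read back as
     * zero when extended space is unreachable. */
    static bool has_extended_config(int config_fd)
    {
            uint32_t dword = 0;

            if (pread(config_fd, &dword, sizeof(dword), 0x100) !=
                sizeof(dword))
                    return false;

            return dword != 0;
    }
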
diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c
index 4bc704e..641bc87 100644
--- a/drivers/vfio/pci/vfio_pci_intrs.c
+++ b/drivers/vfio/pci/vfio_pci_intrs.c
@@ -130,8 +130,8 @@ static int virqfd_enable(struct vfio_pci_device *vdev,
void (*thread)(struct vfio_pci_device *, void *),
void *data, struct virqfd **pvirqfd, int fd)
{
- struct file *file = NULL;
- struct eventfd_ctx *ctx = NULL;
+ struct fd irqfd;
+ struct eventfd_ctx *ctx;
struct virqfd *virqfd;
int ret = 0;
unsigned int events;
@@ -149,16 +149,16 @@ static int virqfd_enable(struct vfio_pci_device *vdev,
INIT_WORK(&virqfd->shutdown, virqfd_shutdown);
INIT_WORK(&virqfd->inject, virqfd_inject);
- file = eventfd_fget(fd);
- if (IS_ERR(file)) {
- ret = PTR_ERR(file);
- goto fail;
+ irqfd = fdget(fd);
+ if (!irqfd.file) {
+ ret = -EBADF;
+ goto err_fd;
}
- ctx = eventfd_ctx_fileget(file);
+ ctx = eventfd_ctx_fileget(irqfd.file);
if (IS_ERR(ctx)) {
ret = PTR_ERR(ctx);
- goto fail;
+ goto err_ctx;
}
virqfd->eventfd = ctx;
@@ -174,7 +174,7 @@ static int virqfd_enable(struct vfio_pci_device *vdev,
if (*pvirqfd) {
spin_unlock_irq(&vdev->irqlock);
ret = -EBUSY;
- goto fail;
+ goto err_busy;
}
*pvirqfd = virqfd;
@@ -187,7 +187,7 @@ static int virqfd_enable(struct vfio_pci_device *vdev,
init_waitqueue_func_entry(&virqfd->wait, virqfd_wakeup);
init_poll_funcptr(&virqfd->pt, virqfd_ptable_queue_proc);
- events = file->f_op->poll(file, &virqfd->pt);
+ events = irqfd.file->f_op->poll(irqfd.file, &virqfd->pt);
/*
* Check if there was an event already pending on the eventfd
@@ -202,17 +202,14 @@ static int virqfd_enable(struct vfio_pci_device *vdev,
* Do not drop the file until the irqfd is fully initialized,
* otherwise we might race against the POLLHUP.
*/
- fput(file);
+ fdput(irqfd);
return 0;
-
-fail:
- if (ctx && !IS_ERR(ctx))
- eventfd_ctx_put(ctx);
-
- if (file && !IS_ERR(file))
- fput(file);
-
+err_busy:
+ eventfd_ctx_put(ctx);
+err_ctx:
+ fdput(irqfd);
+err_fd:
kfree(virqfd);
return ret;
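
Two things change in the hunks above: eventfd_fget()/fput() give way to the lighter-weight fdget()/fdput() pair, and the single catch-all fail: label, which had to inspect each pointer to decide what to undo, becomes one label per acquired resource so teardown falls through in exact reverse order of acquisition. A self-contained toy of that labeled-unwind idiom (resources and names purely illustrative):

    #include <stdio.h>
    #include <stdlib.h>

    /* Acquire resources in order; on failure jump to the label that
     * releases everything obtained so far, falling through in reverse. */
    static int setup(const char *path, FILE **out, char **buf)
    {
            int ret = 0;

            *out = fopen(path, "r");
            if (!*out) {
                    ret = -1;
                    goto err_open;
            }

            *buf = malloc(4096);
            if (!*buf) {
                    ret = -1;
                    goto err_buf;
            }

            return 0;

    err_buf:
            fclose(*out); /* undo in reverse order of acquisition */
    err_open:
            return ret;
    }
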
diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
index 842f450..1eab4ac 100644
--- a/drivers/vfio/vfio.c
+++ b/drivers/vfio/vfio.c
@@ -1109,7 +1109,7 @@ static int vfio_group_get_device_fd(struct vfio_group *group, char *buf)
* We can't use anon_inode_getfd() because we need to modify
* the f_mode flags directly to allow more than just ioctls
*/
- ret = get_unused_fd();
+ ret = get_unused_fd_flags(O_CLOEXEC);
if (ret < 0) {
device->ops->release(device->device_data);
break;
@@ -1353,6 +1353,68 @@ static const struct file_operations vfio_device_fops = {
};
/**
+ * External user API, exposed by symbols to be linked dynamically.
+ *
+ * The protocol includes:
+ * 1. do normal VFIO init operation:
+ * - opening a new container;
+ * - attaching group(s) to it;
+ * - setting an IOMMU driver for a container.
+ * When IOMMU is set for a container, all groups in it are
+ * considered ready to use by an external user.
+ *
+ * 2. User space passes a group fd to an external user.
+ * The external user calls vfio_group_get_external_user()
+ * to verify that:
+ * - the group is initialized;
+ * - IOMMU is set for it.
+ * If both checks pass, vfio_group_get_external_user()
+ * increments the container user counter to prevent the
+ * VFIO group from being disposed of before KVM exits.
+ *
+ * 3. The external user calls vfio_external_user_iommu_id()
+ * to know an IOMMU ID.
+ *
+ * 4. When the external KVM finishes, it calls
+ * vfio_group_put_external_user() to release the VFIO group.
+ * This call decrements the container user counter.
+ */
+struct vfio_group *vfio_group_get_external_user(struct file *filep)
+{
+ struct vfio_group *group = filep->private_data;
+
+ if (filep->f_op != &vfio_group_fops)
+ return ERR_PTR(-EINVAL);
+
+ if (!atomic_inc_not_zero(&group->container_users))
+ return ERR_PTR(-EINVAL);
+
+ if (!group->container->iommu_driver ||
+ !vfio_group_viable(group)) {
+ atomic_dec(&group->container_users);
+ return ERR_PTR(-EINVAL);
+ }
+
+ vfio_group_get(group);
+
+ return group;
+}
+EXPORT_SYMBOL_GPL(vfio_group_get_external_user);
+
+void vfio_group_put_external_user(struct vfio_group *group)
+{
+ vfio_group_put(group);
+ vfio_group_try_dissolve_container(group);
+}
+EXPORT_SYMBOL_GPL(vfio_group_put_external_user);
+
+int vfio_external_user_iommu_id(struct vfio_group *group)
+{
+ return iommu_group_id(group->iommu_group);
+}
+EXPORT_SYMBOL_GPL(vfio_external_user_iommu_id);
+
+/**
* Module/class support
*/
static char *vfio_devnode(struct device *dev, umode_t *mode)
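
A sketch of how an in-kernel consumer (KVM is the case the comment block names) would walk protocol steps 2 to 4, mirroring the hot-reset ioctl's own use of this API earlier in the series; it assumes the extern declarations land in include/linux/vfio.h and abbreviates teardown:

    #include <linux/errno.h>
    #include <linux/err.h>
    #include <linux/file.h>
    #include <linux/vfio.h>

    /* Resolve a group fd handed in from userspace, pin the group across
     * our lifetime, query its IOMMU ID, and release it on teardown. */
    static int attach_vfio_group(int group_fd)
    {
            struct fd f = fdget(group_fd);
            struct vfio_group *group;
            int iommu_id;

            if (!f.file)
                    return -EBADF;

            group = vfio_group_get_external_user(f.file);
            fdput(f); /* the group reference outlives the fd reference */
            if (IS_ERR(group))
                    return PTR_ERR(group);

            iommu_id = vfio_external_user_iommu_id(group);
            /* ... hand iommu_id to the IOMMU backend, keep group held ... */

            vfio_group_put_external_user(group); /* on teardown */
            return 0;
    }
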
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 0c27c7d..4b79a1f 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -1,12 +1,12 @@
/*******************************************************************************
* Vhost kernel TCM fabric driver for virtio SCSI initiators
*
- * (C) Copyright 2010-2012 RisingTide Systems LLC.
+ * (C) Copyright 2010-2013 Datera, Inc.
* (C) Copyright 2010-2012 IBM Corp.
*
* Licensed to the Linux Foundation under the General Public License (GPL) version 2.
*
- * Authors: Nicholas A. Bellinger <nab@risingtidesystems.com>
+ * Authors: Nicholas A. Bellinger <nab@daterainc.com>
* Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
*
* This program is free software; you can redistribute it and/or modify
@@ -48,12 +48,16 @@
#include <linux/virtio_scsi.h>
#include <linux/llist.h>
#include <linux/bitmap.h>
+#include <linux/percpu_ida.h>
#include "vhost.h"
#define TCM_VHOST_VERSION "v0.1"
#define TCM_VHOST_NAMELEN 256
#define TCM_VHOST_MAX_CDB_SIZE 32
+#define TCM_VHOST_DEFAULT_TAGS 256
+#define TCM_VHOST_PREALLOC_SGLS 2048
+#define TCM_VHOST_PREALLOC_PAGES 2048
struct vhost_scsi_inflight {
/* Wait for the flush operation to finish */
@@ -79,6 +83,7 @@ struct tcm_vhost_cmd {
u32 tvc_lun;
/* Pointer to the SGL formatted memory from virtio-scsi */
struct scatterlist *tvc_sgl;
+ struct page **tvc_upages;
/* Pointer to response */
struct virtio_scsi_cmd_resp __user *tvc_resp;
/* Pointer to vhost_scsi for our device */
@@ -450,17 +455,16 @@ static void tcm_vhost_release_cmd(struct se_cmd *se_cmd)
{
struct tcm_vhost_cmd *tv_cmd = container_of(se_cmd,
struct tcm_vhost_cmd, tvc_se_cmd);
+ struct se_session *se_sess = se_cmd->se_sess;
if (tv_cmd->tvc_sgl_count) {
u32 i;
for (i = 0; i < tv_cmd->tvc_sgl_count; i++)
put_page(sg_page(&tv_cmd->tvc_sgl[i]));
-
- kfree(tv_cmd->tvc_sgl);
}
tcm_vhost_put_inflight(tv_cmd->inflight);
- kfree(tv_cmd);
+ percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);
}
static int tcm_vhost_shutdown_session(struct se_session *se_sess)
@@ -704,7 +708,7 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
}
static struct tcm_vhost_cmd *
-vhost_scsi_allocate_cmd(struct vhost_virtqueue *vq,
+vhost_scsi_get_tag(struct vhost_virtqueue *vq,
struct tcm_vhost_tpg *tpg,
struct virtio_scsi_cmd_req *v_req,
u32 exp_data_len,
@@ -712,18 +716,27 @@ vhost_scsi_allocate_cmd(struct vhost_virtqueue *vq,
{
struct tcm_vhost_cmd *cmd;
struct tcm_vhost_nexus *tv_nexus;
+ struct se_session *se_sess;
+ struct scatterlist *sg;
+ struct page **pages;
+ int tag;
tv_nexus = tpg->tpg_nexus;
if (!tv_nexus) {
pr_err("Unable to locate active struct tcm_vhost_nexus\n");
return ERR_PTR(-EIO);
}
+ se_sess = tv_nexus->tvn_se_sess;
- cmd = kzalloc(sizeof(struct tcm_vhost_cmd), GFP_ATOMIC);
- if (!cmd) {
- pr_err("Unable to allocate struct tcm_vhost_cmd\n");
- return ERR_PTR(-ENOMEM);
- }
+ tag = percpu_ida_alloc(&se_sess->sess_tag_pool, GFP_KERNEL);
+ cmd = &((struct tcm_vhost_cmd *)se_sess->sess_cmd_map)[tag];
+ sg = cmd->tvc_sgl;
+ pages = cmd->tvc_upages;
+ memset(cmd, 0, sizeof(struct tcm_vhost_cmd));
+
+ cmd->tvc_sgl = sg;
+ cmd->tvc_upages = pages;
+ cmd->tvc_se_cmd.map_tag = tag;
cmd->tvc_tag = v_req->tag;
cmd->tvc_task_attr = v_req->task_attr;
cmd->tvc_exp_data_len = exp_data_len;
@@ -740,7 +753,8 @@ vhost_scsi_allocate_cmd(struct vhost_virtqueue *vq,
* Returns the number of scatterlist entries used or -errno on error.
*/
static int
-vhost_scsi_map_to_sgl(struct scatterlist *sgl,
+vhost_scsi_map_to_sgl(struct tcm_vhost_cmd *tv_cmd,
+ struct scatterlist *sgl,
unsigned int sgl_count,
struct iovec *iov,
int write)
@@ -752,13 +766,25 @@ vhost_scsi_map_to_sgl(struct scatterlist *sgl,
struct page **pages;
int ret, i;
+ if (sgl_count > TCM_VHOST_PREALLOC_SGLS) {
+ pr_err("vhost_scsi_map_to_sgl() psgl_count: %u greater than"
+ " preallocated TCM_VHOST_PREALLOC_SGLS: %u\n",
+ sgl_count, TCM_VHOST_PREALLOC_SGLS);
+ return -ENOBUFS;
+ }
+
pages_nr = iov_num_pages(iov);
if (pages_nr > sgl_count)
return -ENOBUFS;
- pages = kmalloc(pages_nr * sizeof(struct page *), GFP_KERNEL);
- if (!pages)
- return -ENOMEM;
+ if (pages_nr > TCM_VHOST_PREALLOC_PAGES) {
+ pr_err("vhost_scsi_map_to_sgl() pages_nr: %u greater than"
+ " preallocated TCM_VHOST_PREALLOC_PAGES: %u\n",
+ pages_nr, TCM_VHOST_PREALLOC_PAGES);
+ return -ENOBUFS;
+ }
+
+ pages = tv_cmd->tvc_upages;
ret = get_user_pages_fast((unsigned long)ptr, pages_nr, write, pages);
/* No pages were pinned */
@@ -783,7 +809,6 @@ vhost_scsi_map_to_sgl(struct scatterlist *sgl,
}
out:
- kfree(pages);
return ret;
}
@@ -807,24 +832,20 @@ vhost_scsi_map_iov_to_sgl(struct tcm_vhost_cmd *cmd,
/* TODO overflow checking */
- sg = kmalloc(sizeof(cmd->tvc_sgl[0]) * sgl_count, GFP_ATOMIC);
- if (!sg)
- return -ENOMEM;
- pr_debug("%s sg %p sgl_count %u is_err %d\n", __func__,
- sg, sgl_count, !sg);
+ sg = cmd->tvc_sgl;
+ pr_debug("%s sg %p sgl_count %u\n", __func__, sg, sgl_count);
sg_init_table(sg, sgl_count);
- cmd->tvc_sgl = sg;
cmd->tvc_sgl_count = sgl_count;
pr_debug("Mapping %u iovecs for %u pages\n", niov, sgl_count);
for (i = 0; i < niov; i++) {
- ret = vhost_scsi_map_to_sgl(sg, sgl_count, &iov[i], write);
+ ret = vhost_scsi_map_to_sgl(cmd, sg, sgl_count, &iov[i],
+ write);
if (ret < 0) {
for (i = 0; i < cmd->tvc_sgl_count; i++)
put_page(sg_page(&cmd->tvc_sgl[i]));
- kfree(cmd->tvc_sgl);
- cmd->tvc_sgl = NULL;
+
cmd->tvc_sgl_count = 0;
return ret;
}
@@ -989,10 +1010,10 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
for (i = 0; i < data_num; i++)
exp_data_len += vq->iov[data_first + i].iov_len;
- cmd = vhost_scsi_allocate_cmd(vq, tpg, &v_req,
- exp_data_len, data_direction);
+ cmd = vhost_scsi_get_tag(vq, tpg, &v_req,
+ exp_data_len, data_direction);
if (IS_ERR(cmd)) {
- vq_err(vq, "vhost_scsi_allocate_cmd failed %ld\n",
+ vq_err(vq, "vhost_scsi_get_tag failed %ld\n",
PTR_ERR(cmd));
goto err_cmd;
}
@@ -1654,11 +1675,31 @@ static void tcm_vhost_drop_nodeacl(struct se_node_acl *se_acl)
kfree(nacl);
}
+static void tcm_vhost_free_cmd_map_res(struct tcm_vhost_nexus *nexus,
+ struct se_session *se_sess)
+{
+ struct tcm_vhost_cmd *tv_cmd;
+ unsigned int i;
+
+ if (!se_sess->sess_cmd_map)
+ return;
+
+ for (i = 0; i < TCM_VHOST_DEFAULT_TAGS; i++) {
+ tv_cmd = &((struct tcm_vhost_cmd *)se_sess->sess_cmd_map)[i];
+
+ kfree(tv_cmd->tvc_sgl);
+ kfree(tv_cmd->tvc_upages);
+ }
+}
+
static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tpg,
const char *name)
{
struct se_portal_group *se_tpg;
+ struct se_session *se_sess;
struct tcm_vhost_nexus *tv_nexus;
+ struct tcm_vhost_cmd *tv_cmd;
+ unsigned int i;
mutex_lock(&tpg->tv_tpg_mutex);
if (tpg->tpg_nexus) {
@@ -1675,14 +1716,37 @@ static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tpg,
return -ENOMEM;
}
/*
- * Initialize the struct se_session pointer
+ * Initialize the struct se_session pointer and set up the tag pool
+ * for struct tcm_vhost_cmd descriptors
*/
- tv_nexus->tvn_se_sess = transport_init_session();
+ tv_nexus->tvn_se_sess = transport_init_session_tags(
+ TCM_VHOST_DEFAULT_TAGS,
+ sizeof(struct tcm_vhost_cmd));
if (IS_ERR(tv_nexus->tvn_se_sess)) {
mutex_unlock(&tpg->tv_tpg_mutex);
kfree(tv_nexus);
return -ENOMEM;
}
+ se_sess = tv_nexus->tvn_se_sess;
+ for (i = 0; i < TCM_VHOST_DEFAULT_TAGS; i++) {
+ tv_cmd = &((struct tcm_vhost_cmd *)se_sess->sess_cmd_map)[i];
+
+ tv_cmd->tvc_sgl = kzalloc(sizeof(struct scatterlist) *
+ TCM_VHOST_PREALLOC_SGLS, GFP_KERNEL);
+ if (!tv_cmd->tvc_sgl) {
+ mutex_unlock(&tpg->tv_tpg_mutex);
+ pr_err("Unable to allocate tv_cmd->tvc_sgl\n");
+ goto out;
+ }
+
+ tv_cmd->tvc_upages = kzalloc(sizeof(struct page *) *
+ TCM_VHOST_PREALLOC_PAGES, GFP_KERNEL);
+ if (!tv_cmd->tvc_upages) {
+ mutex_unlock(&tpg->tv_tpg_mutex);
+ pr_err("Unable to allocate tv_cmd->tvc_upages\n");
+ goto out;
+ }
+ }
/*
* Since we are running in 'demo mode' this call will generate a
* struct se_node_acl for the tcm_vhost struct se_portal_group with
@@ -1694,9 +1758,7 @@ static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tpg,
mutex_unlock(&tpg->tv_tpg_mutex);
pr_debug("core_tpg_check_initiator_node_acl() failed"
" for %s\n", name);
- transport_free_session(tv_nexus->tvn_se_sess);
- kfree(tv_nexus);
- return -ENOMEM;
+ goto out;
}
/*
* Now register the TCM vhost virtual I_T Nexus as active with the
@@ -1708,6 +1770,12 @@ static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tpg,
mutex_unlock(&tpg->tv_tpg_mutex);
return 0;
+
+out:
+ tcm_vhost_free_cmd_map_res(tv_nexus, se_sess);
+ transport_free_session(se_sess);
+ kfree(tv_nexus);
+ return -ENOMEM;
}
static int tcm_vhost_drop_nexus(struct tcm_vhost_tpg *tpg)
@@ -1747,6 +1815,8 @@ static int tcm_vhost_drop_nexus(struct tcm_vhost_tpg *tpg)
pr_debug("TCM_vhost_ConfigFS: Removing I_T Nexus to emulated"
" %s Initiator Port: %s\n", tcm_vhost_dump_proto_id(tpg->tport),
tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
+
+ tcm_vhost_free_cmd_map_res(tv_nexus, se_sess);
/*
* Release the SCSI I_T Nexus to the emulated vhost Target Port
*/
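
The vhost-scsi rework above trades per-I/O kzalloc()/kfree() of struct tcm_vhost_cmd, plus its SGL and user-page vectors, for a session-wide pool: everything is preallocated once in tcm_vhost_make_nexus() and descriptors are recycled by percpu_ida tag, so the submission path never allocates. A toy single-threaded model of the tag-pool idea (the kernel's percpu_ida adds per-CPU tag caching and can block until a tag frees up):

    #include <stdlib.h>

    #define POOL_TAGS 256 /* mirrors TCM_VHOST_DEFAULT_TAGS */

    struct pool {
            void *descs;         /* POOL_TAGS preallocated descriptors */
            size_t desc_size;
            int free[POOL_TAGS]; /* stack of free tags */
            int top;
    };

    static int pool_init(struct pool *p, size_t desc_size)
    {
            int i;

            p->descs = calloc(POOL_TAGS, desc_size);
            if (!p->descs)
                    return -1;
            p->desc_size = desc_size;
            for (i = 0; i < POOL_TAGS; i++)
                    p->free[i] = i;
            p->top = POOL_TAGS;
            return 0;
    }

    static int tag_alloc(struct pool *p)
    {
            return p->top ? p->free[--p->top] : -1; /* -1: pool exhausted */
    }

    static void tag_free(struct pool *p, int tag)
    {
            p->free[p->top++] = tag;
    }

    static void *tag_to_desc(struct pool *p, int tag)
    {
            return (char *)p->descs + (size_t)tag * p->desc_size;
    }
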
diff --git a/drivers/video/acornfb.c b/drivers/video/acornfb.c
index 6488a73..7e8346e 100644
--- a/drivers/video/acornfb.c
+++ b/drivers/video/acornfb.c
@@ -38,14 +38,6 @@
#include "acornfb.h"
/*
- * VIDC machines can't do 16 or 32BPP modes.
- */
-#ifdef HAS_VIDC
-#undef FBCON_HAS_CFB16
-#undef FBCON_HAS_CFB32
-#endif
-
-/*
* Default resolution.
* NOTE that it has to be supported in the table towards
* the end of this file.
@@ -106,238 +98,6 @@ static struct vidc_timing current_vidc;
extern unsigned int vram_size; /* set by setup.c */
-#ifdef HAS_VIDC
-
-#define MAX_SIZE 480*1024
-
-/* CTL VIDC Actual
- * 24.000 0 8.000
- * 25.175 0 8.392
- * 36.000 0 12.000
- * 24.000 1 12.000
- * 25.175 1 12.588
- * 24.000 2 16.000
- * 25.175 2 16.783
- * 36.000 1 18.000
- * 24.000 3 24.000
- * 36.000 2 24.000
- * 25.175 3 25.175
- * 36.000 3 36.000
- */
-struct pixclock {
- u_long min_clock;
- u_long max_clock;
- u_int vidc_ctl;
- u_int vid_ctl;
-};
-
-static struct pixclock arc_clocks[] = {
- /* we allow +/-1% on these */
- { 123750, 126250, VIDC_CTRL_DIV3, VID_CTL_24MHz }, /* 8.000MHz */
- { 82500, 84167, VIDC_CTRL_DIV2, VID_CTL_24MHz }, /* 12.000MHz */
- { 61875, 63125, VIDC_CTRL_DIV1_5, VID_CTL_24MHz }, /* 16.000MHz */
- { 41250, 42083, VIDC_CTRL_DIV1, VID_CTL_24MHz }, /* 24.000MHz */
-};
-
-static struct pixclock *
-acornfb_valid_pixrate(struct fb_var_screeninfo *var)
-{
- u_long pixclock = var->pixclock;
- u_int i;
-
- if (!var->pixclock)
- return NULL;
-
- for (i = 0; i < ARRAY_SIZE(arc_clocks); i++)
- if (pixclock > arc_clocks[i].min_clock &&
- pixclock < arc_clocks[i].max_clock)
- return arc_clocks + i;
-
- return NULL;
-}
-
-/* VIDC Rules:
- * hcr : must be even (interlace, hcr/2 must be even)
- * hswr : must be even
- * hdsr : must be odd
- * hder : must be odd
- *
- * vcr : must be odd
- * vswr : >= 1
- * vdsr : >= 1
- * vder : >= vdsr
- * if interlaced, then hcr/2 must be even
- */
-static void
-acornfb_set_timing(struct fb_var_screeninfo *var)
-{
- struct pixclock *pclk;
- struct vidc_timing vidc;
- u_int horiz_correction;
- u_int sync_len, display_start, display_end, cycle;
- u_int is_interlaced;
- u_int vid_ctl, vidc_ctl;
- u_int bandwidth;
-
- memset(&vidc, 0, sizeof(vidc));
-
- pclk = acornfb_valid_pixrate(var);
- vidc_ctl = pclk->vidc_ctl;
- vid_ctl = pclk->vid_ctl;
-
- bandwidth = var->pixclock * 8 / var->bits_per_pixel;
- /* 25.175, 4bpp = 79.444ns per byte, 317.776ns per word: fifo = 2,6 */
- if (bandwidth > 143500)
- vidc_ctl |= VIDC_CTRL_FIFO_3_7;
- else if (bandwidth > 71750)
- vidc_ctl |= VIDC_CTRL_FIFO_2_6;
- else if (bandwidth > 35875)
- vidc_ctl |= VIDC_CTRL_FIFO_1_5;
- else
- vidc_ctl |= VIDC_CTRL_FIFO_0_4;
-
- switch (var->bits_per_pixel) {
- case 1:
- horiz_correction = 19;
- vidc_ctl |= VIDC_CTRL_1BPP;
- break;
-
- case 2:
- horiz_correction = 11;
- vidc_ctl |= VIDC_CTRL_2BPP;
- break;
-
- case 4:
- horiz_correction = 7;
- vidc_ctl |= VIDC_CTRL_4BPP;
- break;
-
- default:
- case 8:
- horiz_correction = 5;
- vidc_ctl |= VIDC_CTRL_8BPP;
- break;
- }
-
- if (var->sync & FB_SYNC_COMP_HIGH_ACT) /* should be FB_SYNC_COMP */
- vidc_ctl |= VIDC_CTRL_CSYNC;
- else {
- if (!(var->sync & FB_SYNC_HOR_HIGH_ACT))
- vid_ctl |= VID_CTL_HS_NHSYNC;
-
- if (!(var->sync & FB_SYNC_VERT_HIGH_ACT))
- vid_ctl |= VID_CTL_VS_NVSYNC;
- }
-
- sync_len = var->hsync_len;
- display_start = sync_len + var->left_margin;
- display_end = display_start + var->xres;
- cycle = display_end + var->right_margin;
-
- /* if interlaced, then hcr/2 must be even */
- is_interlaced = (var->vmode & FB_VMODE_MASK) == FB_VMODE_INTERLACED;
-
- if (is_interlaced) {
- vidc_ctl |= VIDC_CTRL_INTERLACE;
- if (cycle & 2) {
- cycle += 2;
- var->right_margin += 2;
- }
- }
-
- vidc.h_cycle = (cycle - 2) / 2;
- vidc.h_sync_width = (sync_len - 2) / 2;
- vidc.h_border_start = (display_start - 1) / 2;
- vidc.h_display_start = (display_start - horiz_correction) / 2;
- vidc.h_display_end = (display_end - horiz_correction) / 2;
- vidc.h_border_end = (display_end - 1) / 2;
- vidc.h_interlace = (vidc.h_cycle + 1) / 2;
-
- sync_len = var->vsync_len;
- display_start = sync_len + var->upper_margin;
- display_end = display_start + var->yres;
- cycle = display_end + var->lower_margin;
-
- if (is_interlaced)
- cycle = (cycle - 3) / 2;
- else
- cycle = cycle - 1;
-
- vidc.v_cycle = cycle;
- vidc.v_sync_width = sync_len - 1;
- vidc.v_border_start = display_start - 1;
- vidc.v_display_start = vidc.v_border_start;
- vidc.v_display_end = display_end - 1;
- vidc.v_border_end = vidc.v_display_end;
-
- if (machine_is_a5k())
- __raw_writeb(vid_ctl, IOEB_VID_CTL);
-
- if (memcmp(&current_vidc, &vidc, sizeof(vidc))) {
- current_vidc = vidc;
-
- vidc_writel(0xe0000000 | vidc_ctl);
- vidc_writel(0x80000000 | (vidc.h_cycle << 14));
- vidc_writel(0x84000000 | (vidc.h_sync_width << 14));
- vidc_writel(0x88000000 | (vidc.h_border_start << 14));
- vidc_writel(0x8c000000 | (vidc.h_display_start << 14));
- vidc_writel(0x90000000 | (vidc.h_display_end << 14));
- vidc_writel(0x94000000 | (vidc.h_border_end << 14));
- vidc_writel(0x98000000);
- vidc_writel(0x9c000000 | (vidc.h_interlace << 14));
- vidc_writel(0xa0000000 | (vidc.v_cycle << 14));
- vidc_writel(0xa4000000 | (vidc.v_sync_width << 14));
- vidc_writel(0xa8000000 | (vidc.v_border_start << 14));
- vidc_writel(0xac000000 | (vidc.v_display_start << 14));
- vidc_writel(0xb0000000 | (vidc.v_display_end << 14));
- vidc_writel(0xb4000000 | (vidc.v_border_end << 14));
- vidc_writel(0xb8000000);
- vidc_writel(0xbc000000);
- }
-#ifdef DEBUG_MODE_SELECTION
- printk(KERN_DEBUG "VIDC registers for %dx%dx%d:\n", var->xres,
- var->yres, var->bits_per_pixel);
- printk(KERN_DEBUG " H-cycle : %d\n", vidc.h_cycle);
- printk(KERN_DEBUG " H-sync-width : %d\n", vidc.h_sync_width);
- printk(KERN_DEBUG " H-border-start : %d\n", vidc.h_border_start);
- printk(KERN_DEBUG " H-display-start : %d\n", vidc.h_display_start);
- printk(KERN_DEBUG " H-display-end : %d\n", vidc.h_display_end);
- printk(KERN_DEBUG " H-border-end : %d\n", vidc.h_border_end);
- printk(KERN_DEBUG " H-interlace : %d\n", vidc.h_interlace);
- printk(KERN_DEBUG " V-cycle : %d\n", vidc.v_cycle);
- printk(KERN_DEBUG " V-sync-width : %d\n", vidc.v_sync_width);
- printk(KERN_DEBUG " V-border-start : %d\n", vidc.v_border_start);
- printk(KERN_DEBUG " V-display-start : %d\n", vidc.v_display_start);
- printk(KERN_DEBUG " V-display-end : %d\n", vidc.v_display_end);
- printk(KERN_DEBUG " V-border-end : %d\n", vidc.v_border_end);
- printk(KERN_DEBUG " VIDC Ctrl (E) : 0x%08X\n", vidc_ctl);
- printk(KERN_DEBUG " IOEB Ctrl : 0x%08X\n", vid_ctl);
-#endif
-}
-
-static int
-acornfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
- u_int trans, struct fb_info *info)
-{
- union palette pal;
-
- if (regno >= current_par.palette_size)
- return 1;
-
- pal.p = 0;
- pal.vidc.reg = regno;
- pal.vidc.red = red >> 12;
- pal.vidc.green = green >> 12;
- pal.vidc.blue = blue >> 12;
-
- current_par.palette[regno] = pal;
-
- vidc_writel(pal.p);
-
- return 0;
-}
-#endif
-
#ifdef HAS_VIDC20
#include <mach/acornfb.h>
@@ -634,16 +394,7 @@ acornfb_adjust_timing(struct fb_info *info, struct fb_var_screeninfo *var, u_int
/* hsync_len must be even */
var->hsync_len = (var->hsync_len + 1) & ~1;
-#ifdef HAS_VIDC
- /* left_margin must be odd */
- if ((var->left_margin & 1) == 0) {
- var->left_margin -= 1;
- var->right_margin += 1;
- }
-
- /* right_margin must be odd */
- var->right_margin |= 1;
-#elif defined(HAS_VIDC20)
+#if defined(HAS_VIDC20)
/* left_margin must be even */
if (var->left_margin & 1) {
var->left_margin += 1;
@@ -787,11 +538,7 @@ static int acornfb_set_par(struct fb_info *info)
break;
case 8:
current_par.palette_size = VIDC_PALETTE_SIZE;
-#ifdef HAS_VIDC
- info->fix.visual = FB_VISUAL_STATIC_PSEUDOCOLOR;
-#else
info->fix.visual = FB_VISUAL_PSEUDOCOLOR;
-#endif
break;
#ifdef HAS_VIDC20
case 16:
@@ -971,9 +718,6 @@ static void acornfb_init_fbinfo(void)
#if defined(HAS_VIDC20)
fb_info.var.red.length = 8;
fb_info.var.transp.length = 4;
-#elif defined(HAS_VIDC)
- fb_info.var.red.length = 4;
- fb_info.var.transp.length = 1;
#endif
fb_info.var.green = fb_info.var.red;
fb_info.var.blue = fb_info.var.red;
@@ -1310,14 +1054,6 @@ static int acornfb_probe(struct platform_device *dev)
fb_info.fix.smem_start = handle;
}
#endif
-#if defined(HAS_VIDC)
- /*
- * Archimedes/A5000 machines use a fixed address for their
- * framebuffers. Free unused pages
- */
- free_unused_pages(PAGE_OFFSET + size, PAGE_OFFSET + MAX_SIZE);
-#endif
-
fb_info.fix.smem_len = size;
current_par.palette_size = VIDC_PALETTE_SIZE;
diff --git a/drivers/video/acornfb.h b/drivers/video/acornfb.h
index fb2a7ff..175c8ff 100644
--- a/drivers/video/acornfb.h
+++ b/drivers/video/acornfb.h
@@ -13,10 +13,6 @@
#include <asm/hardware/iomd.h>
#define VIDC_PALETTE_SIZE 256
#define VIDC_NAME "VIDC20"
-#elif defined(HAS_VIDC)
-#include <asm/hardware/memc.h>
-#define VIDC_PALETTE_SIZE 16
-#define VIDC_NAME "VIDC"
#endif
#define EXTEND8(x) ((x)|(x)<<8)
@@ -101,31 +97,6 @@ struct modex_params {
const struct modey_params *modey;
};
-#ifdef HAS_VIDC
-
-#define VID_CTL_VS_NVSYNC (1 << 3)
-#define VID_CTL_HS_NHSYNC (1 << 2)
-#define VID_CTL_24MHz (0)
-#define VID_CTL_25MHz (1)
-#define VID_CTL_36MHz (2)
-
-#define VIDC_CTRL_CSYNC (1 << 7)
-#define VIDC_CTRL_INTERLACE (1 << 6)
-#define VIDC_CTRL_FIFO_0_4 (0 << 4)
-#define VIDC_CTRL_FIFO_1_5 (1 << 4)
-#define VIDC_CTRL_FIFO_2_6 (2 << 4)
-#define VIDC_CTRL_FIFO_3_7 (3 << 4)
-#define VIDC_CTRL_1BPP (0 << 2)
-#define VIDC_CTRL_2BPP (1 << 2)
-#define VIDC_CTRL_4BPP (2 << 2)
-#define VIDC_CTRL_8BPP (3 << 2)
-#define VIDC_CTRL_DIV3 (0 << 0)
-#define VIDC_CTRL_DIV2 (1 << 0)
-#define VIDC_CTRL_DIV1_5 (2 << 0)
-#define VIDC_CTRL_DIV1 (3 << 0)
-
-#endif
-
#ifdef HAS_VIDC20
/*
* VIDC20 registers
diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
index 285d552..3c14e43 100644
--- a/drivers/video/logo/logo_linux_clut224.ppm
+++ b/drivers/video/logo/logo_linux_clut224.ppm
@@ -1,883 +1,1604 @@
P3
+# Standard 224-color Linux logo
80 80
255
[... 80x80 PPM pixel data omitted: the hunk swaps the old logo's 883 lines of raw RGB triplets for the new standard 224-color Linux logo's 1604 lines; the pixel values carry no further reviewable content ...]
-2 2 6 2 2 6 2 2 6 2 2 6 2 2 6 2 2 6 38 38 38 46 46 46
-26 26 26 106 106 106 54 54 54 18 18 18 6 6 6 0 0 0 0 0 0 0 0 0
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
-
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
-0 0 0 6 6 6 14 14 14 22 22 22 30 30 30 38 38 38 50 50 50 70 70 70
-106 106 106 185 146 40 226 170 11 242 186 14 246 190 14 246 190 14 246 190 14 154 114 10
-6 6 6 74 74 74 226 226 226 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 231 231 231 250 250 250 253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 237 201 50 241 196 14 241 208 19 232 195 16 35 31 12
-2 2 6 2 2 6 2 2 6 2 2 6 2 2 6 6 6 6 30 30 30 26 26 26
-204 160 10 165 152 80 66 66 66 26 26 26 6 6 6 0 0 0 0 0 0 0 0 0
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
-
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
-6 6 6 18 18 18 38 38 38 58 58 58 78 78 78 86 86 86 101 101 101 124 127 131
-174 140 55 210 150 10 234 174 13 246 186 14 246 190 14 246 190 14 246 190 14 237 188 10
-98 70 6 2 2 6 46 46 46 198 198 198 253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 234 234 234 242 242 242 253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 214 187 87 242 186 14 241 196 14 204 160 10 20 16 6
-2 2 6 2 2 6 2 2 6 2 2 6 2 2 6 2 2 6 6 6 6 121 92 8
-238 202 15 232 195 16 82 82 82 34 34 34 10 10 10 0 0 0 0 0 0 0 0 0
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
-
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
-14 14 14 38 38 38 70 70 70 148 132 55 185 146 40 200 144 11 197 138 11 197 138 11
-213 154 11 226 170 11 242 186 14 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14
-220 174 15 35 31 12 2 2 6 22 22 22 151 151 151 250 250 250 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 250 250 250 242 242 242 214 187 87 239 182 13 237 188 10 213 154 11 35 31 12
-2 2 6 2 2 6 2 2 6 2 2 6 2 2 6 2 2 6 62 42 6 220 174 15
-237 188 10 237 188 10 113 101 86 42 42 42 14 14 14 0 0 0 0 0 0 0 0 0
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
-
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 6 6 6
-22 22 22 54 54 54 148 132 55 213 154 11 226 170 11 230 174 11 226 170 11 226 170 11
-236 178 12 242 186 14 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14
-241 196 14 184 138 11 10 10 10 2 2 6 6 6 6 116 116 116 242 242 242 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 231 231 231 198 198 198 213 164 39 236 178 12 236 178 12 210 150 10 137 92 6
-20 16 6 2 2 6 2 2 6 2 2 6 6 6 6 62 42 6 200 144 11 236 178 12
-239 182 13 239 182 13 124 112 88 58 58 58 22 22 22 6 6 6 0 0 0 0 0 0
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
-
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 10 10 10
-30 30 30 70 70 70 169 125 40 226 170 11 239 182 13 242 186 14 242 186 14 246 186 14
-246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14
-246 190 14 232 195 16 98 70 6 2 2 6 2 2 6 2 2 6 66 66 66 221 221 221
-253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 202 202 202 198 198 198 213 164 39 230 174 11 230 174 11 216 158 10 192 133 9
-163 110 8 120 80 7 98 70 6 120 80 7 167 114 7 197 138 11 226 170 11 239 182 13
-242 186 14 242 186 14 165 152 80 78 78 78 34 34 34 14 14 14 6 6 6 0 0 0
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
-
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 6 6 6
-30 30 30 78 78 78 185 146 40 226 170 11 239 182 13 246 190 14 246 190 14 246 190 14
-246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14
-246 190 14 241 196 14 204 160 10 20 16 6 2 2 6 2 2 6 2 2 6 38 38 38
-218 218 218 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253
-250 250 250 202 202 202 198 198 198 213 164 39 226 170 11 236 178 12 224 166 10 210 150 10
-200 144 11 197 138 11 192 133 9 197 138 11 210 150 10 226 170 11 242 186 14 246 190 14
-246 190 14 246 186 14 220 174 15 124 112 88 62 62 62 30 30 30 14 14 14 6 6 6
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
-
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 10 10 10
-30 30 30 78 78 78 174 140 55 224 166 10 239 182 13 246 190 14 246 190 14 246 190 14
-246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14
-246 190 14 246 190 14 241 196 14 139 102 15 2 2 6 2 2 6 2 2 6 2 2 6
-78 78 78 250 250 250 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253
-250 250 250 214 214 214 198 198 198 185 146 40 219 162 10 236 178 12 234 174 13 224 166 10
-216 158 10 213 154 11 213 154 11 216 158 10 226 170 11 239 182 13 246 190 14 246 190 14
-246 190 14 246 190 14 242 186 14 213 164 39 101 101 101 58 58 58 30 30 30 14 14 14
-6 6 6 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
-
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 10 10 10
-30 30 30 74 74 74 174 140 55 216 158 10 236 178 12 246 190 14 246 190 14 246 190 14
-246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14
-246 190 14 246 190 14 241 196 14 230 187 11 62 42 6 2 2 6 2 2 6 2 2 6
-22 22 22 238 238 238 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 226 226 226 187 187 187 169 125 40 216 158 10 236 178 12 239 182 13 236 178 12
-230 174 11 226 170 11 226 170 11 230 174 11 236 178 12 242 186 14 246 190 14 246 190 14
-246 190 14 246 190 14 246 186 14 239 182 13 213 164 39 106 106 106 66 66 66 34 34 34
-14 14 14 6 6 6 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
-
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 6 6 6
-26 26 26 70 70 70 149 139 69 213 154 11 236 178 12 246 190 14 246 190 14 246 190 14
-246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14
-246 190 14 246 190 14 246 190 14 241 196 14 190 146 13 20 16 6 2 2 6 2 2 6
-46 46 46 246 246 246 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 221 221 221 86 86 86 156 107 11 216 158 10 236 178 12 242 186 14 246 186 14
-242 186 14 239 182 13 239 182 13 242 186 14 242 186 14 246 186 14 246 190 14 246 190 14
-246 190 14 246 190 14 246 190 14 246 190 14 242 186 14 220 174 15 149 139 69 66 66 66
-30 30 30 10 10 10 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
-
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 6 6 6
-26 26 26 70 70 70 149 139 69 210 150 10 236 178 12 246 190 14 246 190 14 246 190 14
-246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14
-246 190 14 246 190 14 246 190 14 246 190 14 232 195 16 121 92 8 34 34 34 106 106 106
-221 221 221 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253
-242 242 242 82 82 82 20 16 6 163 110 8 216 158 10 236 178 12 242 186 14 246 190 14
-246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14
-246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 242 186 14 149 139 69
-46 46 46 18 18 18 6 6 6 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
-
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 10 10 10
-30 30 30 78 78 78 149 139 69 210 150 10 236 178 12 246 186 14 246 190 14 246 190 14
-246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14
-246 190 14 246 190 14 246 190 14 246 190 14 241 196 14 220 174 15 198 179 130 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 218 218 218
-58 58 58 2 2 6 20 16 6 167 114 7 216 158 10 236 178 12 246 186 14 246 190 14
-246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14
-246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 246 186 14 242 186 14 185 146 40
-54 54 54 22 22 22 6 6 6 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
-
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 14 14 14
-38 38 38 86 86 86 169 125 40 213 154 11 236 178 12 246 186 14 246 190 14 246 190 14
-246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14
-246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 232 195 16 190 146 13 214 214 214
-253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 250 250 250 170 170 170 26 26 26
-2 2 6 2 2 6 35 31 12 163 110 8 219 162 10 239 182 13 246 186 14 246 190 14
-246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14
-246 190 14 246 190 14 246 190 14 246 190 14 246 186 14 236 178 12 224 166 10 149 139 69
-46 46 46 18 18 18 6 6 6 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
-
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 6 6 6 18 18 18
-50 50 50 113 101 86 192 133 9 224 166 10 242 186 14 246 190 14 246 190 14 246 190 14
-246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14
-246 190 14 246 190 14 246 190 14 246 190 14 242 186 14 230 187 11 204 160 10 133 118 54
-226 226 226 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253 198 198 198 66 66 66 2 2 6 2 2 6
-2 2 6 2 2 6 62 42 6 156 107 11 219 162 10 239 182 13 246 186 14 246 190 14
-246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14
-246 190 14 246 190 14 246 190 14 242 186 14 234 174 13 213 154 11 148 132 55 66 66 66
-30 30 30 10 10 10 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
-
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 6 6 6 22 22 22
-58 58 58 148 132 55 206 145 10 234 174 13 242 186 14 246 186 14 246 190 14 246 190 14
-246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14
-246 190 14 246 190 14 246 190 14 246 190 14 246 186 14 236 178 12 204 160 10 163 110 8
-62 42 6 124 131 137 218 218 218 250 250 250 253 253 253 253 253 253 253 253 253 250 250 250
-242 242 242 210 210 210 151 151 151 66 66 66 6 6 6 2 2 6 2 2 6 2 2 6
-2 2 6 2 2 6 62 42 6 163 110 8 216 158 10 236 178 12 246 190 14 246 190 14
-246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14
-246 190 14 239 182 13 230 174 11 216 158 10 185 146 40 124 112 88 70 70 70 38 38 38
-18 18 18 6 6 6 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
-
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 6 6 6 22 22 22
-62 62 62 169 125 40 206 145 10 224 166 10 236 178 12 239 182 13 242 186 14 242 186 14
-246 186 14 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14
-246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 236 178 12 216 158 10 171 120 8
-85 57 6 2 2 6 6 6 6 30 30 30 54 54 54 62 62 62 50 50 50 38 38 38
-14 14 14 2 2 6 2 2 6 2 2 6 2 2 6 2 2 6 2 2 6 2 2 6
-2 2 6 6 6 6 85 57 6 167 114 7 213 154 11 236 178 12 246 190 14 246 190 14
-246 190 14 246 190 14 246 190 14 246 190 14 246 190 14 242 186 14 239 182 13 239 182 13
-230 174 11 210 150 10 174 140 55 124 112 88 82 82 82 54 54 54 34 34 34 18 18 18
-6 6 6 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
-
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 6 6 6 18 18 18
-50 50 50 169 125 40 192 133 9 200 144 11 216 158 10 219 162 10 224 166 10 226 170 11
-230 174 11 236 178 12 239 182 13 239 182 13 242 186 14 246 186 14 246 190 14 246 190 14
-246 190 14 246 190 14 246 190 14 246 190 14 246 186 14 230 174 11 210 150 10 163 110 8
-104 69 6 10 10 10 2 2 6 2 2 6 2 2 6 2 2 6 2 2 6 2 2 6
-2 2 6 2 2 6 2 2 6 2 2 6 2 2 6 2 2 6 2 2 6 2 2 6
-2 2 6 6 6 6 85 57 6 167 114 7 206 145 10 230 174 11 242 186 14 246 190 14
-246 190 14 246 190 14 246 186 14 242 186 14 239 182 13 230 174 11 224 166 10 213 154 11
-169 125 40 124 112 88 86 86 86 58 58 58 38 38 38 22 22 22 10 10 10 6 6 6
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
-
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 14 14 14
-34 34 34 70 70 70 133 118 54 169 125 40 167 114 7 180 123 7 192 133 9 197 138 11
-200 144 11 206 145 10 213 154 11 219 162 10 224 166 10 230 174 11 239 182 13 242 186 14
-246 186 14 246 186 14 246 186 14 246 186 14 239 182 13 216 158 10 184 138 11 152 99 6
-104 69 6 20 16 6 2 2 6 2 2 6 2 2 6 2 2 6 2 2 6 2 2 6
-2 2 6 2 2 6 2 2 6 2 2 6 2 2 6 2 2 6 2 2 6 2 2 6
-2 2 6 6 6 6 85 57 6 152 99 6 192 133 9 219 162 10 236 178 12 239 182 13
-246 186 14 242 186 14 239 182 13 236 178 12 224 166 10 206 145 10 192 133 9 148 132 55
-94 94 94 62 62 62 42 42 42 22 22 22 14 14 14 6 6 6 0 0 0 0 0 0
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
-
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 6 6 6
-18 18 18 34 34 34 58 58 58 78 78 78 101 98 89 124 112 88 133 118 54 156 107 11
-163 110 8 167 114 7 171 120 8 180 123 7 184 138 11 197 138 11 210 150 10 219 162 10
-226 170 11 236 178 12 236 178 12 234 174 13 219 162 10 197 138 11 163 110 8 134 84 6
-85 57 6 10 10 10 2 2 6 2 2 6 18 18 18 38 38 38 38 38 38 38 38 38
-38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 26 26 26 2 2 6
-2 2 6 6 6 6 62 42 6 137 92 6 171 120 8 200 144 11 219 162 10 230 174 11
-234 174 13 230 174 11 219 162 10 210 150 10 192 133 9 163 110 8 124 112 88 82 82 82
-50 50 50 30 30 30 14 14 14 6 6 6 0 0 0 0 0 0 0 0 0 0 0 0
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
-
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
-6 6 6 14 14 14 22 22 22 34 34 34 42 42 42 58 58 58 74 74 74 86 86 86
-101 98 89 113 101 86 133 118 54 121 92 8 137 92 6 152 99 6 163 110 8 180 123 7
-184 138 11 197 138 11 206 145 10 200 144 11 180 123 7 156 107 11 134 84 6 104 69 6
-62 42 6 54 54 54 106 106 106 101 98 89 86 86 86 82 82 82 78 78 78 78 78 78
-78 78 78 78 78 78 78 78 78 78 78 78 78 78 78 82 82 82 86 86 86 94 94 94
-106 106 106 101 101 101 90 61 47 120 80 7 156 107 11 180 123 7 192 133 9 200 144 11
-206 145 10 200 144 11 192 133 9 171 120 8 139 102 15 113 101 86 70 70 70 42 42 42
-22 22 22 10 10 10 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
-
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
-0 0 0 0 0 0 6 6 6 10 10 10 14 14 14 22 22 22 30 30 30 38 38 38
-50 50 50 62 62 62 74 74 74 90 90 90 101 98 89 113 101 86 121 92 8 120 80 7
-137 92 6 152 99 6 152 99 6 152 99 6 134 84 6 120 80 7 98 70 6 88 55 22
-101 98 89 82 82 82 58 58 58 46 46 46 38 38 38 34 34 34 34 34 34 34 34 34
-34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 38 38 38 42 42 42
-54 54 54 82 82 82 94 86 71 85 57 6 134 84 6 156 107 11 167 114 7 171 120 8
-171 120 8 167 114 7 152 99 6 121 92 8 101 98 89 62 62 62 34 34 34 18 18 18
-6 6 6 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
-
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 6 6 6 6 6 6 10 10 10
-18 18 18 22 22 22 30 30 30 42 42 42 50 50 50 66 66 66 86 86 86 101 98 89
-94 86 71 98 70 6 104 69 6 104 69 6 104 69 6 85 57 6 88 55 22 90 90 90
-62 62 62 38 38 38 22 22 22 14 14 14 10 10 10 10 10 10 10 10 10 10 10 10
-10 10 10 10 10 10 6 6 6 10 10 10 10 10 10 10 10 10 10 10 10 14 14 14
-22 22 22 42 42 42 70 70 70 94 86 71 85 57 6 104 69 6 120 80 7 137 92 6
-134 84 6 120 80 7 94 86 71 86 86 86 58 58 58 30 30 30 14 14 14 6 6 6
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
-
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
-0 0 0 6 6 6 10 10 10 14 14 14 18 18 18 26 26 26 38 38 38 54 54 54
-70 70 70 86 86 86 94 86 71 94 86 71 94 86 71 86 86 86 74 74 74 50 50 50
-30 30 30 14 14 14 6 6 6 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
-6 6 6 18 18 18 34 34 34 58 58 58 82 82 82 94 86 71 94 86 71 94 86 71
-94 86 71 94 86 71 74 74 74 50 50 50 26 26 26 14 14 14 6 6 6 0 0 0
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
-
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
-0 0 0 0 0 0 0 0 0 0 0 0 6 6 6 6 6 6 14 14 14 18 18 18
-30 30 30 38 38 38 46 46 46 54 54 54 50 50 50 42 42 42 30 30 30 18 18 18
-10 10 10 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
-0 0 0 6 6 6 14 14 14 26 26 26 38 38 38 50 50 50 58 58 58 58 58 58
-54 54 54 42 42 42 30 30 30 18 18 18 10 10 10 0 0 0 0 0 0 0 0 0
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
-
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 6 6 6
-6 6 6 10 10 10 14 14 14 18 18 18 18 18 18 14 14 14 10 10 10 6 6 6
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
-0 0 0 0 0 0 0 0 0 6 6 6 14 14 14 18 18 18 22 22 22 22 22 22
-18 18 18 14 14 14 10 10 10 6 6 6 0 0 0 0 0 0 0 0 0 0 0 0
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
-
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 6 6 6 6 6 6 10 10 10 10 10 10
+ 10 10 10 6 6 6 6 6 6 6 6 6
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 6 6 6 10 10 10 14 14 14
+ 22 22 22 26 26 26 30 30 30 34 34 34
+ 30 30 30 30 30 30 26 26 26 18 18 18
+ 14 14 14 10 10 10 6 6 6 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 1 0 0 1 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 6 6 6 14 14 14 26 26 26 42 42 42
+ 54 54 54 66 66 66 78 78 78 78 78 78
+ 78 78 78 74 74 74 66 66 66 54 54 54
+ 42 42 42 26 26 26 18 18 18 10 10 10
+ 6 6 6 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 1 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 10 10 10
+ 22 22 22 42 42 42 66 66 66 86 86 86
+ 66 66 66 38 38 38 38 38 38 22 22 22
+ 26 26 26 34 34 34 54 54 54 66 66 66
+ 86 86 86 70 70 70 46 46 46 26 26 26
+ 14 14 14 6 6 6 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 1 0 0 1 0 0 1 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 10 10 10 26 26 26
+ 50 50 50 82 82 82 58 58 58 6 6 6
+ 2 2 6 2 2 6 2 2 6 2 2 6
+ 2 2 6 2 2 6 2 2 6 2 2 6
+ 6 6 6 54 54 54 86 86 86 66 66 66
+ 38 38 38 18 18 18 6 6 6 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 6 6 6 22 22 22 50 50 50
+ 78 78 78 34 34 34 2 2 6 2 2 6
+ 2 2 6 2 2 6 2 2 6 2 2 6
+ 2 2 6 2 2 6 2 2 6 2 2 6
+ 2 2 6 2 2 6 6 6 6 70 70 70
+ 78 78 78 46 46 46 22 22 22 6 6 6
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 1 0 0 1 0 0 1 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 6 6 6 18 18 18 42 42 42 82 82 82
+ 26 26 26 2 2 6 2 2 6 2 2 6
+ 2 2 6 2 2 6 2 2 6 2 2 6
+ 2 2 6 2 2 6 2 2 6 14 14 14
+ 46 46 46 34 34 34 6 6 6 2 2 6
+ 42 42 42 78 78 78 42 42 42 18 18 18
+ 6 6 6 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 1 0 0 0 0 0 1 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 10 10 10 30 30 30 66 66 66 58 58 58
+ 2 2 6 2 2 6 2 2 6 2 2 6
+ 2 2 6 2 2 6 2 2 6 2 2 6
+ 2 2 6 2 2 6 2 2 6 26 26 26
+ 86 86 86 101 101 101 46 46 46 10 10 10
+ 2 2 6 58 58 58 70 70 70 34 34 34
+ 10 10 10 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 1 0 0 1 0 0 1 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 14 14 14 42 42 42 86 86 86 10 10 10
+ 2 2 6 2 2 6 2 2 6 2 2 6
+ 2 2 6 2 2 6 2 2 6 2 2 6
+ 2 2 6 2 2 6 2 2 6 30 30 30
+ 94 94 94 94 94 94 58 58 58 26 26 26
+ 2 2 6 6 6 6 78 78 78 54 54 54
+ 22 22 22 6 6 6 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 6 6 6
+ 22 22 22 62 62 62 62 62 62 2 2 6
+ 2 2 6 2 2 6 2 2 6 2 2 6
+ 2 2 6 2 2 6 2 2 6 2 2 6
+ 2 2 6 2 2 6 2 2 6 26 26 26
+ 54 54 54 38 38 38 18 18 18 10 10 10
+ 2 2 6 2 2 6 34 34 34 82 82 82
+ 38 38 38 14 14 14 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 1 0 0 1 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 6 6 6
+ 30 30 30 78 78 78 30 30 30 2 2 6
+ 2 2 6 2 2 6 2 2 6 2 2 6
+ 2 2 6 2 2 6 2 2 6 2 2 6
+ 2 2 6 2 2 6 2 2 6 10 10 10
+ 10 10 10 2 2 6 2 2 6 2 2 6
+ 2 2 6 2 2 6 2 2 6 78 78 78
+ 50 50 50 18 18 18 6 6 6 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 1 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 10 10 10
+ 38 38 38 86 86 86 14 14 14 2 2 6
+ 2 2 6 2 2 6 2 2 6 2 2 6
+ 2 2 6 2 2 6 2 2 6 2 2 6
+ 2 2 6 2 2 6 2 2 6 2 2 6
+ 2 2 6 2 2 6 2 2 6 2 2 6
+ 2 2 6 2 2 6 2 2 6 54 54 54
+ 66 66 66 26 26 26 6 6 6 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 1 0 0 1 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 14 14 14
+ 42 42 42 82 82 82 2 2 6 2 2 6
+ 2 2 6 6 6 6 10 10 10 2 2 6
+ 2 2 6 2 2 6 2 2 6 2 2 6
+ 2 2 6 2 2 6 2 2 6 6 6 6
+ 14 14 14 10 10 10 2 2 6 2 2 6
+ 2 2 6 2 2 6 2 2 6 18 18 18
+ 82 82 82 34 34 34 10 10 10 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 1 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 14 14 14
+ 46 46 46 86 86 86 2 2 6 2 2 6
+ 6 6 6 6 6 6 22 22 22 34 34 34
+ 6 6 6 2 2 6 2 2 6 2 2 6
+ 2 2 6 2 2 6 18 18 18 34 34 34
+ 10 10 10 50 50 50 22 22 22 2 2 6
+ 2 2 6 2 2 6 2 2 6 10 10 10
+ 86 86 86 42 42 42 14 14 14 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 1 0 0 1 0 0 1 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 14 14 14
+ 46 46 46 86 86 86 2 2 6 2 2 6
+ 38 38 38 116 116 116 94 94 94 22 22 22
+ 22 22 22 2 2 6 2 2 6 2 2 6
+ 14 14 14 86 86 86 138 138 138 162 162 162
+154 154 154 38 38 38 26 26 26 6 6 6
+ 2 2 6 2 2 6 2 2 6 2 2 6
+ 86 86 86 46 46 46 14 14 14 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 14 14 14
+ 46 46 46 86 86 86 2 2 6 14 14 14
+134 134 134 198 198 198 195 195 195 116 116 116
+ 10 10 10 2 2 6 2 2 6 6 6 6
+101 98 89 187 187 187 210 210 210 218 218 218
+214 214 214 134 134 134 14 14 14 6 6 6
+ 2 2 6 2 2 6 2 2 6 2 2 6
+ 86 86 86 50 50 50 18 18 18 6 6 6
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 1 0 0 0
+ 0 0 1 0 0 1 0 0 1 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 14 14 14
+ 46 46 46 86 86 86 2 2 6 54 54 54
+218 218 218 195 195 195 226 226 226 246 246 246
+ 58 58 58 2 2 6 2 2 6 30 30 30
+210 210 210 253 253 253 174 174 174 123 123 123
+221 221 221 234 234 234 74 74 74 2 2 6
+ 2 2 6 2 2 6 2 2 6 2 2 6
+ 70 70 70 58 58 58 22 22 22 6 6 6
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 14 14 14
+ 46 46 46 82 82 82 2 2 6 106 106 106
+170 170 170 26 26 26 86 86 86 226 226 226
+123 123 123 10 10 10 14 14 14 46 46 46
+231 231 231 190 190 190 6 6 6 70 70 70
+ 90 90 90 238 238 238 158 158 158 2 2 6
+ 2 2 6 2 2 6 2 2 6 2 2 6
+ 70 70 70 58 58 58 22 22 22 6 6 6
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 1 0 0 0
+ 0 0 1 0 0 1 0 0 1 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 14 14 14
+ 42 42 42 86 86 86 6 6 6 116 116 116
+106 106 106 6 6 6 70 70 70 149 149 149
+128 128 128 18 18 18 38 38 38 54 54 54
+221 221 221 106 106 106 2 2 6 14 14 14
+ 46 46 46 190 190 190 198 198 198 2 2 6
+ 2 2 6 2 2 6 2 2 6 2 2 6
+ 74 74 74 62 62 62 22 22 22 6 6 6
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 1 0 0 0
+ 0 0 1 0 0 0 0 0 1 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 14 14 14
+ 42 42 42 94 94 94 14 14 14 101 101 101
+128 128 128 2 2 6 18 18 18 116 116 116
+118 98 46 121 92 8 121 92 8 98 78 10
+162 162 162 106 106 106 2 2 6 2 2 6
+ 2 2 6 195 195 195 195 195 195 6 6 6
+ 2 2 6 2 2 6 2 2 6 2 2 6
+ 74 74 74 62 62 62 22 22 22 6 6 6
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 1 0 0 1
+ 0 0 1 0 0 0 0 0 1 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 10 10 10
+ 38 38 38 90 90 90 14 14 14 58 58 58
+210 210 210 26 26 26 54 38 6 154 114 10
+226 170 11 236 186 11 225 175 15 184 144 12
+215 174 15 175 146 61 37 26 9 2 2 6
+ 70 70 70 246 246 246 138 138 138 2 2 6
+ 2 2 6 2 2 6 2 2 6 2 2 6
+ 70 70 70 66 66 66 26 26 26 6 6 6
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 10 10 10
+ 38 38 38 86 86 86 14 14 14 10 10 10
+195 195 195 188 164 115 192 133 9 225 175 15
+239 182 13 234 190 10 232 195 16 232 200 30
+245 207 45 241 208 19 232 195 16 184 144 12
+218 194 134 211 206 186 42 42 42 2 2 6
+ 2 2 6 2 2 6 2 2 6 2 2 6
+ 50 50 50 74 74 74 30 30 30 6 6 6
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 10 10 10
+ 34 34 34 86 86 86 14 14 14 2 2 6
+121 87 25 192 133 9 219 162 10 239 182 13
+236 186 11 232 195 16 241 208 19 244 214 54
+246 218 60 246 218 38 246 215 20 241 208 19
+241 208 19 226 184 13 121 87 25 2 2 6
+ 2 2 6 2 2 6 2 2 6 2 2 6
+ 50 50 50 82 82 82 34 34 34 10 10 10
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 10 10 10
+ 34 34 34 82 82 82 30 30 30 61 42 6
+180 123 7 206 145 10 230 174 11 239 182 13
+234 190 10 238 202 15 241 208 19 246 218 74
+246 218 38 246 215 20 246 215 20 246 215 20
+226 184 13 215 174 15 184 144 12 6 6 6
+ 2 2 6 2 2 6 2 2 6 2 2 6
+ 26 26 26 94 94 94 42 42 42 14 14 14
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 10 10 10
+ 30 30 30 78 78 78 50 50 50 104 69 6
+192 133 9 216 158 10 236 178 12 236 186 11
+232 195 16 241 208 19 244 214 54 245 215 43
+246 215 20 246 215 20 241 208 19 198 155 10
+200 144 11 216 158 10 156 118 10 2 2 6
+ 2 2 6 2 2 6 2 2 6 2 2 6
+ 6 6 6 90 90 90 54 54 54 18 18 18
+ 6 6 6 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 10 10 10
+ 30 30 30 78 78 78 46 46 46 22 22 22
+137 92 6 210 162 10 239 182 13 238 190 10
+238 202 15 241 208 19 246 215 20 246 215 20
+241 208 19 203 166 17 185 133 11 210 150 10
+216 158 10 210 150 10 102 78 10 2 2 6
+ 6 6 6 54 54 54 14 14 14 2 2 6
+ 2 2 6 62 62 62 74 74 74 30 30 30
+ 10 10 10 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 10 10 10
+ 34 34 34 78 78 78 50 50 50 6 6 6
+ 94 70 30 139 102 15 190 146 13 226 184 13
+232 200 30 232 195 16 215 174 15 190 146 13
+168 122 10 192 133 9 210 150 10 213 154 11
+202 150 34 182 157 106 101 98 89 2 2 6
+ 2 2 6 78 78 78 116 116 116 58 58 58
+ 2 2 6 22 22 22 90 90 90 46 46 46
+ 18 18 18 6 6 6 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 10 10 10
+ 38 38 38 86 86 86 50 50 50 6 6 6
+128 128 128 174 154 114 156 107 11 168 122 10
+198 155 10 184 144 12 197 138 11 200 144 11
+206 145 10 206 145 10 197 138 11 188 164 115
+195 195 195 198 198 198 174 174 174 14 14 14
+ 2 2 6 22 22 22 116 116 116 116 116 116
+ 22 22 22 2 2 6 74 74 74 70 70 70
+ 30 30 30 10 10 10 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 6 6 6 18 18 18
+ 50 50 50 101 101 101 26 26 26 10 10 10
+138 138 138 190 190 190 174 154 114 156 107 11
+197 138 11 200 144 11 197 138 11 192 133 9
+180 123 7 190 142 34 190 178 144 187 187 187
+202 202 202 221 221 221 214 214 214 66 66 66
+ 2 2 6 2 2 6 50 50 50 62 62 62
+ 6 6 6 2 2 6 10 10 10 90 90 90
+ 50 50 50 18 18 18 6 6 6 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 10 10 10 34 34 34
+ 74 74 74 74 74 74 2 2 6 6 6 6
+144 144 144 198 198 198 190 190 190 178 166 146
+154 121 60 156 107 11 156 107 11 168 124 44
+174 154 114 187 187 187 190 190 190 210 210 210
+246 246 246 253 253 253 253 253 253 182 182 182
+ 6 6 6 2 2 6 2 2 6 2 2 6
+ 2 2 6 2 2 6 2 2 6 62 62 62
+ 74 74 74 34 34 34 14 14 14 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 10 10 10 22 22 22 54 54 54
+ 94 94 94 18 18 18 2 2 6 46 46 46
+234 234 234 221 221 221 190 190 190 190 190 190
+190 190 190 187 187 187 187 187 187 190 190 190
+190 190 190 195 195 195 214 214 214 242 242 242
+253 253 253 253 253 253 253 253 253 253 253 253
+ 82 82 82 2 2 6 2 2 6 2 2 6
+ 2 2 6 2 2 6 2 2 6 14 14 14
+ 86 86 86 54 54 54 22 22 22 6 6 6
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 6 6 6 18 18 18 46 46 46 90 90 90
+ 46 46 46 18 18 18 6 6 6 182 182 182
+253 253 253 246 246 246 206 206 206 190 190 190
+190 190 190 190 190 190 190 190 190 190 190 190
+206 206 206 231 231 231 250 250 250 253 253 253
+253 253 253 253 253 253 253 253 253 253 253 253
+202 202 202 14 14 14 2 2 6 2 2 6
+ 2 2 6 2 2 6 2 2 6 2 2 6
+ 42 42 42 86 86 86 42 42 42 18 18 18
+ 6 6 6 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 6 6 6
+ 14 14 14 38 38 38 74 74 74 66 66 66
+ 2 2 6 6 6 6 90 90 90 250 250 250
+253 253 253 253 253 253 238 238 238 198 198 198
+190 190 190 190 190 190 195 195 195 221 221 221
+246 246 246 253 253 253 253 253 253 253 253 253
+253 253 253 253 253 253 253 253 253 253 253 253
+253 253 253 82 82 82 2 2 6 2 2 6
+ 2 2 6 2 2 6 2 2 6 2 2 6
+ 2 2 6 78 78 78 70 70 70 34 34 34
+ 14 14 14 6 6 6 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 14 14 14
+ 34 34 34 66 66 66 78 78 78 6 6 6
+ 2 2 6 18 18 18 218 218 218 253 253 253
+253 253 253 253 253 253 253 253 253 246 246 246
+226 226 226 231 231 231 246 246 246 253 253 253
+253 253 253 253 253 253 253 253 253 253 253 253
+253 253 253 253 253 253 253 253 253 253 253 253
+253 253 253 178 178 178 2 2 6 2 2 6
+ 2 2 6 2 2 6 2 2 6 2 2 6
+ 2 2 6 18 18 18 90 90 90 62 62 62
+ 30 30 30 10 10 10 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 10 10 10 26 26 26
+ 58 58 58 90 90 90 18 18 18 2 2 6
+ 2 2 6 110 110 110 253 253 253 253 253 253
+253 253 253 253 253 253 253 253 253 253 253 253
+250 250 250 253 253 253 253 253 253 253 253 253
+253 253 253 253 253 253 253 253 253 253 253 253
+253 253 253 253 253 253 253 253 253 253 253 253
+253 253 253 231 231 231 18 18 18 2 2 6
+ 2 2 6 2 2 6 2 2 6 2 2 6
+ 2 2 6 2 2 6 18 18 18 94 94 94
+ 54 54 54 26 26 26 10 10 10 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 6 6 6 22 22 22 50 50 50
+ 90 90 90 26 26 26 2 2 6 2 2 6
+ 14 14 14 195 195 195 250 250 250 253 253 253
+253 253 253 253 253 253 253 253 253 253 253 253
+253 253 253 253 253 253 253 253 253 253 253 253
+253 253 253 253 253 253 253 253 253 253 253 253
+253 253 253 253 253 253 253 253 253 253 253 253
+250 250 250 242 242 242 54 54 54 2 2 6
+ 2 2 6 2 2 6 2 2 6 2 2 6
+ 2 2 6 2 2 6 2 2 6 38 38 38
+ 86 86 86 50 50 50 22 22 22 6 6 6
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 6 6 6 14 14 14 38 38 38 82 82 82
+ 34 34 34 2 2 6 2 2 6 2 2 6
+ 42 42 42 195 195 195 246 246 246 253 253 253
+253 253 253 253 253 253 253 253 253 250 250 250
+242 242 242 242 242 242 250 250 250 253 253 253
+253 253 253 253 253 253 253 253 253 253 253 253
+253 253 253 250 250 250 246 246 246 238 238 238
+226 226 226 231 231 231 101 101 101 6 6 6
+ 2 2 6 2 2 6 2 2 6 2 2 6
+ 2 2 6 2 2 6 2 2 6 2 2 6
+ 38 38 38 82 82 82 42 42 42 14 14 14
+ 6 6 6 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 10 10 10 26 26 26 62 62 62 66 66 66
+ 2 2 6 2 2 6 2 2 6 6 6 6
+ 70 70 70 170 170 170 206 206 206 234 234 234
+246 246 246 250 250 250 250 250 250 238 238 238
+226 226 226 231 231 231 238 238 238 250 250 250
+250 250 250 250 250 250 246 246 246 231 231 231
+214 214 214 206 206 206 202 202 202 202 202 202
+198 198 198 202 202 202 182 182 182 18 18 18
+ 2 2 6 2 2 6 2 2 6 2 2 6
+ 2 2 6 2 2 6 2 2 6 2 2 6
+ 2 2 6 62 62 62 66 66 66 30 30 30
+ 10 10 10 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 14 14 14 42 42 42 82 82 82 18 18 18
+ 2 2 6 2 2 6 2 2 6 10 10 10
+ 94 94 94 182 182 182 218 218 218 242 242 242
+250 250 250 253 253 253 253 253 253 250 250 250
+234 234 234 253 253 253 253 253 253 253 253 253
+253 253 253 253 253 253 253 253 253 246 246 246
+238 238 238 226 226 226 210 210 210 202 202 202
+195 195 195 195 195 195 210 210 210 158 158 158
+ 6 6 6 14 14 14 50 50 50 14 14 14
+ 2 2 6 2 2 6 2 2 6 2 2 6
+ 2 2 6 6 6 6 86 86 86 46 46 46
+ 18 18 18 6 6 6 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 6 6 6
+ 22 22 22 54 54 54 70 70 70 2 2 6
+ 2 2 6 10 10 10 2 2 6 22 22 22
+166 166 166 231 231 231 250 250 250 253 253 253
+253 253 253 253 253 253 253 253 253 250 250 250
+242 242 242 253 253 253 253 253 253 253 253 253
+253 253 253 253 253 253 253 253 253 253 253 253
+253 253 253 253 253 253 253 253 253 246 246 246
+231 231 231 206 206 206 198 198 198 226 226 226
+ 94 94 94 2 2 6 6 6 6 38 38 38
+ 30 30 30 2 2 6 2 2 6 2 2 6
+ 2 2 6 2 2 6 62 62 62 66 66 66
+ 26 26 26 10 10 10 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 10 10 10
+ 30 30 30 74 74 74 50 50 50 2 2 6
+ 26 26 26 26 26 26 2 2 6 106 106 106
+238 238 238 253 253 253 253 253 253 253 253 253
+253 253 253 253 253 253 253 253 253 253 253 253
+253 253 253 253 253 253 253 253 253 253 253 253
+253 253 253 253 253 253 253 253 253 253 253 253
+253 253 253 253 253 253 253 253 253 253 253 253
+253 253 253 246 246 246 218 218 218 202 202 202
+210 210 210 14 14 14 2 2 6 2 2 6
+ 30 30 30 22 22 22 2 2 6 2 2 6
+ 2 2 6 2 2 6 18 18 18 86 86 86
+ 42 42 42 14 14 14 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 14 14 14
+ 42 42 42 90 90 90 22 22 22 2 2 6
+ 42 42 42 2 2 6 18 18 18 218 218 218
+253 253 253 253 253 253 253 253 253 253 253 253
+253 253 253 253 253 253 253 253 253 253 253 253
+253 253 253 253 253 253 253 253 253 253 253 253
+253 253 253 253 253 253 253 253 253 253 253 253
+253 253 253 253 253 253 253 253 253 253 253 253
+253 253 253 253 253 253 250 250 250 221 221 221
+218 218 218 101 101 101 2 2 6 14 14 14
+ 18 18 18 38 38 38 10 10 10 2 2 6
+ 2 2 6 2 2 6 2 2 6 78 78 78
+ 58 58 58 22 22 22 6 6 6 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 6 6 6 18 18 18
+ 54 54 54 82 82 82 2 2 6 26 26 26
+ 22 22 22 2 2 6 123 123 123 253 253 253
+253 253 253 253 253 253 253 253 253 253 253 253
+253 253 253 253 253 253 253 253 253 253 253 253
+253 253 253 253 253 253 253 253 253 253 253 253
+253 253 253 253 253 253 253 253 253 253 253 253
+253 253 253 253 253 253 253 253 253 253 253 253
+253 253 253 253 253 253 253 253 253 250 250 250
+238 238 238 198 198 198 6 6 6 38 38 38
+ 58 58 58 26 26 26 38 38 38 2 2 6
+ 2 2 6 2 2 6 2 2 6 46 46 46
+ 78 78 78 30 30 30 10 10 10 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 10 10 10 30 30 30
+ 74 74 74 58 58 58 2 2 6 42 42 42
+ 2 2 6 22 22 22 231 231 231 253 253 253
+253 253 253 253 253 253 253 253 253 253 253 253
+253 253 253 253 253 253 253 253 253 250 250 250
+253 253 253 253 253 253 253 253 253 253 253 253
+253 253 253 253 253 253 253 253 253 253 253 253
+253 253 253 253 253 253 253 253 253 253 253 253
+253 253 253 253 253 253 253 253 253 253 253 253
+253 253 253 246 246 246 46 46 46 38 38 38
+ 42 42 42 14 14 14 38 38 38 14 14 14
+ 2 2 6 2 2 6 2 2 6 6 6 6
+ 86 86 86 46 46 46 14 14 14 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 6 6 6 14 14 14 42 42 42
+ 90 90 90 18 18 18 18 18 18 26 26 26
+ 2 2 6 116 116 116 253 253 253 253 253 253
+253 253 253 253 253 253 253 253 253 253 253 253
+253 253 253 253 253 253 250 250 250 238 238 238
+253 253 253 253 253 253 253 253 253 253 253 253
+253 253 253 253 253 253 253 253 253 253 253 253
+253 253 253 253 253 253 253 253 253 253 253 253
+253 253 253 253 253 253 253 253 253 253 253 253
+253 253 253 253 253 253 94 94 94 6 6 6
+ 2 2 6 2 2 6 10 10 10 34 34 34
+ 2 2 6 2 2 6 2 2 6 2 2 6
+ 74 74 74 58 58 58 22 22 22 6 6 6
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 10 10 10 26 26 26 66 66 66
+ 82 82 82 2 2 6 38 38 38 6 6 6
+ 14 14 14 210 210 210 253 253 253 253 253 253
+253 253 253 253 253 253 253 253 253 253 253 253
+253 253 253 253 253 253 246 246 246 242 242 242
+253 253 253 253 253 253 253 253 253 253 253 253
+253 253 253 253 253 253 253 253 253 253 253 253
+253 253 253 253 253 253 253 253 253 253 253 253
+253 253 253 253 253 253 253 253 253 253 253 253
+253 253 253 253 253 253 144 144 144 2 2 6
+ 2 2 6 2 2 6 2 2 6 46 46 46
+ 2 2 6 2 2 6 2 2 6 2 2 6
+ 42 42 42 74 74 74 30 30 30 10 10 10
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 6 6 6 14 14 14 42 42 42 90 90 90
+ 26 26 26 6 6 6 42 42 42 2 2 6
+ 74 74 74 250 250 250 253 253 253 253 253 253
+253 253 253 253 253 253 253 253 253 253 253 253
+253 253 253 253 253 253 242 242 242 242 242 242
+253 253 253 253 253 253 253 253 253 253 253 253
+253 253 253 253 253 253 253 253 253 253 253 253
+253 253 253 253 253 253 253 253 253 253 253 253
+253 253 253 253 253 253 253 253 253 253 253 253
+253 253 253 253 253 253 182 182 182 2 2 6
+ 2 2 6 2 2 6 2 2 6 46 46 46
+ 2 2 6 2 2 6 2 2 6 2 2 6
+ 10 10 10 86 86 86 38 38 38 10 10 10
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 10 10 10 26 26 26 66 66 66 82 82 82
+ 2 2 6 22 22 22 18 18 18 2 2 6
+149 149 149 253 253 253 253 253 253 253 253 253
+253 253 253 253 253 253 253 253 253 253 253 253
+253 253 253 253 253 253 234 234 234 242 242 242
+253 253 253 253 253 253 253 253 253 253 253 253
+253 253 253 253 253 253 253 253 253 253 253 253
+253 253 253 253 253 253 253 253 253 253 253 253
+253 253 253 253 253 253 253 253 253 253 253 253
+253 253 253 253 253 253 206 206 206 2 2 6
+ 2 2 6 2 2 6 2 2 6 38 38 38
+ 2 2 6 2 2 6 2 2 6 2 2 6
+ 6 6 6 86 86 86 46 46 46 14 14 14
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 6 6 6
+ 18 18 18 46 46 46 86 86 86 18 18 18
+ 2 2 6 34 34 34 10 10 10 6 6 6
+210 210 210 253 253 253 253 253 253 253 253 253
+253 253 253 253 253 253 253 253 253 253 253 253
+253 253 253 253 253 253 234 234 234 242 242 242
+253 253 253 253 253 253 253 253 253 253 253 253
+253 253 253 253 253 253 253 253 253 253 253 253
+253 253 253 253 253 253 253 253 253 253 253 253
+253 253 253 253 253 253 253 253 253 253 253 253
+253 253 253 253 253 253 221 221 221 6 6 6
+ 2 2 6 2 2 6 6 6 6 30 30 30
+ 2 2 6 2 2 6 2 2 6 2 2 6
+ 2 2 6 82 82 82 54 54 54 18 18 18
+ 6 6 6 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 10 10 10
+ 26 26 26 66 66 66 62 62 62 2 2 6
+ 2 2 6 38 38 38 10 10 10 26 26 26
+238 238 238 253 253 253 253 253 253 253 253 253
+253 253 253 253 253 253 253 253 253 253 253 253
+253 253 253 253 253 253 231 231 231 238 238 238
+253 253 253 253 253 253 253 253 253 253 253 253
+253 253 253 253 253 253 253 253 253 253 253 253
+253 253 253 253 253 253 253 253 253 253 253 253
+253 253 253 253 253 253 253 253 253 253 253 253
+253 253 253 253 253 253 231 231 231 6 6 6
+ 2 2 6 2 2 6 10 10 10 30 30 30
+ 2 2 6 2 2 6 2 2 6 2 2 6
+ 2 2 6 66 66 66 58 58 58 22 22 22
+ 6 6 6 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 10 10 10
+ 38 38 38 78 78 78 6 6 6 2 2 6
+ 2 2 6 46 46 46 14 14 14 42 42 42
+246 246 246 253 253 253 253 253 253 253 253 253
+253 253 253 253 253 253 253 253 253 253 253 253
+253 253 253 253 253 253 231 231 231 242 242 242
+253 253 253 253 253 253 253 253 253 253 253 253
+253 253 253 253 253 253 253 253 253 253 253 253
+253 253 253 253 253 253 253 253 253 253 253 253
[... several hundred further lines of raw ASCII PPM pixel data (R G B triplets for the remaining rows of the 224-colour logo image) omitted; the block carries no human-readable content ...]
diff --git a/drivers/video/ps3fb.c b/drivers/video/ps3fb.c
index dbfe2c1..b269abd 100644
--- a/drivers/video/ps3fb.c
+++ b/drivers/video/ps3fb.c
@@ -952,7 +952,7 @@ static struct fb_ops ps3fb_ops = {
.fb_compat_ioctl = ps3fb_ioctl
};
-static struct fb_fix_screeninfo ps3fb_fix __initdata = {
+static struct fb_fix_screeninfo ps3fb_fix = {
.id = DEVICE_NAME,
.type = FB_TYPE_PACKED_PIXELS,
.visual = FB_VISUAL_TRUECOLOR,
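Note on the ps3fb hunk: __initdata places an object in .init.data, which the
kernel frees once boot completes, so the annotation is only safe for data that
nothing touches after initialisation. fb_fix_screeninfo templates like this
are typically copied into the fb_info at probe time, and probe can run after
boot. A minimal sketch of the failure mode, with hypothetical foo_* names:

	static struct fb_fix_screeninfo foo_fix __initdata = { /* ... */ };

	static int foo_probe(struct platform_device *pdev)
	{
		struct fb_info *info = framebuffer_alloc(0, &pdev->dev);

		/* If probing happens after free_initmem() -- deferred probe,
		 * hotplug, or a modular driver -- foo_fix is already freed
		 * memory by the time this copy runs. */
		info->fix = foo_fix;
		return 0;
	}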
diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c
index 1aba255..98917fc 100644
--- a/drivers/virtio/virtio_pci.c
+++ b/drivers/virtio/virtio_pci.c
@@ -766,7 +766,7 @@ static void virtio_pci_remove(struct pci_dev *pci_dev)
kfree(vp_dev);
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int virtio_pci_freeze(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
@@ -824,7 +824,7 @@ static struct pci_driver virtio_pci_driver = {
.id_table = virtio_pci_id_table,
.probe = virtio_pci_probe,
.remove = virtio_pci_remove,
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
.driver.pm = &virtio_pci_pm_ops,
#endif
};
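Note on the virtio_pci hunk: CONFIG_PM is also set for runtime-PM-only
configurations, where these freeze/restore handlers would be compiled yet
never referenced, triggering "defined but not used" warnings; CONFIG_PM_SLEEP
is the precise guard for system-sleep callbacks. A sketch of the usual idiom
with a hypothetical foo driver -- SIMPLE_DEV_PM_OPS only wires the callbacks
in when CONFIG_PM_SLEEP is enabled, so the #ifdef around the function bodies
is all that is needed:

	#ifdef CONFIG_PM_SLEEP
	static int foo_suspend(struct device *dev)
	{
		/* quiesce the hardware */
		return 0;
	}

	static int foo_resume(struct device *dev)
	{
		/* bring the hardware back up */
		return 0;
	}
	#endif

	static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

	/* in the driver definition: .driver.pm = &foo_pm_ops */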
diff --git a/drivers/w1/masters/Kconfig b/drivers/w1/masters/Kconfig
index 2bd1257..efc7f07 100644
--- a/drivers/w1/masters/Kconfig
+++ b/drivers/w1/masters/Kconfig
@@ -42,7 +42,7 @@ config W1_MASTER_MXC
config W1_MASTER_DS1WM
tristate "Maxim DS1WM 1-wire busmaster"
- depends on W1 && GENERIC_HARDIRQS
+ depends on W1
help
Say Y here to enable the DS1WM 1-wire driver, such as that
in HP iPAQ devices like h5xxx, h2200, and ASIC3-based like
diff --git a/drivers/w1/masters/mxc_w1.c b/drivers/w1/masters/mxc_w1.c
index 47e12cf..15c7251 100644
--- a/drivers/w1/masters/mxc_w1.c
+++ b/drivers/w1/masters/mxc_w1.c
@@ -152,8 +152,6 @@ static int mxc_w1_remove(struct platform_device *pdev)
clk_disable_unprepare(mdev->clk);
- platform_set_drvdata(pdev, NULL);
-
return 0;
}
diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c
index 22013ca..c7c64f1 100644
--- a/drivers/w1/w1.c
+++ b/drivers/w1/w1.c
@@ -234,9 +234,11 @@ static ssize_t w1_master_attribute_store_search(struct device * dev,
{
long tmp;
struct w1_master *md = dev_to_w1_master(dev);
+ int ret;
- if (strict_strtol(buf, 0, &tmp) == -EINVAL)
- return -EINVAL;
+ ret = kstrtol(buf, 0, &tmp);
+ if (ret)
+ return ret;
mutex_lock(&md->mutex);
md->search_count = tmp;
@@ -266,9 +268,11 @@ static ssize_t w1_master_attribute_store_pullup(struct device *dev,
{
long tmp;
struct w1_master *md = dev_to_w1_master(dev);
+ int ret;
- if (strict_strtol(buf, 0, &tmp) == -EINVAL)
- return -EINVAL;
+ ret = kstrtol(buf, 0, &tmp);
+ if (ret)
+ return ret;
mutex_lock(&md->mutex);
md->enable_pullup = tmp;
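Note on the w1 hunks: the old test accepted any strict_strtol() failure other
than -EINVAL and then used tmp anyway; kstrtol() returns 0 on success or a
negative errno (-EINVAL for a malformed string, -ERANGE on overflow), and the
new code propagates that errno to the writer. The resulting sysfs-store
shape, sketched with a hypothetical foo_store:

	static ssize_t foo_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
	{
		long val;
		int ret;

		ret = kstrtol(buf, 0, &val);
		if (ret)
			return ret;	/* report the exact parse error */

		/* ... apply val under the appropriate lock ... */
		return count;
	}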
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 362085d..d1d53f3 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -290,6 +290,16 @@ config ORION_WATCHDOG
To compile this driver as a module, choose M here: the
module will be called orion_wdt.
+config SUNXI_WATCHDOG
+ tristate "Allwinner SoCs watchdog support"
+ depends on ARCH_SUNXI
+ select WATCHDOG_CORE
+ help
+ Say Y here to include support for the watchdog timer
+ in Allwinner SoCs.
+ To compile this driver as a module, choose M here: the
+ module will be called sunxi_wdt.
+
config COH901327_WATCHDOG
bool "ST-Ericsson COH 901 327 watchdog"
depends on ARCH_U300
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index 2f26a0b..6c5bb27 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -46,6 +46,7 @@ obj-$(CONFIG_PNX4008_WATCHDOG) += pnx4008_wdt.o
obj-$(CONFIG_IOP_WATCHDOG) += iop_wdt.o
obj-$(CONFIG_DAVINCI_WATCHDOG) += davinci_wdt.o
obj-$(CONFIG_ORION_WATCHDOG) += orion_wdt.o
+obj-$(CONFIG_SUNXI_WATCHDOG) += sunxi_wdt.o
obj-$(CONFIG_COH901327_WATCHDOG) += coh901327_wdt.o
obj-$(CONFIG_STMP3XXX_RTC_WATCHDOG) += stmp3xxx_rtc_wdt.o
obj-$(CONFIG_NUC900_WATCHDOG) += nuc900_wdt.o
diff --git a/drivers/watchdog/ar7_wdt.c b/drivers/watchdog/ar7_wdt.c
index 2f3cc8f..b3709f9 100644
--- a/drivers/watchdog/ar7_wdt.c
+++ b/drivers/watchdog/ar7_wdt.c
@@ -280,11 +280,6 @@ static int ar7_wdt_probe(struct platform_device *pdev)
ar7_regs_wdt =
platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
- if (!ar7_regs_wdt) {
- pr_err("could not get registers resource\n");
- return -ENODEV;
- }
-
ar7_wdt = devm_ioremap_resource(&pdev->dev, ar7_regs_wdt);
if (IS_ERR(ar7_wdt))
return PTR_ERR(ar7_wdt);
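Note on the ar7_wdt hunk (the nuc900_wdt and ts72xx_wdt hunks below make the
same simplification): devm_ioremap_resource() validates its resource
argument, logs a diagnostic, and returns ERR_PTR(-EINVAL) when the resource
is missing, so an explicit NULL check after platform_get_resource() is
redundant. The whole idiom reduces to:

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);	/* handles res == NULL */
	if (IS_ERR(base))
		return PTR_ERR(base);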
diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c
index de7e4f4..5be5e3d 100644
--- a/drivers/watchdog/hpwdt.c
+++ b/drivers/watchdog/hpwdt.c
@@ -162,7 +162,8 @@ extern asmlinkage void asminline_call(struct cmn_registers *pi86Regs,
#define HPWDT_ARCH 32
asm(".text \n\t"
- ".align 4 \n"
+ ".align 4 \n\t"
+ ".globl asminline_call \n"
"asminline_call: \n\t"
"pushl %ebp \n\t"
"movl %esp, %ebp \n\t"
@@ -352,7 +353,8 @@ static int detect_cru_service(void)
#define HPWDT_ARCH 64
asm(".text \n\t"
- ".align 4 \n"
+ ".align 4 \n\t"
+ ".globl asminline_call \n"
"asminline_call: \n\t"
"pushq %rbp \n\t"
"movq %rsp, %rbp \n\t"
diff --git a/drivers/watchdog/nuc900_wdt.c b/drivers/watchdog/nuc900_wdt.c
index e2b6d2c..b15b6ef 100644
--- a/drivers/watchdog/nuc900_wdt.c
+++ b/drivers/watchdog/nuc900_wdt.c
@@ -256,11 +256,6 @@ static int nuc900wdt_probe(struct platform_device *pdev)
spin_lock_init(&nuc900_wdt->wdt_lock);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (res == NULL) {
- dev_err(&pdev->dev, "no memory resource specified\n");
- return -ENOENT;
- }
-
nuc900_wdt->wdt_base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(nuc900_wdt->wdt_base))
return PTR_ERR(nuc900_wdt->wdt_base);
diff --git a/drivers/watchdog/s3c2410_wdt.c b/drivers/watchdog/s3c2410_wdt.c
index 6a22cf5..23aad7c 100644
--- a/drivers/watchdog/s3c2410_wdt.c
+++ b/drivers/watchdog/s3c2410_wdt.c
@@ -84,13 +84,17 @@ MODULE_PARM_DESC(soft_noboot, "Watchdog action, set to 1 to ignore reboots, "
"0 to reboot (default 0)");
MODULE_PARM_DESC(debug, "Watchdog debug, set to >1 for debug (default 0)");
-static struct device *wdt_dev; /* platform device attached to */
-static struct resource *wdt_mem;
-static struct resource *wdt_irq;
-static struct clk *wdt_clock;
-static void __iomem *wdt_base;
-static unsigned int wdt_count;
-static DEFINE_SPINLOCK(wdt_lock);
+struct s3c2410_wdt {
+ struct device *dev;
+ struct clk *clock;
+ void __iomem *reg_base;
+ unsigned int count;
+ spinlock_t lock;
+ unsigned long wtcon_save;
+ unsigned long wtdat_save;
+ struct watchdog_device wdt_device;
+ struct notifier_block freq_transition;
+};
/* watchdog control routines */
@@ -102,29 +106,38 @@ do { \
/* functions */
+static inline struct s3c2410_wdt *freq_to_wdt(struct notifier_block *nb)
+{
+ return container_of(nb, struct s3c2410_wdt, freq_transition);
+}
+
static int s3c2410wdt_keepalive(struct watchdog_device *wdd)
{
- spin_lock(&wdt_lock);
- writel(wdt_count, wdt_base + S3C2410_WTCNT);
- spin_unlock(&wdt_lock);
+ struct s3c2410_wdt *wdt = watchdog_get_drvdata(wdd);
+
+ spin_lock(&wdt->lock);
+ writel(wdt->count, wdt->reg_base + S3C2410_WTCNT);
+ spin_unlock(&wdt->lock);
return 0;
}
-static void __s3c2410wdt_stop(void)
+static void __s3c2410wdt_stop(struct s3c2410_wdt *wdt)
{
unsigned long wtcon;
- wtcon = readl(wdt_base + S3C2410_WTCON);
+ wtcon = readl(wdt->reg_base + S3C2410_WTCON);
wtcon &= ~(S3C2410_WTCON_ENABLE | S3C2410_WTCON_RSTEN);
- writel(wtcon, wdt_base + S3C2410_WTCON);
+ writel(wtcon, wdt->reg_base + S3C2410_WTCON);
}
static int s3c2410wdt_stop(struct watchdog_device *wdd)
{
- spin_lock(&wdt_lock);
- __s3c2410wdt_stop();
- spin_unlock(&wdt_lock);
+ struct s3c2410_wdt *wdt = watchdog_get_drvdata(wdd);
+
+ spin_lock(&wdt->lock);
+ __s3c2410wdt_stop(wdt);
+ spin_unlock(&wdt->lock);
return 0;
}
@@ -132,12 +145,13 @@ static int s3c2410wdt_stop(struct watchdog_device *wdd)
static int s3c2410wdt_start(struct watchdog_device *wdd)
{
unsigned long wtcon;
+ struct s3c2410_wdt *wdt = watchdog_get_drvdata(wdd);
- spin_lock(&wdt_lock);
+ spin_lock(&wdt->lock);
- __s3c2410wdt_stop();
+ __s3c2410wdt_stop(wdt);
- wtcon = readl(wdt_base + S3C2410_WTCON);
+ wtcon = readl(wdt->reg_base + S3C2410_WTCON);
wtcon |= S3C2410_WTCON_ENABLE | S3C2410_WTCON_DIV128;
if (soft_noboot) {
@@ -148,25 +162,26 @@ static int s3c2410wdt_start(struct watchdog_device *wdd)
wtcon |= S3C2410_WTCON_RSTEN;
}
- DBG("%s: wdt_count=0x%08x, wtcon=%08lx\n",
- __func__, wdt_count, wtcon);
+ DBG("%s: count=0x%08x, wtcon=%08lx\n",
+ __func__, wdt->count, wtcon);
- writel(wdt_count, wdt_base + S3C2410_WTDAT);
- writel(wdt_count, wdt_base + S3C2410_WTCNT);
- writel(wtcon, wdt_base + S3C2410_WTCON);
- spin_unlock(&wdt_lock);
+ writel(wdt->count, wdt->reg_base + S3C2410_WTDAT);
+ writel(wdt->count, wdt->reg_base + S3C2410_WTCNT);
+ writel(wtcon, wdt->reg_base + S3C2410_WTCON);
+ spin_unlock(&wdt->lock);
return 0;
}
-static inline int s3c2410wdt_is_running(void)
+static inline int s3c2410wdt_is_running(struct s3c2410_wdt *wdt)
{
- return readl(wdt_base + S3C2410_WTCON) & S3C2410_WTCON_ENABLE;
+ return readl(wdt->reg_base + S3C2410_WTCON) & S3C2410_WTCON_ENABLE;
}
static int s3c2410wdt_set_heartbeat(struct watchdog_device *wdd, unsigned timeout)
{
- unsigned long freq = clk_get_rate(wdt_clock);
+ struct s3c2410_wdt *wdt = watchdog_get_drvdata(wdd);
+ unsigned long freq = clk_get_rate(wdt->clock);
unsigned int count;
unsigned int divisor = 1;
unsigned long wtcon;
@@ -192,7 +207,7 @@ static int s3c2410wdt_set_heartbeat(struct watchdog_device *wdd, unsigned timeou
}
if ((count / divisor) >= 0x10000) {
- dev_err(wdt_dev, "timeout %d too big\n", timeout);
+ dev_err(wdt->dev, "timeout %d too big\n", timeout);
return -EINVAL;
}
}
@@ -201,15 +216,15 @@ static int s3c2410wdt_set_heartbeat(struct watchdog_device *wdd, unsigned timeou
__func__, timeout, divisor, count, count/divisor);
count /= divisor;
- wdt_count = count;
+ wdt->count = count;
/* update the pre-scaler */
- wtcon = readl(wdt_base + S3C2410_WTCON);
+ wtcon = readl(wdt->reg_base + S3C2410_WTCON);
wtcon &= ~S3C2410_WTCON_PRESCALE_MASK;
wtcon |= S3C2410_WTCON_PRESCALE(divisor-1);
- writel(count, wdt_base + S3C2410_WTDAT);
- writel(wtcon, wdt_base + S3C2410_WTCON);
+ writel(count, wdt->reg_base + S3C2410_WTDAT);
+ writel(wtcon, wdt->reg_base + S3C2410_WTCON);
wdd->timeout = (count * divisor) / freq;
@@ -242,21 +257,23 @@ static struct watchdog_device s3c2410_wdd = {
static irqreturn_t s3c2410wdt_irq(int irqno, void *param)
{
- dev_info(wdt_dev, "watchdog timer expired (irq)\n");
+ struct s3c2410_wdt *wdt = platform_get_drvdata(param);
+
+ dev_info(wdt->dev, "watchdog timer expired (irq)\n");
- s3c2410wdt_keepalive(&s3c2410_wdd);
+ s3c2410wdt_keepalive(&wdt->wdt_device);
return IRQ_HANDLED;
}
-
#ifdef CONFIG_CPU_FREQ
static int s3c2410wdt_cpufreq_transition(struct notifier_block *nb,
unsigned long val, void *data)
{
int ret;
+ struct s3c2410_wdt *wdt = freq_to_wdt(nb);
- if (!s3c2410wdt_is_running())
+ if (!s3c2410wdt_is_running(wdt))
goto done;
if (val == CPUFREQ_PRECHANGE) {
@@ -265,14 +282,15 @@ static int s3c2410wdt_cpufreq_transition(struct notifier_block *nb,
* the watchdog is running.
*/
- s3c2410wdt_keepalive(&s3c2410_wdd);
+ s3c2410wdt_keepalive(&wdt->wdt_device);
} else if (val == CPUFREQ_POSTCHANGE) {
- s3c2410wdt_stop(&s3c2410_wdd);
+ s3c2410wdt_stop(&wdt->wdt_device);
- ret = s3c2410wdt_set_heartbeat(&s3c2410_wdd, s3c2410_wdd.timeout);
+ ret = s3c2410wdt_set_heartbeat(&wdt->wdt_device,
+ wdt->wdt_device.timeout);
if (ret >= 0)
- s3c2410wdt_start(&s3c2410_wdd);
+ s3c2410wdt_start(&wdt->wdt_device);
else
goto err;
}
@@ -281,34 +299,35 @@ done:
return 0;
err:
- dev_err(wdt_dev, "cannot set new value for timeout %d\n",
- s3c2410_wdd.timeout);
+ dev_err(wdt->dev, "cannot set new value for timeout %d\n",
+ wdt->wdt_device.timeout);
return ret;
}
-static struct notifier_block s3c2410wdt_cpufreq_transition_nb = {
- .notifier_call = s3c2410wdt_cpufreq_transition,
-};
-
-static inline int s3c2410wdt_cpufreq_register(void)
+static inline int s3c2410wdt_cpufreq_register(struct s3c2410_wdt *wdt)
{
- return cpufreq_register_notifier(&s3c2410wdt_cpufreq_transition_nb,
+ wdt->freq_transition.notifier_call = s3c2410wdt_cpufreq_transition;
+
+ return cpufreq_register_notifier(&wdt->freq_transition,
CPUFREQ_TRANSITION_NOTIFIER);
}
-static inline void s3c2410wdt_cpufreq_deregister(void)
+static inline void s3c2410wdt_cpufreq_deregister(struct s3c2410_wdt *wdt)
{
- cpufreq_unregister_notifier(&s3c2410wdt_cpufreq_transition_nb,
+ wdt->freq_transition.notifier_call = s3c2410wdt_cpufreq_transition;
+
+ cpufreq_unregister_notifier(&wdt->freq_transition,
CPUFREQ_TRANSITION_NOTIFIER);
}
#else
-static inline int s3c2410wdt_cpufreq_register(void)
+
+static inline int s3c2410wdt_cpufreq_register(struct s3c2410_wdt *wdt)
{
return 0;
}
-static inline void s3c2410wdt_cpufreq_deregister(void)
+static inline void s3c2410wdt_cpufreq_deregister(struct s3c2410_wdt *wdt)
{
}
#endif
@@ -316,6 +335,9 @@ static inline void s3c2410wdt_cpufreq_deregister(void)
static int s3c2410wdt_probe(struct platform_device *pdev)
{
struct device *dev;
+ struct s3c2410_wdt *wdt;
+ struct resource *wdt_mem;
+ struct resource *wdt_irq;
unsigned int wtcon;
int started = 0;
int ret;
@@ -323,13 +345,14 @@ static int s3c2410wdt_probe(struct platform_device *pdev)
DBG("%s: probe=%p\n", __func__, pdev);
dev = &pdev->dev;
- wdt_dev = &pdev->dev;
- wdt_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (wdt_mem == NULL) {
- dev_err(dev, "no memory resource specified\n");
- return -ENOENT;
- }
+ wdt = devm_kzalloc(dev, sizeof(*wdt), GFP_KERNEL);
+ if (!wdt)
+ return -ENOMEM;
+
+ wdt->dev = &pdev->dev;
+ spin_lock_init(&wdt->lock);
+ wdt->wdt_device = s3c2410_wdd;
wdt_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (wdt_irq == NULL) {
@@ -339,35 +362,40 @@ static int s3c2410wdt_probe(struct platform_device *pdev)
}
/* get the memory region for the watchdog timer */
- wdt_base = devm_ioremap_resource(dev, wdt_mem);
- if (IS_ERR(wdt_base)) {
- ret = PTR_ERR(wdt_base);
+ wdt_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ wdt->reg_base = devm_ioremap_resource(dev, wdt_mem);
+ if (IS_ERR(wdt->reg_base)) {
+ ret = PTR_ERR(wdt->reg_base);
goto err;
}
- DBG("probe: mapped wdt_base=%p\n", wdt_base);
+ DBG("probe: mapped reg_base=%p\n", wdt->reg_base);
- wdt_clock = devm_clk_get(dev, "watchdog");
- if (IS_ERR(wdt_clock)) {
+ wdt->clock = devm_clk_get(dev, "watchdog");
+ if (IS_ERR(wdt->clock)) {
dev_err(dev, "failed to find watchdog clock source\n");
- ret = PTR_ERR(wdt_clock);
+ ret = PTR_ERR(wdt->clock);
goto err;
}
- clk_prepare_enable(wdt_clock);
+ clk_prepare_enable(wdt->clock);
- ret = s3c2410wdt_cpufreq_register();
+ ret = s3c2410wdt_cpufreq_register(wdt);
if (ret < 0) {
dev_err(dev, "failed to register cpufreq\n");
goto err_clk;
}
+ watchdog_set_drvdata(&wdt->wdt_device, wdt);
+
/* see if we can actually set the requested timer margin, and if
* not, try the default value */
- watchdog_init_timeout(&s3c2410_wdd, tmr_margin, &pdev->dev);
- if (s3c2410wdt_set_heartbeat(&s3c2410_wdd, s3c2410_wdd.timeout)) {
- started = s3c2410wdt_set_heartbeat(&s3c2410_wdd,
+ watchdog_init_timeout(&wdt->wdt_device, tmr_margin, &pdev->dev);
+ ret = s3c2410wdt_set_heartbeat(&wdt->wdt_device,
+ wdt->wdt_device.timeout);
+ if (ret) {
+ started = s3c2410wdt_set_heartbeat(&wdt->wdt_device,
CONFIG_S3C2410_WATCHDOG_DEFAULT_TIME);
if (started == 0)
@@ -386,9 +414,9 @@ static int s3c2410wdt_probe(struct platform_device *pdev)
goto err_cpufreq;
}
- watchdog_set_nowayout(&s3c2410_wdd, nowayout);
+ watchdog_set_nowayout(&wdt->wdt_device, nowayout);
- ret = watchdog_register_device(&s3c2410_wdd);
+ ret = watchdog_register_device(&wdt->wdt_device);
if (ret) {
dev_err(dev, "cannot register watchdog (%d)\n", ret);
goto err_cpufreq;
@@ -396,18 +424,20 @@ static int s3c2410wdt_probe(struct platform_device *pdev)
if (tmr_atboot && started == 0) {
dev_info(dev, "starting watchdog timer\n");
- s3c2410wdt_start(&s3c2410_wdd);
+ s3c2410wdt_start(&wdt->wdt_device);
} else if (!tmr_atboot) {
/* if we're not enabling the watchdog, then ensure it is
* disabled if it has been left running from the bootloader
* or other source */
- s3c2410wdt_stop(&s3c2410_wdd);
+ s3c2410wdt_stop(&wdt->wdt_device);
}
+ platform_set_drvdata(pdev, wdt);
+
/* print out a statement of readiness */
- wtcon = readl(wdt_base + S3C2410_WTCON);
+ wtcon = readl(wdt->reg_base + S3C2410_WTCON);
dev_info(dev, "watchdog %sactive, reset %sabled, irq %sabled\n",
(wtcon & S3C2410_WTCON_ENABLE) ? "" : "in",
@@ -417,64 +447,64 @@ static int s3c2410wdt_probe(struct platform_device *pdev)
return 0;
err_cpufreq:
- s3c2410wdt_cpufreq_deregister();
+ s3c2410wdt_cpufreq_deregister(wdt);
err_clk:
- clk_disable_unprepare(wdt_clock);
- wdt_clock = NULL;
+ clk_disable_unprepare(wdt->clock);
+ wdt->clock = NULL;
err:
- wdt_irq = NULL;
- wdt_mem = NULL;
return ret;
}
static int s3c2410wdt_remove(struct platform_device *dev)
{
- watchdog_unregister_device(&s3c2410_wdd);
+ struct s3c2410_wdt *wdt = platform_get_drvdata(dev);
- s3c2410wdt_cpufreq_deregister();
+ watchdog_unregister_device(&wdt->wdt_device);
- clk_disable_unprepare(wdt_clock);
- wdt_clock = NULL;
+ s3c2410wdt_cpufreq_deregister(wdt);
+
+ clk_disable_unprepare(wdt->clock);
+ wdt->clock = NULL;
- wdt_irq = NULL;
- wdt_mem = NULL;
return 0;
}
static void s3c2410wdt_shutdown(struct platform_device *dev)
{
- s3c2410wdt_stop(&s3c2410_wdd);
+ struct s3c2410_wdt *wdt = platform_get_drvdata(dev);
+
+ s3c2410wdt_stop(&wdt->wdt_device);
}
#ifdef CONFIG_PM_SLEEP
-static unsigned long wtcon_save;
-static unsigned long wtdat_save;
-
static int s3c2410wdt_suspend(struct device *dev)
{
+ struct s3c2410_wdt *wdt = dev_get_drvdata(dev);
+
/* Save watchdog state, and turn it off. */
- wtcon_save = readl(wdt_base + S3C2410_WTCON);
- wtdat_save = readl(wdt_base + S3C2410_WTDAT);
+ wdt->wtcon_save = readl(wdt->reg_base + S3C2410_WTCON);
+ wdt->wtdat_save = readl(wdt->reg_base + S3C2410_WTDAT);
/* Note that WTCNT doesn't need to be saved. */
- s3c2410wdt_stop(&s3c2410_wdd);
+ s3c2410wdt_stop(&wdt->wdt_device);
return 0;
}
static int s3c2410wdt_resume(struct device *dev)
{
- /* Restore watchdog state. */
+ struct s3c2410_wdt *wdt = dev_get_drvdata(dev);
- writel(wtdat_save, wdt_base + S3C2410_WTDAT);
- writel(wtdat_save, wdt_base + S3C2410_WTCNT); /* Reset count */
- writel(wtcon_save, wdt_base + S3C2410_WTCON);
+ /* Restore watchdog state. */
+ writel(wdt->wtdat_save, wdt->reg_base + S3C2410_WTDAT);
+ writel(wdt->wtdat_save, wdt->reg_base + S3C2410_WTCNT);/* Reset count */
+ writel(wdt->wtcon_save, wdt->reg_base + S3C2410_WTCON);
dev_info(dev, "watchdog %sabled\n",
- (wtcon_save & S3C2410_WTCON_ENABLE) ? "en" : "dis");
+ (wdt->wtcon_save & S3C2410_WTCON_ENABLE) ? "en" : "dis");
return 0;
}
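Note on the s3c2410_wdt conversion: every file-scope global moves into one
per-device struct s3c2410_wdt, published via platform_set_drvdata() and
watchdog_set_drvdata() and recovered in each callback, so the driver can bind
to more than one instance; the cpufreq notifier, which has no drvdata slot,
gets its context back with container_of() (freq_to_wdt()). The skeleton of
the pattern, reduced to hypothetical foo_* names:

	struct foo_wdt {
		struct watchdog_device wdd;
		void __iomem *base;
		spinlock_t lock;
	};

	static int foo_ping(struct watchdog_device *wdd)
	{
		struct foo_wdt *wdt = watchdog_get_drvdata(wdd);

		spin_lock(&wdt->lock);
		writel(0, wdt->base);	/* device-specific kick */
		spin_unlock(&wdt->lock);
		return 0;
	}

	/* in probe():
	 *	watchdog_set_drvdata(&wdt->wdd, wdt);
	 *	platform_set_drvdata(pdev, wdt);
	 */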
diff --git a/drivers/watchdog/sunxi_wdt.c b/drivers/watchdog/sunxi_wdt.c
new file mode 100644
index 0000000..1f94b42
--- /dev/null
+++ b/drivers/watchdog/sunxi_wdt.c
@@ -0,0 +1,237 @@
+/*
+ * sunxi Watchdog Driver
+ *
+ * Copyright (c) 2013 Carlo Caione
+ * 2012 Henrik Nordstrom
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Based on xen_wdt.c
+ * (c) Copyright 2010 Novell, Inc.
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+#include <linux/watchdog.h>
+
+#define WDT_MAX_TIMEOUT 16
+#define WDT_MIN_TIMEOUT 1
+#define WDT_MODE_TIMEOUT(n) ((n) << 3)
+#define WDT_TIMEOUT_MASK WDT_MODE_TIMEOUT(0x0F)
+
+#define WDT_CTRL 0x00
+#define WDT_CTRL_RELOAD ((1 << 0) | (0x0a57 << 1))
+
+#define WDT_MODE 0x04
+#define WDT_MODE_EN (1 << 0)
+#define WDT_MODE_RST_EN (1 << 1)
+
+#define DRV_NAME "sunxi-wdt"
+#define DRV_VERSION "1.0"
+
+static bool nowayout = WATCHDOG_NOWAYOUT;
+static unsigned int timeout = WDT_MAX_TIMEOUT;
+
+struct sunxi_wdt_dev {
+ struct watchdog_device wdt_dev;
+ void __iomem *wdt_base;
+};
+
+/*
+ * wdt_timeout_map maps a watchdog timeout in seconds to the encoding
+ * written to bits 6:3 of the WDT_MODE register:
+ *
+ *	[timeout in seconds] = register value
+ */
+
+static const int wdt_timeout_map[] = {
+ [1] = 0b0001, /* 1s */
+ [2] = 0b0010, /* 2s */
+ [3] = 0b0011, /* 3s */
+ [4] = 0b0100, /* 4s */
+ [5] = 0b0101, /* 5s */
+ [6] = 0b0110, /* 6s */
+ [8] = 0b0111, /* 8s */
+ [10] = 0b1000, /* 10s */
+ [12] = 0b1001, /* 12s */
+ [14] = 0b1010, /* 14s */
+ [16] = 0b1011, /* 16s */
+};
+
+static int sunxi_wdt_ping(struct watchdog_device *wdt_dev)
+{
+ struct sunxi_wdt_dev *sunxi_wdt = watchdog_get_drvdata(wdt_dev);
+ void __iomem *wdt_base = sunxi_wdt->wdt_base;
+
+ iowrite32(WDT_CTRL_RELOAD, wdt_base + WDT_CTRL);
+
+ return 0;
+}
+
+static int sunxi_wdt_set_timeout(struct watchdog_device *wdt_dev,
+ unsigned int timeout)
+{
+ struct sunxi_wdt_dev *sunxi_wdt = watchdog_get_drvdata(wdt_dev);
+ void __iomem *wdt_base = sunxi_wdt->wdt_base;
+ u32 reg;
+
+ if (wdt_timeout_map[timeout] == 0)
+ timeout++;
+
+ sunxi_wdt->wdt_dev.timeout = timeout;
+
+ reg = ioread32(wdt_base + WDT_MODE);
+ reg &= ~WDT_TIMEOUT_MASK;
+ reg |= WDT_MODE_TIMEOUT(wdt_timeout_map[timeout]);
+ iowrite32(reg, wdt_base + WDT_MODE);
+
+ sunxi_wdt_ping(wdt_dev);
+
+ return 0;
+}
+
+static int sunxi_wdt_stop(struct watchdog_device *wdt_dev)
+{
+ struct sunxi_wdt_dev *sunxi_wdt = watchdog_get_drvdata(wdt_dev);
+ void __iomem *wdt_base = sunxi_wdt->wdt_base;
+
+ iowrite32(0, wdt_base + WDT_MODE);
+
+ return 0;
+}
+
+static int sunxi_wdt_start(struct watchdog_device *wdt_dev)
+{
+ u32 reg;
+ struct sunxi_wdt_dev *sunxi_wdt = watchdog_get_drvdata(wdt_dev);
+ void __iomem *wdt_base = sunxi_wdt->wdt_base;
+ int ret;
+
+ ret = sunxi_wdt_set_timeout(&sunxi_wdt->wdt_dev,
+ sunxi_wdt->wdt_dev.timeout);
+ if (ret < 0)
+ return ret;
+
+ reg = ioread32(wdt_base + WDT_MODE);
+ reg |= (WDT_MODE_RST_EN | WDT_MODE_EN);
+ iowrite32(reg, wdt_base + WDT_MODE);
+
+ return 0;
+}
+
+static const struct watchdog_info sunxi_wdt_info = {
+ .identity = DRV_NAME,
+ .options = WDIOF_SETTIMEOUT |
+ WDIOF_KEEPALIVEPING |
+ WDIOF_MAGICCLOSE,
+};
+
+static const struct watchdog_ops sunxi_wdt_ops = {
+ .owner = THIS_MODULE,
+ .start = sunxi_wdt_start,
+ .stop = sunxi_wdt_stop,
+ .ping = sunxi_wdt_ping,
+ .set_timeout = sunxi_wdt_set_timeout,
+};
+
+static int sunxi_wdt_probe(struct platform_device *pdev)
+{
+ struct sunxi_wdt_dev *sunxi_wdt;
+ struct resource *res;
+ int err;
+
+ sunxi_wdt = devm_kzalloc(&pdev->dev, sizeof(*sunxi_wdt), GFP_KERNEL);
+ if (!sunxi_wdt)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, sunxi_wdt);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ sunxi_wdt->wdt_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(sunxi_wdt->wdt_base))
+ return PTR_ERR(sunxi_wdt->wdt_base);
+
+ sunxi_wdt->wdt_dev.info = &sunxi_wdt_info;
+ sunxi_wdt->wdt_dev.ops = &sunxi_wdt_ops;
+ sunxi_wdt->wdt_dev.timeout = WDT_MAX_TIMEOUT;
+ sunxi_wdt->wdt_dev.max_timeout = WDT_MAX_TIMEOUT;
+ sunxi_wdt->wdt_dev.min_timeout = WDT_MIN_TIMEOUT;
+ sunxi_wdt->wdt_dev.parent = &pdev->dev;
+
+ watchdog_init_timeout(&sunxi_wdt->wdt_dev, timeout, &pdev->dev);
+ watchdog_set_nowayout(&sunxi_wdt->wdt_dev, nowayout);
+
+ watchdog_set_drvdata(&sunxi_wdt->wdt_dev, sunxi_wdt);
+
+ sunxi_wdt_stop(&sunxi_wdt->wdt_dev);
+
+ err = watchdog_register_device(&sunxi_wdt->wdt_dev);
+ if (unlikely(err))
+ return err;
+
+ dev_info(&pdev->dev, "Watchdog enabled (timeout=%d sec, nowayout=%d)\n",
+ sunxi_wdt->wdt_dev.timeout, nowayout);
+
+ return 0;
+}
+
+static int sunxi_wdt_remove(struct platform_device *pdev)
+{
+ struct sunxi_wdt_dev *sunxi_wdt = platform_get_drvdata(pdev);
+
+ watchdog_unregister_device(&sunxi_wdt->wdt_dev);
+ watchdog_set_drvdata(&sunxi_wdt->wdt_dev, NULL);
+
+ return 0;
+}
+
+static void sunxi_wdt_shutdown(struct platform_device *pdev)
+{
+ struct sunxi_wdt_dev *sunxi_wdt = platform_get_drvdata(pdev);
+
+ sunxi_wdt_stop(&sunxi_wdt->wdt_dev);
+}
+
+static const struct of_device_id sunxi_wdt_dt_ids[] = {
+ { .compatible = "allwinner,sun4i-wdt" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, sunxi_wdt_dt_ids);
+
+static struct platform_driver sunxi_wdt_driver = {
+ .probe = sunxi_wdt_probe,
+ .remove = sunxi_wdt_remove,
+ .shutdown = sunxi_wdt_shutdown,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = DRV_NAME,
+ .of_match_table = of_match_ptr(sunxi_wdt_dt_ids)
+ },
+};
+
+module_platform_driver(sunxi_wdt_driver);
+
+module_param(timeout, uint, 0);
+MODULE_PARM_DESC(timeout, "Watchdog heartbeat in seconds");
+
+module_param(nowayout, bool, 0);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started "
+ "(default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Carlo Caione <carlo.caione@gmail.com>");
+MODULE_AUTHOR("Henrik Nordstrom <henrik@henriknordstrom.net>");
+MODULE_DESCRIPTION("sunxi WatchDog Timer Driver");
+MODULE_VERSION(DRV_VERSION);
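Note on the sunxi_wdt timeout handling: the watchdog core validates requests
against min_timeout/max_timeout before ->set_timeout() runs, so
wdt_timeout_map is only ever indexed with 1..16, and the single "timeout++"
bump is enough because every hole in the map (7, 9, 11, 13 and 15 seconds)
is exactly one entry wide. A standalone check of that property (ordinary
userspace C, with the register encodings written in decimal):

	#include <assert.h>

	int main(void)
	{
		static const int map[17] = {
			[1] = 1,  [2] = 2,  [3] = 3,  [4] = 4,  [5] = 5,
			[6] = 6,  [8] = 7,  [10] = 8, [12] = 9, [14] = 10,
			[16] = 11,
		};
		int t;

		for (t = 1; t < 16; t++)
			assert(map[t] != 0 || map[t + 1] != 0);
		assert(map[16] != 0);	/* the maximum is directly encodable */
		return 0;
	}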
diff --git a/drivers/watchdog/ts72xx_wdt.c b/drivers/watchdog/ts72xx_wdt.c
index 4da59b4..42913f1 100644
--- a/drivers/watchdog/ts72xx_wdt.c
+++ b/drivers/watchdog/ts72xx_wdt.c
@@ -403,21 +403,11 @@ static int ts72xx_wdt_probe(struct platform_device *pdev)
}
r1 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!r1) {
- dev_err(&pdev->dev, "failed to get memory resource\n");
- return -ENODEV;
- }
-
wdt->control_reg = devm_ioremap_resource(&pdev->dev, r1);
if (IS_ERR(wdt->control_reg))
return PTR_ERR(wdt->control_reg);
r2 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (!r2) {
- dev_err(&pdev->dev, "failed to get memory resource\n");
- return -ENODEV;
- }
-
wdt->feed_reg = devm_ioremap_resource(&pdev->dev, r2);
if (IS_ERR(wdt->feed_reg))
return PTR_ERR(wdt->feed_reg);
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 3101cf6..a50c6e3 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -349,8 +349,6 @@ static enum bp_state increase_reservation(unsigned long nr_pages)
BUG_ON(page == NULL);
pfn = page_to_pfn(page);
- BUG_ON(!xen_feature(XENFEAT_auto_translated_physmap) &&
- phys_to_machine_mapping_valid(pfn));
set_phys_to_machine(pfn, frame_list[i]);
@@ -380,6 +378,7 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
enum bp_state state = BP_DONE;
unsigned long pfn, i;
struct page *page;
+ struct page *scratch_page;
int ret;
struct xen_memory_reservation reservation = {
.address_bits = 0,
@@ -399,6 +398,8 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
if (nr_pages > ARRAY_SIZE(frame_list))
nr_pages = ARRAY_SIZE(frame_list);
+ scratch_page = get_balloon_scratch_page();
+
for (i = 0; i < nr_pages; i++) {
page = alloc_page(gfp);
if (page == NULL) {
@@ -416,7 +417,7 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
if (xen_pv_domain() && !PageHighMem(page)) {
ret = HYPERVISOR_update_va_mapping(
(unsigned long)__va(pfn << PAGE_SHIFT),
- pfn_pte(page_to_pfn(__get_cpu_var(balloon_scratch_page)),
+ pfn_pte(page_to_pfn(scratch_page),
PAGE_KERNEL_RO), 0);
BUG_ON(ret);
}
@@ -432,14 +433,14 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
pfn = mfn_to_pfn(frame_list[i]);
if (!xen_feature(XENFEAT_auto_translated_physmap)) {
unsigned long p;
- struct page *pg;
- pg = __get_cpu_var(balloon_scratch_page);
- p = page_to_pfn(pg);
+ p = page_to_pfn(scratch_page);
__set_phys_to_machine(pfn, pfn_to_mfn(p));
}
balloon_append(pfn_to_page(pfn));
}
+ put_balloon_scratch_page();
+
set_xen_guest_handle(reservation.extent_start, frame_list);
reservation.nr_extents = nr_pages;
ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
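Note on the xen balloon hunks: instead of re-reading the per-CPU
balloon_scratch_page inside the loop, the scratch page is now taken once via
get_balloon_scratch_page() before the batch and released with
put_balloon_scratch_page() after it, making the acquire/release pairing
explicit and keeping the same page in use for the whole batch. In outline
(identifiers from the hunk above):

	scratch_page = get_balloon_scratch_page();	/* take once */
	for (i = 0; i < nr_pages; i++) {
		/* remap ballooned-out pages at scratch_page ... */
	}
	put_balloon_scratch_page();			/* release after the batch */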